Compare commits
5 Commits
9093059d05 ... 8372aa7278
| Author | SHA1 | Date |
|---|---|---|
| | 8372aa7278 | |
| | 7b5a939271 | |
| | 2fa1dff4b7 | |
| | 31fb275ee3 | |
| | 8a0f6e71e8 | |
```diff
@@ -1,9 +1,6 @@
 import pandas as pd
 import re

-from collections import Counter
-
-
 class InteractionAnalysis:
     def __init__(self, word_exclusions: set[str]):
         self.word_exclusions = word_exclusions
```
```diff
@@ -12,51 +9,6 @@ class InteractionAnalysis:
         tokens = re.findall(r"\b[a-z]{3,}\b", text)
         return [t for t in tokens if t not in self.word_exclusions]

-    def _vocab_richness_per_user(
-        self, df: pd.DataFrame, min_words: int = 20, top_most_used_words: int = 100
-    ) -> list:
-        df = df.copy()
-        df["content"] = df["content"].fillna("").astype(str).str.lower()
-        df["tokens"] = df["content"].apply(self._tokenize)
-
-        rows = []
-        for author, group in df.groupby("author"):
-            all_tokens = [t for tokens in group["tokens"] for t in tokens]
-
-            total_words = len(all_tokens)
-            unique_words = len(set(all_tokens))
-            events = len(group)
-
-            # Min amount of words for a user, any less than this might give weird results
-            if total_words < min_words:
-                continue
-
-            # 100% = they never reused a word (excluding stop words)
-            vocab_richness = unique_words / total_words
-            avg_words = total_words / max(events, 1)
-
-            counts = Counter(all_tokens)
-            top_words = [
-                {"word": w, "count": int(c)}
-                for w, c in counts.most_common(top_most_used_words)
-            ]
-
-            rows.append(
-                {
-                    "author": author,
-                    "events": int(events),
-                    "total_words": int(total_words),
-                    "unique_words": int(unique_words),
-                    "vocab_richness": round(vocab_richness, 3),
-                    "avg_words_per_event": round(avg_words, 2),
-                    "top_words": top_words,
-                }
-            )
-
-        rows = sorted(rows, key=lambda x: x["vocab_richness"], reverse=True)
-
-        return rows
-
     def interaction_graph(self, df: pd.DataFrame):
         interactions = {a: {} for a in df["author"].dropna().unique()}
```
```diff
@@ -61,3 +61,19 @@ class LinguisticAnalysis:
             .head(limit)
             .to_dict(orient="records")
         )
+
+    def lexical_diversity(self, df: pd.DataFrame) -> dict:
+        tokens = (
+            df["content"].fillna("").astype(str).str.lower()
+            .str.findall(r"\b[a-z]{2,}\b")
+            .explode()
+        )
+        tokens = tokens[~tokens.isin(self.word_exclusions)]
+        total = max(len(tokens), 1)
+        unique = int(tokens.nunique())
+
+        return {
+            "total_tokens": total,
+            "unique_tokens": unique,
+            "ttr": round(unique / total, 4),
+        }
```
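For context, the `ttr` the new method reports is a plain type–token ratio: unique tokens divided by total tokens after exclusions. A minimal sketch of the same pipeline on toy data (the DataFrame and the `{"the"}` exclusion set are invented for illustration):

```python
import pandas as pd

# Toy data, invented for illustration; "content" mirrors the column the new
# lexical_diversity method reads, and {"the"} stands in for word_exclusions.
df = pd.DataFrame({"content": ["The cat sat", "the cat ran"]})

tokens = (
    df["content"].fillna("").astype(str).str.lower()
    .str.findall(r"\b[a-z]{2,}\b")
    .explode()
)
tokens = tokens[~tokens.isin({"the"})]

total = max(len(tokens), 1)
unique = int(tokens.nunique())
print({"total_tokens": total, "unique_tokens": unique, "ttr": round(unique / total, 4)})
# -> {'total_tokens': 4, 'unique_tokens': 3, 'ttr': 0.75}
```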
```diff
@@ -39,7 +39,7 @@ class StatGen:
         self.linguistic_analysis = LinguisticAnalysis(EXCLUDE_WORDS)
         self.cultural_analysis = CulturalAnalysis()
         self.summary_analysis = SummaryAnalysis()
-        self.user_analysis = UserAnalysis()
+        self.user_analysis = UserAnalysis(EXCLUDE_WORDS)

     ## Private Methods
     def _prepare_filtered_df(self, df: pd.DataFrame, filters: dict | None = None) -> pd.DataFrame:
```
```diff
@@ -94,6 +94,7 @@ class StatGen:
             "word_frequencies": self.linguistic_analysis.word_frequencies(filtered_df),
             "common_two_phrases": self.linguistic_analysis.ngrams(filtered_df),
             "common_three_phrases": self.linguistic_analysis.ngrams(filtered_df, n=3),
+            "lexical_diversity": self.linguistic_analysis.lexical_diversity(filtered_df)
         }

     def emotional(self, df: pd.DataFrame, filters: dict | None = None) -> dict:
```
```diff
@@ -127,13 +128,9 @@ class StatGen:
         filtered_df = self._prepare_filtered_df(df, filters)

         return {
-            "identity_markers": self.cultural_analysis.get_identity_markers(
-                filtered_df
-            ),
+            "identity_markers": self.cultural_analysis.get_identity_markers(filtered_df),
             "stance_markers": self.cultural_analysis.get_stance_markers(filtered_df),
-            "entity_salience": self.cultural_analysis.get_avg_emotions_per_entity(
-                filtered_df
-            ),
+            "avg_emotion_per_entity": self.cultural_analysis.get_avg_emotions_per_entity(filtered_df)
         }

     def summary(self, df: pd.DataFrame, filters: dict | None = None) -> dict:
```
```diff
@@ -1,7 +1,61 @@
 import pandas as pd
+import re
+
+from collections import Counter


 class UserAnalysis:
+    def __init__(self, word_exclusions: set[str]):
+        self.word_exclusions = word_exclusions
+
+    def _tokenize(self, text: str):
+        tokens = re.findall(r"\b[a-z]{3,}\b", text)
+        return [t for t in tokens if t not in self.word_exclusions]
+
+    def _vocab_richness_per_user(
+        self, df: pd.DataFrame, min_words: int = 20, top_most_used_words: int = 100
+    ) -> list:
+        df = df.copy()
+        df["content"] = df["content"].fillna("").astype(str).str.lower()
+        df["tokens"] = df["content"].apply(self._tokenize)
+
+        rows = []
+        for author, group in df.groupby("author"):
+            all_tokens = [t for tokens in group["tokens"] for t in tokens]
+
+            total_words = len(all_tokens)
+            unique_words = len(set(all_tokens))
+            events = len(group)
+
+            # Min amount of words for a user, any less than this might give weird results
+            if total_words < min_words:
+                continue
+
+            # 100% = they never reused a word (excluding stop words)
+            vocab_richness = unique_words / total_words
+            avg_words = total_words / max(events, 1)
+
+            counts = Counter(all_tokens)
+            top_words = [
+                {"word": w, "count": int(c)}
+                for w, c in counts.most_common(top_most_used_words)
+            ]
+
+            rows.append(
+                {
+                    "author": author,
+                    "events": int(events),
+                    "total_words": int(total_words),
+                    "unique_words": int(unique_words),
+                    "vocab_richness": round(vocab_richness, 3),
+                    "avg_words_per_event": round(avg_words, 2),
+                    "top_words": top_words,
+                }
+            )
+
+        rows = sorted(rows, key=lambda x: x["vocab_richness"], reverse=True)
+
+        return rows

     def top_users(self, df: pd.DataFrame) -> list:
         counts = df.groupby(["author", "source"]).size().sort_values(ascending=False)
```
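A rough usage sketch for the relocated method, under assumptions: the toy frame, the exclusion set, and the lowered `min_words` are invented for illustration, and since `_vocab_richness_per_user` is private, real callers presumably go through `StatGen` rather than calling it directly:

```python
import pandas as pd

# Invented two-message sample for one author; real frames come from a dataset.
df = pd.DataFrame({
    "author": ["alice", "alice"],
    "content": ["quick brown fox jumps over lazy dog", "lorem ipsum dolor sit amet"],
})

ua = UserAnalysis(word_exclusions={"over"})
rows = ua._vocab_richness_per_user(df, min_words=5, top_most_used_words=3)
# rows[0] -> {"author": "alice", "events": 2, "total_words": 11, "unique_words": 11,
#             "vocab_richness": 1.0, "avg_words_per_event": 5.5, "top_words": [...]}
```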
```diff
@@ -524,6 +524,27 @@ def get_interaction_analysis(dataset_id):
         print(traceback.format_exc())
         return jsonify({"error": f"An unexpected error occurred"}), 500

+@app.route("/dataset/<int:dataset_id>/all", methods=["GET"])
+@jwt_required()
+def get_full_dataset(dataset_id: int):
+    try:
+        user_id = int(get_jwt_identity())
+        if not dataset_manager.authorize_user_dataset(dataset_id, user_id):
+            raise NotAuthorisedException(
+                "This user is not authorised to access this dataset"
+            )
+
+        dataset_content = dataset_manager.get_dataset_content(dataset_id)
+        return jsonify(dataset_content.to_dict(orient="records")), 200
+    except NotAuthorisedException:
+        return jsonify({"error": "User is not authorised to access this content"}), 403
+    except NonExistentDatasetException:
+        return jsonify({"error": "Dataset does not exist"}), 404
+    except ValueError as e:
+        return jsonify({"error": f"Malformed or missing data"}), 400
+    except Exception as e:
+        print(traceback.format_exc())
+        return jsonify({"error": f"An unexpected error occurred"}), 500

 if __name__ == "__main__":
     app.run(debug=True)
```
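A client-side sketch of the new `/dataset/<id>/all` route; the base URL, dataset id, and token are placeholders, while the Bearer header and status codes follow the handler above:

```python
import requests

BASE = "http://localhost:5000"  # placeholder host
TOKEN = "<jwt-access-token>"    # placeholder JWT from the login flow

resp = requests.get(
    f"{BASE}/dataset/42/all",   # 42 is an arbitrary example id
    headers={"Authorization": f"Bearer {TOKEN}"},
)
if resp.ok:
    records = resp.json()       # list of row dicts, from to_dict(orient="records")
else:
    print(resp.status_code, resp.json().get("error"))  # 403 / 404 / 400 / 500
```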
```diff
@@ -101,7 +101,7 @@ class DatasetManager:
                     row["source"],
                     row.get("topic"),
                     row.get("topic_confidence"),
-                    Json(row["ner_entities"]) if row.get("ner_entities") else None,
+                    Json(row["entities"]) if row.get("entities") is not None else None,
                     row.get("emotion_anger"),
                     row.get("emotion_disgust"),
                     row.get("emotion_fear"),
```
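One behavioural nuance in the new guard: truthiness (`if row.get(...)`) skips empty lists and dicts, which are falsy but valid JSON, while `is not None` keeps them. A small illustration with psycopg2's `Json` adapter, which the diff uses; the row here is hypothetical:

```python
from psycopg2.extras import Json

row = {"entities": []}  # hypothetical row: an empty but valid entity list

# Truthiness check drops the empty list entirely:
old = Json(row["entities"]) if row.get("entities") else None               # -> None
# The is-not-None check only drops genuinely missing values:
new = Json(row["entities"]) if row.get("entities") is not None else None  # -> Json([])
```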