refactor: update analysis classes to accept DataFrame as parameter instead of instance variable
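What this means at call sites, sketched below (the variable names and sample frame are illustrative, not part of this commit; the class import is omitted since module paths are not shown in this diff):

    import pandas as pd

    df = pd.DataFrame({"content": ["we did it", "maybe so"], "topic": ["Sports", "Misc"]})

    # before: the DataFrame was bound at construction time
    analysis = CulturalAnalysis(df, content_col="content", topic_col="topic")
    markers = analysis.get_identity_markers()

    # after: construct once, pass the DataFrame (or any slice of it) per call
    analysis = CulturalAnalysis(content_col="content", topic_col="topic")
    markers = analysis.get_identity_markers(df)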
@@ -6,13 +6,12 @@ from typing import Any


 class CulturalAnalysis:
-    def __init__(self, df: pd.DataFrame, content_col: str = "content", topic_col: str = "topic"):
-        self.df = df
+    def __init__(self, content_col: str = "content", topic_col: str = "topic"):
         self.content_col = content_col
         self.topic_col = topic_col

-    def get_identity_markers(self):
-        df = self.df.copy()
+    def get_identity_markers(self, original_df: pd.DataFrame) -> dict[str, Any]:
+        df = original_df.copy()
         s = df[self.content_col].fillna("").astype(str).str.lower()

         in_group_words = {"we", "us", "our", "ourselves"}
@@ -60,8 +59,8 @@ class CulturalAnalysis:

         return result

-    def get_stance_markers(self) -> dict[str, Any]:
-        s = self.df[self.content_col].fillna("").astype(str)
+    def get_stance_markers(self, df: pd.DataFrame) -> dict[str, Any]:
+        s = df[self.content_col].fillna("").astype(str)

         hedges = {
             "maybe", "perhaps", "possibly", "probably", "likely", "seems", "seem",
@@ -104,13 +103,11 @@ class CulturalAnalysis:
             "permission_per_1k_tokens": round(1000 * perm_counts.sum() / token_counts.sum(), 3),
         }

-    def get_avg_emotions_per_entity(self, top_n: int = 25, min_posts: int = 10) -> dict[str, Any]:
-        if "entities" not in self.df.columns:
+    def get_avg_emotions_per_entity(self, df: pd.DataFrame, top_n: int = 25, min_posts: int = 10) -> dict[str, Any]:
+        if "entities" not in df.columns:
             return {"entity_emotion_avg": {}}
-
-        df = self.df
         emotion_cols = [c for c in df.columns if c.startswith("emotion_")]

         entity_counter = Counter()

         for row in df["entities"].dropna():
@@ -1,18 +1,15 @@
 import pandas as pd


 class EmotionalAnalysis:
-    def __init__(self, df: pd.DataFrame):
-        self.df = df
-
-    def avg_emotion_by_topic(self) -> dict:
+    def avg_emotion_by_topic(self, df: pd.DataFrame) -> dict:
         emotion_cols = [
-            col for col in self.df.columns
+            col for col in df.columns
             if col.startswith("emotion_")
         ]

         counts = (
-            self.df[
-                (self.df["topic"] != "Misc")
+            df[
+                (df["topic"] != "Misc")
             ]
             .groupby("topic")
             .size()
@@ -20,8 +17,8 @@ class EmotionalAnalysis:
         )

         avg_emotion_by_topic = (
-            self.df[
-                (self.df["topic"] != "Misc")
+            df[
+                (df["topic"] != "Misc")
             ]
             .groupby("topic")[emotion_cols]
             .mean()
@@ -5,8 +5,7 @@ from collections import Counter


 class InteractionAnalysis:
-    def __init__(self, df: pd.DataFrame, word_exclusions: set[str]):
-        self.df = df
+    def __init__(self, word_exclusions: set[str]):
         self.word_exclusions = word_exclusions

     def _tokenize(self, text: str):
@@ -14,9 +13,9 @@ class InteractionAnalysis:
         return [t for t in tokens if t not in self.word_exclusions]

     def _vocab_richness_per_user(
-        self, min_words: int = 20, top_most_used_words: int = 100
+        self, df: pd.DataFrame, min_words: int = 20, top_most_used_words: int = 100
     ) -> list:
-        df = self.df.copy()
+        df = df.copy()
         df["content"] = df["content"].fillna("").astype(str).str.lower()
         df["tokens"] = df["content"].apply(self._tokenize)
@@ -58,10 +57,8 @@ class InteractionAnalysis:

         return rows

-    def top_users(self) -> list:
-        counts = (
-            self.df.groupby(["author", "source"]).size().sort_values(ascending=False)
-        )
+    def top_users(self, df: pd.DataFrame) -> list:
+        counts = df.groupby(["author", "source"]).size().sort_values(ascending=False)

         top_users = [
             {"author": author, "source": source, "count": int(count)}
@@ -70,14 +67,14 @@ class InteractionAnalysis:

         return top_users

-    def per_user_analysis(self) -> dict:
-        per_user = self.df.groupby(["author", "type"]).size().unstack(fill_value=0)
+    def per_user_analysis(self, df: pd.DataFrame) -> dict:
+        per_user = df.groupby(["author", "type"]).size().unstack(fill_value=0)

-        emotion_cols = [col for col in self.df.columns if col.startswith("emotion_")]
+        emotion_cols = [col for col in df.columns if col.startswith("emotion_")]

         avg_emotions_by_author = {}
         if emotion_cols:
-            avg_emotions = self.df.groupby("author")[emotion_cols].mean().fillna(0.0)
+            avg_emotions = df.groupby("author")[emotion_cols].mean().fillna(0.0)
             avg_emotions_by_author = {
                 author: {emotion: float(score) for emotion, score in row.items()}
                 for author, row in avg_emotions.iterrows()
@@ -97,7 +94,7 @@ class InteractionAnalysis:
         per_user = per_user.sort_values("comment_post_ratio", ascending=True)
         per_user_records = per_user.reset_index().to_dict(orient="records")

-        vocab_rows = self._vocab_richness_per_user()
+        vocab_rows = self._vocab_richness_per_user(df)
         vocab_by_author = {row["author"]: row for row in vocab_rows}

         # merge vocab richness + per_user information
@@ -112,7 +109,14 @@ class InteractionAnalysis:
                 "comment_post_ratio": float(row.get("comment_post_ratio", 0)),
                 "comment_share": float(row.get("comment_share", 0)),
                 "avg_emotions": avg_emotions_by_author.get(author, {}),
-                "vocab": vocab_by_author.get(author, {"vocab_richness": 0, "avg_words_per_event": 0, "top_words": []}),
+                "vocab": vocab_by_author.get(
+                    author,
+                    {
+                        "vocab_richness": 0,
+                        "avg_words_per_event": 0,
+                        "top_words": [],
+                    },
+                ),
             }
         )
@@ -120,13 +124,13 @@ class InteractionAnalysis:

         return merged_users

-    def interaction_graph(self):
-        interactions = {a: {} for a in self.df["author"].dropna().unique()}
+    def interaction_graph(self, df: pd.DataFrame):
+        interactions = {a: {} for a in df["author"].dropna().unique()}

         # reply_to refers to the comment id, this allows us to map comment ids to usernames
-        id_to_author = self.df.set_index("id")["author"].to_dict()
+        id_to_author = df.set_index("id")["author"].to_dict()

-        for _, row in self.df.iterrows():
+        for _, row in df.iterrows():
             a = row["author"]
             reply_id = row["reply_to"]
@@ -141,10 +145,10 @@ class InteractionAnalysis:

         return interactions

-    def average_thread_depth(self):
+    def average_thread_depth(self, df: pd.DataFrame):
         depths = []
-        id_to_reply = self.df.set_index("id")["reply_to"].to_dict()
-        for _, row in self.df.iterrows():
+        id_to_reply = df.set_index("id")["reply_to"].to_dict()
+        for _, row in df.iterrows():
             depth = 0
             current_id = row["id"]
@@ -163,16 +167,16 @@ class InteractionAnalysis:

         return round(sum(depths) / len(depths), 2)

-    def average_thread_length_by_emotion(self):
+    def average_thread_length_by_emotion(self, df: pd.DataFrame):
         emotion_exclusions = {"emotion_neutral", "emotion_surprise"}

         emotion_cols = [
             c
-            for c in self.df.columns
+            for c in df.columns
             if c.startswith("emotion_") and c not in emotion_exclusions
         ]

-        id_to_reply = self.df.set_index("id")["reply_to"].to_dict()
+        id_to_reply = df.set_index("id")["reply_to"].to_dict()
         length_cache = {}

         def thread_length_from(start_id):
@@ -211,7 +215,7 @@ class InteractionAnalysis:
         emotion_to_lengths = {}

         # Fill NaNs in emotion cols to avoid max() issues
-        emo_df = self.df[["id"] + emotion_cols].copy()
+        emo_df = df[["id"] + emotion_cols].copy()
         emo_df[emotion_cols] = emo_df[emotion_cols].fillna(0)

         for _, row in emo_df.iterrows():
@@ -4,9 +4,9 @@ import re
 from collections import Counter
 from itertools import islice


 class LinguisticAnalysis:
-    def __init__(self, df: pd.DataFrame, word_exclusions: set[str]):
-        self.df = df
+    def __init__(self, word_exclusions: set[str]):
         self.word_exclusions = word_exclusions

     def _tokenize(self, text: str):
@@ -14,29 +14,20 @@ class LinguisticAnalysis:
         return [t for t in tokens if t not in self.word_exclusions]

     def _clean_text(self, text: str) -> str:
-        text = re.sub(r"http\S+", "", text) # remove URLs
+        text = re.sub(r"http\S+", "", text)  # remove URLs
         text = re.sub(r"www\S+", "", text)
-        text = re.sub(r"&\w+;", "", text) # remove HTML entities
-        text = re.sub(r"\bamp\b", "", text) # remove stray amp
+        text = re.sub(r"&\w+;", "", text)  # remove HTML entities
+        text = re.sub(r"\bamp\b", "", text)  # remove stray amp
         text = re.sub(r"\S+\.(jpg|jpeg|png|webp|gif)", "", text)
         return text

-    def word_frequencies(self, limit: int = 100) -> dict:
-        texts = (
-            self.df["content"]
-            .dropna()
-            .astype(str)
-            .str.lower()
-        )
+    def word_frequencies(self, df: pd.DataFrame, limit: int = 100) -> list[dict]:
+        texts = df["content"].dropna().astype(str).str.lower()

         words = []
         for text in texts:
             tokens = re.findall(r"\b[a-z]{3,}\b", text)
-            words.extend(
-                w for w in tokens
-                if w not in self.word_exclusions
-            )
+            words.extend(w for w in tokens if w not in self.word_exclusions)

         counts = Counter(words)
@@ -48,16 +39,16 @@ class LinguisticAnalysis:
         )

         return word_frequencies.to_dict(orient="records")

-    def ngrams(self, n=2, limit=100):
-        texts = self.df["content"].dropna().astype(str).apply(self._clean_text).str.lower()
+    def ngrams(self, df: pd.DataFrame, n=2, limit=100):
+        texts = df["content"].dropna().astype(str).apply(self._clean_text).str.lower()

         all_ngrams = []

         for text in texts:
             tokens = re.findall(r"\b[a-z]{3,}\b", text)

             # stop word removal causes strange behaviors in ngrams
-            #tokens = [w for w in tokens if w not in self.word_exclusions]
+            # tokens = [w for w in tokens if w not in self.word_exclusions]

             ngrams = zip(*(islice(tokens, i, None) for i in range(n)))
             all_ngrams.extend([" ".join(ng) for ng in ngrams])
@@ -69,4 +60,4 @@ class LinguisticAnalysis:
             .sort_values("count", ascending=False)
             .head(limit)
             .to_dict(orient="records")
-            )
+        )
@@ -1,16 +1,14 @@
 import pandas as pd


 class TemporalAnalysis:
-    def __init__(self, df: pd.DataFrame):
-        self.df = df

-    def avg_reply_time_per_emotion(self) -> dict:
-        df = self.df.copy()
+    def avg_reply_time_per_emotion(self, df: pd.DataFrame) -> list[dict]:
+        df = df.copy()

         replies = df[
-            (df["type"] == "comment") &
-            (df["reply_to"].notna()) &
-            (df["reply_to"] != "")
+            (df["type"] == "comment")
+            & (df["reply_to"].notna())
+            & (df["reply_to"] != "")
         ]

         id_to_time = df.set_index("id")["dt"].to_dict()
@@ -23,48 +21,51 @@ class TemporalAnalysis:
                 return None

             return (row["dt"] - parent_time).total_seconds()

         replies["reply_time"] = replies.apply(compute_reply_time, axis=1)
-        emotion_cols = [col for col in df.columns if col.startswith("emotion_") and col not in ("emotion_neutral", "emotion_surprise")]
+        emotion_cols = [
+            col
+            for col in df.columns
+            if col.startswith("emotion_")
+            and col not in ("emotion_neutral", "emotion_surprise")
+        ]
         replies["dominant_emotion"] = replies[emotion_cols].idxmax(axis=1)

         grouped = (
-            replies
-            .groupby("dominant_emotion")["reply_time"]
+            replies.groupby("dominant_emotion")["reply_time"]
             .agg(["mean", "count"])
             .reset_index()
         )

         return grouped.to_dict(orient="records")

-    def posts_per_day(self) -> dict:
-        per_day = (
-            self.df.groupby("date")
-            .size()
-            .reset_index(name="count")
-        )
+    def posts_per_day(self, df: pd.DataFrame) -> list[dict]:
+        per_day = df.groupby("date").size().reset_index(name="count")

         return per_day.to_dict(orient="records")

-    def heatmap(self) -> dict:
+    def heatmap(self, df: pd.DataFrame) -> list[dict]:
         weekday_order = [
-            "Monday", "Tuesday", "Wednesday",
-            "Thursday", "Friday", "Saturday", "Sunday"
+            "Monday",
+            "Tuesday",
+            "Wednesday",
+            "Thursday",
+            "Friday",
+            "Saturday",
+            "Sunday",
         ]

-        self.df["weekday"] = pd.Categorical(
-            self.df["weekday"],
-            categories=weekday_order,
-            ordered=True
+        df = df.copy()
+        df["weekday"] = pd.Categorical(
+            df["weekday"], categories=weekday_order, ordered=True
         )

         heatmap = (
-            self.df
-            .groupby(["weekday", "hour"], observed=True)
+            df.groupby(["weekday", "hour"], observed=True)
             .size()
             .unstack(fill_value=0)
             .reindex(columns=range(24), fill_value=0)
         )

         heatmap.columns = heatmap.columns.map(str)
-        return heatmap.to_dict(orient="records")
\ No newline at end of file
+        return heatmap.to_dict(orient="records")
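A note on the design choice: once no analyzer stores a DataFrame, a single instance can be reused across arbitrary subsets of the data. A hypothetical driver, assuming an events_df with the "source" column these classes already group by (the exclusion set and the loop are assumptions, not from this commit):

    # stateless analyzers: build once, feed any slice of the data
    linguistic = LinguisticAnalysis(word_exclusions={"the", "and", "for"})
    temporal = TemporalAnalysis()

    for source, subset in events_df.groupby("source"):
        top_words = linguistic.word_frequencies(subset, limit=50)  # list[dict] of word counts
        daily = temporal.posts_per_day(subset)  # list[dict] of per-date counts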