style: run python linter & prettifier on backend code

This commit is contained in:
2026-03-25 19:34:43 +00:00
parent aae10c4d9d
commit 376773a0cc
17 changed files with 408 additions and 315 deletions

View File

@@ -15,7 +15,8 @@ class CulturalAnalysis:
emotion_exclusions = {"emotion_neutral", "emotion_surprise"} emotion_exclusions = {"emotion_neutral", "emotion_surprise"}
emotion_cols = [ emotion_cols = [
c for c in df.columns c
for c in df.columns
if c.startswith("emotion_") and c not in emotion_exclusions if c.startswith("emotion_") and c not in emotion_exclusions
] ]
@@ -40,7 +41,6 @@ class CulturalAnalysis:
"out_group_usage": out_count, "out_group_usage": out_count,
"in_group_ratio": round(in_count / max(total_tokens, 1), 5), "in_group_ratio": round(in_count / max(total_tokens, 1), 5),
"out_group_ratio": round(out_count / max(total_tokens, 1), 5), "out_group_ratio": round(out_count / max(total_tokens, 1), 5),
"in_group_posts": int(in_mask.sum()), "in_group_posts": int(in_mask.sum()),
"out_group_posts": int(out_mask.sum()), "out_group_posts": int(out_mask.sum()),
"tie_posts": int(tie_mask.sum()), "tie_posts": int(tie_mask.sum()),
@@ -49,8 +49,16 @@ class CulturalAnalysis:
if emotion_cols: if emotion_cols:
emo = df[emotion_cols].apply(pd.to_numeric, errors="coerce").fillna(0.0) emo = df[emotion_cols].apply(pd.to_numeric, errors="coerce").fillna(0.0)
in_avg = emo.loc[in_mask].mean() if in_mask.any() else pd.Series(0.0, index=emotion_cols) in_avg = (
out_avg = emo.loc[out_mask].mean() if out_mask.any() else pd.Series(0.0, index=emotion_cols) emo.loc[in_mask].mean()
if in_mask.any()
else pd.Series(0.0, index=emotion_cols)
)
out_avg = (
emo.loc[out_mask].mean()
if out_mask.any()
else pd.Series(0.0, index=emotion_cols)
)
result["in_group_emotion_avg"] = in_avg.to_dict() result["in_group_emotion_avg"] = in_avg.to_dict()
result["out_group_emotion_avg"] = out_avg.to_dict() result["out_group_emotion_avg"] = out_avg.to_dict()
@@ -60,9 +68,15 @@ class CulturalAnalysis:
def get_stance_markers(self, df: pd.DataFrame) -> dict[str, Any]: def get_stance_markers(self, df: pd.DataFrame) -> dict[str, Any]:
s = df[self.content_col].fillna("").astype(str) s = df[self.content_col].fillna("").astype(str)
hedge_pattern = re.compile(r"\b(maybe|perhaps|possibly|probably|likely|seems|seem|i think|i feel|i guess|kind of|sort of|somewhat)\b") hedge_pattern = re.compile(
certainty_pattern = re.compile(r"\b(definitely|certainly|clearly|obviously|undeniably|always|never)\b") r"\b(maybe|perhaps|possibly|probably|likely|seems|seem|i think|i feel|i guess|kind of|sort of|somewhat)\b"
deontic_pattern = re.compile(r"\b(must|should|need|needs|have to|has to|ought|required|require)\b") )
certainty_pattern = re.compile(
r"\b(definitely|certainly|clearly|obviously|undeniably|always|never)\b"
)
deontic_pattern = re.compile(
r"\b(must|should|need|needs|have to|has to|ought|required|require)\b"
)
permission_pattern = re.compile(r"\b(can|allowed|okay|ok|permitted)\b") permission_pattern = re.compile(r"\b(can|allowed|okay|ok|permitted)\b")
hedge_counts = s.str.count(hedge_pattern) hedge_counts = s.str.count(hedge_pattern)
@@ -70,20 +84,32 @@ class CulturalAnalysis:
deontic_counts = s.str.count(deontic_pattern) deontic_counts = s.str.count(deontic_pattern)
perm_counts = s.str.count(permission_pattern) perm_counts = s.str.count(permission_pattern)
token_counts = s.apply(lambda t: len(re.findall(r"\b[a-z]{2,}\b", t))).replace(0, 1) token_counts = s.apply(lambda t: len(re.findall(r"\b[a-z]{2,}\b", t))).replace(
0, 1
)
return { return {
"hedge_total": int(hedge_counts.sum()), "hedge_total": int(hedge_counts.sum()),
"certainty_total": int(certainty_counts.sum()), "certainty_total": int(certainty_counts.sum()),
"deontic_total": int(deontic_counts.sum()), "deontic_total": int(deontic_counts.sum()),
"permission_total": int(perm_counts.sum()), "permission_total": int(perm_counts.sum()),
"hedge_per_1k_tokens": round(1000 * hedge_counts.sum() / token_counts.sum(), 3), "hedge_per_1k_tokens": round(
"certainty_per_1k_tokens": round(1000 * certainty_counts.sum() / token_counts.sum(), 3), 1000 * hedge_counts.sum() / token_counts.sum(), 3
"deontic_per_1k_tokens": round(1000 * deontic_counts.sum() / token_counts.sum(), 3), ),
"permission_per_1k_tokens": round(1000 * perm_counts.sum() / token_counts.sum(), 3), "certainty_per_1k_tokens": round(
1000 * certainty_counts.sum() / token_counts.sum(), 3
),
"deontic_per_1k_tokens": round(
1000 * deontic_counts.sum() / token_counts.sum(), 3
),
"permission_per_1k_tokens": round(
1000 * perm_counts.sum() / token_counts.sum(), 3
),
} }
def get_avg_emotions_per_entity(self, df: pd.DataFrame, top_n: int = 25, min_posts: int = 10) -> dict[str, Any]: def get_avg_emotions_per_entity(
self, df: pd.DataFrame, top_n: int = 25, min_posts: int = 10
) -> dict[str, Any]:
if "ner_entities" not in df.columns: if "ner_entities" not in df.columns:
return {"entity_emotion_avg": {}} return {"entity_emotion_avg": {}}
@@ -92,10 +118,14 @@ class CulturalAnalysis:
entity_df = df[["ner_entities"] + emotion_cols].explode("ner_entities") entity_df = df[["ner_entities"] + emotion_cols].explode("ner_entities")
entity_df["entity_text"] = entity_df["ner_entities"].apply( entity_df["entity_text"] = entity_df["ner_entities"].apply(
lambda e: e.get("text").strip() lambda e: (
if isinstance(e, dict) and isinstance(e.get("text"), str) and len(e.get("text")) >= 3 e.get("text").strip()
if isinstance(e, dict)
and isinstance(e.get("text"), str)
and len(e.get("text")) >= 3
else None else None
) )
)
entity_df = entity_df.dropna(subset=["entity_text"]) entity_df = entity_df.dropna(subset=["entity_text"])
entity_counts = entity_df["entity_text"].value_counts().head(top_n) entity_counts = entity_df["entity_text"].value_counts().head(top_n)

View File

@@ -2,6 +2,7 @@ import pandas as pd
from server.analysis.nlp import NLP from server.analysis.nlp import NLP
class DatasetEnrichment: class DatasetEnrichment:
def __init__(self, df: pd.DataFrame, topics: dict): def __init__(self, df: pd.DataFrame, topics: dict):
self.df = self._explode_comments(df) self.df = self._explode_comments(df)
@@ -10,7 +11,9 @@ class DatasetEnrichment:
def _explode_comments(self, df) -> pd.DataFrame: def _explode_comments(self, df) -> pd.DataFrame:
comments_df = df[["id", "comments"]].explode("comments") comments_df = df[["id", "comments"]].explode("comments")
comments_df = comments_df[comments_df["comments"].apply(lambda x: isinstance(x, dict))] comments_df = comments_df[
comments_df["comments"].apply(lambda x: isinstance(x, dict))
]
comments_df = pd.json_normalize(comments_df["comments"]) comments_df = pd.json_normalize(comments_df["comments"])
posts_df = df.drop(columns=["comments"]) posts_df = df.drop(columns=["comments"])
@@ -26,8 +29,8 @@ class DatasetEnrichment:
return df return df
def enrich(self) -> pd.DataFrame: def enrich(self) -> pd.DataFrame:
self.df['timestamp'] = pd.to_numeric(self.df['timestamp'], errors='raise') self.df["timestamp"] = pd.to_numeric(self.df["timestamp"], errors="raise")
self.df['date'] = pd.to_datetime(self.df['timestamp'], unit='s').dt.date self.df["date"] = pd.to_datetime(self.df["timestamp"], unit="s").dt.date
self.df["dt"] = pd.to_datetime(self.df["timestamp"], unit="s", utc=True) self.df["dt"] = pd.to_datetime(self.df["timestamp"], unit="s", utc=True)
self.df["hour"] = self.df["dt"].dt.hour self.df["hour"] = self.df["dt"].dt.hour
self.df["weekday"] = self.df["dt"].dt.day_name() self.df["weekday"] = self.df["dt"].dt.day_name()

View File

@@ -1,6 +1,7 @@
import pandas as pd import pandas as pd
import re import re
class InteractionAnalysis: class InteractionAnalysis:
def __init__(self, word_exclusions: set[str]): def __init__(self, word_exclusions: set[str]):
self.word_exclusions = word_exclusions self.word_exclusions = word_exclusions
@@ -76,12 +77,16 @@ class InteractionAnalysis:
total_authors = len(author_counts) total_authors = len(author_counts)
top_10_pct_n = max(1, int(total_authors * 0.1)) top_10_pct_n = max(1, int(total_authors * 0.1))
top_10_pct_share = round(author_counts.head(top_10_pct_n).sum() / total_comments, 4) top_10_pct_share = round(
author_counts.head(top_10_pct_n).sum() / total_comments, 4
)
return { return {
"total_commenting_authors": total_authors, "total_commenting_authors": total_authors,
"top_10pct_author_count": top_10_pct_n, "top_10pct_author_count": top_10_pct_n,
"top_10pct_comment_share": float(top_10_pct_share), "top_10pct_comment_share": float(top_10_pct_share),
"single_comment_authors": int((author_counts == 1).sum()), "single_comment_authors": int((author_counts == 1).sum()),
"single_comment_author_ratio": float(round((author_counts == 1).sum() / total_authors, 4)), "single_comment_author_ratio": float(
round((author_counts == 1).sum() / total_authors, 4)
),
} }

View File

@@ -64,7 +64,10 @@ class LinguisticAnalysis:
def lexical_diversity(self, df: pd.DataFrame) -> dict: def lexical_diversity(self, df: pd.DataFrame) -> dict:
tokens = ( tokens = (
df["content"].fillna("").astype(str).str.lower() df["content"]
.fillna("")
.astype(str)
.str.lower()
.str.findall(r"\b[a-z]{2,}\b") .str.findall(r"\b[a-z]{2,}\b")
.explode() .explode()
) )

View File

@@ -6,6 +6,7 @@ from typing import Any
from transformers import pipeline from transformers import pipeline
from sentence_transformers import SentenceTransformer from sentence_transformers import SentenceTransformer
class NLP: class NLP:
_topic_models: dict[str, SentenceTransformer] = {} _topic_models: dict[str, SentenceTransformer] = {}
_emotion_classifiers: dict[str, Any] = {} _emotion_classifiers: dict[str, Any] = {}
@@ -207,8 +208,7 @@ class NLP:
self.df.drop(columns=existing_drop, inplace=True) self.df.drop(columns=existing_drop, inplace=True)
remaining_emotion_cols = [ remaining_emotion_cols = [
c for c in self.df.columns c for c in self.df.columns if c.startswith("emotion_")
if c.startswith("emotion_")
] ]
if remaining_emotion_cols: if remaining_emotion_cols:
@@ -227,8 +227,6 @@ class NLP:
self.df[remaining_emotion_cols] = normalized.values self.df[remaining_emotion_cols] = normalized.values
def add_topic_col(self, confidence_threshold: float = 0.3) -> None: def add_topic_col(self, confidence_threshold: float = 0.3) -> None:
titles = self.df[self.title_col].fillna("").astype(str) titles = self.df[self.title_col].fillna("").astype(str)
contents = self.df[self.content_col].fillna("").astype(str) contents = self.df[self.content_col].fillna("").astype(str)
@@ -302,8 +300,4 @@ class NLP:
for label in all_labels: for label in all_labels:
col_name = f"entity_{label}" col_name = f"entity_{label}"
self.df[col_name] = [ self.df[col_name] = [d.get(label, 0) for d in entity_count_dicts]
d.get(label, 0) for d in entity_count_dicts
]

View File

@@ -3,6 +3,7 @@ import re
from collections import Counter from collections import Counter
class UserAnalysis: class UserAnalysis:
def __init__(self, word_exclusions: set[str]): def __init__(self, word_exclusions: set[str]):
self.word_exclusions = word_exclusions self.word_exclusions = word_exclusions

View File

@@ -30,7 +30,9 @@ load_dotenv()
max_fetch_limit = int(get_env("MAX_FETCH_LIMIT")) max_fetch_limit = int(get_env("MAX_FETCH_LIMIT"))
frontend_url = get_env("FRONTEND_URL") frontend_url = get_env("FRONTEND_URL")
jwt_secret_key = get_env("JWT_SECRET_KEY") jwt_secret_key = get_env("JWT_SECRET_KEY")
jwt_access_token_expires = int(os.getenv("JWT_ACCESS_TOKEN_EXPIRES", 1200)) # Default to 20 minutes jwt_access_token_expires = int(
os.getenv("JWT_ACCESS_TOKEN_EXPIRES", 1200)
) # Default to 20 minutes
# Flask Configuration # Flask Configuration
CORS(app, resources={r"/*": {"origins": frontend_url}}) CORS(app, resources={r"/*": {"origins": frontend_url}})
@@ -52,6 +54,7 @@ connectors = get_available_connectors()
with open("server/topics.json") as f: with open("server/topics.json") as f:
default_topic_list = json.load(f) default_topic_list = json.load(f)
@app.route("/register", methods=["POST"]) @app.route("/register", methods=["POST"])
def register_user(): def register_user():
data = request.get_json() data = request.get_json()
@@ -107,9 +110,13 @@ def login_user():
def profile(): def profile():
current_user = get_jwt_identity() current_user = get_jwt_identity()
return jsonify( return (
jsonify(
message="Access granted", user=auth_manager.get_user_by_id(current_user) message="Access granted", user=auth_manager.get_user_by_id(current_user)
), 200 ),
200,
)
@app.route("/user/datasets") @app.route("/user/datasets")
@jwt_required() @jwt_required()
@@ -117,11 +124,13 @@ def get_user_datasets():
current_user = int(get_jwt_identity()) current_user = int(get_jwt_identity())
return jsonify(dataset_manager.get_user_datasets(current_user)), 200 return jsonify(dataset_manager.get_user_datasets(current_user)), 200
@app.route("/datasets/sources", methods=["GET"]) @app.route("/datasets/sources", methods=["GET"])
def get_dataset_sources(): def get_dataset_sources():
list_metadata = list(get_connector_metadata().values()) list_metadata = list(get_connector_metadata().values())
return jsonify(list_metadata) return jsonify(list_metadata)
@app.route("/datasets/scrape", methods=["POST"]) @app.route("/datasets/scrape", methods=["POST"])
@jwt_required() @jwt_required()
def scrape_data(): def scrape_data():
@@ -178,9 +187,7 @@ def scrape_data():
try: try:
dataset_id = dataset_manager.save_dataset_info( dataset_id = dataset_manager.save_dataset_info(
user_id, user_id, dataset_name, default_topic_list
dataset_name,
default_topic_list
) )
dataset_manager.set_dataset_status( dataset_manager.set_dataset_status(
@@ -189,22 +196,21 @@ def scrape_data():
f"Data is being fetched from {', '.join(source['name'] for source in source_configs)}", f"Data is being fetched from {', '.join(source['name'] for source in source_configs)}",
) )
fetch_and_process_dataset.delay( fetch_and_process_dataset.delay(dataset_id, source_configs, default_topic_list)
dataset_id,
source_configs,
default_topic_list
)
except Exception: except Exception:
print(traceback.format_exc()) print(traceback.format_exc())
return jsonify({"error": "Failed to queue dataset processing"}), 500 return jsonify({"error": "Failed to queue dataset processing"}), 500
return jsonify( return (
jsonify(
{ {
"message": "Dataset queued for processing", "message": "Dataset queued for processing",
"dataset_id": dataset_id, "dataset_id": dataset_id,
"status": "processing", "status": "processing",
} }
), 202 ),
202,
)
@app.route("/datasets/upload", methods=["POST"]) @app.route("/datasets/upload", methods=["POST"])
@@ -226,9 +232,12 @@ def upload_data():
if not post_file.filename.endswith(".jsonl") or not topic_file.filename.endswith( if not post_file.filename.endswith(".jsonl") or not topic_file.filename.endswith(
".json" ".json"
): ):
return jsonify( return (
jsonify(
{"error": "Invalid file type. Only .jsonl and .json files are allowed."} {"error": "Invalid file type. Only .jsonl and .json files are allowed."}
), 400 ),
400,
)
try: try:
current_user = int(get_jwt_identity()) current_user = int(get_jwt_identity())
@@ -241,13 +250,16 @@ def upload_data():
process_dataset.delay(dataset_id, posts_df.to_dict(orient="records"), topics) process_dataset.delay(dataset_id, posts_df.to_dict(orient="records"), topics)
return jsonify( return (
jsonify(
{ {
"message": "Dataset queued for processing", "message": "Dataset queued for processing",
"dataset_id": dataset_id, "dataset_id": dataset_id,
"status": "processing", "status": "processing",
} }
), 202 ),
202,
)
except ValueError as e: except ValueError as e:
return jsonify({"error": f"Failed to read JSONL file"}), 400 return jsonify({"error": f"Failed to read JSONL file"}), 400
except Exception as e: except Exception as e:
@@ -296,9 +308,12 @@ def update_dataset(dataset_id):
return jsonify({"error": "A valid name must be provided"}), 400 return jsonify({"error": "A valid name must be provided"}), 400
dataset_manager.update_dataset_name(dataset_id, new_name.strip()) dataset_manager.update_dataset_name(dataset_id, new_name.strip())
return jsonify( return (
jsonify(
{"message": f"Dataset {dataset_id} renamed to '{new_name.strip()}'"} {"message": f"Dataset {dataset_id} renamed to '{new_name.strip()}'"}
), 200 ),
200,
)
except NotAuthorisedException: except NotAuthorisedException:
return jsonify({"error": "User is not authorised to access this content"}), 403 return jsonify({"error": "User is not authorised to access this content"}), 403
except NonExistentDatasetException: except NonExistentDatasetException:
@@ -321,11 +336,14 @@ def delete_dataset(dataset_id):
dataset_manager.delete_dataset_info(dataset_id) dataset_manager.delete_dataset_info(dataset_id)
dataset_manager.delete_dataset_content(dataset_id) dataset_manager.delete_dataset_content(dataset_id)
return jsonify( return (
jsonify(
{ {
"message": f"Dataset {dataset_id} metadata and content successfully deleted" "message": f"Dataset {dataset_id} metadata and content successfully deleted"
} }
), 200 ),
200,
)
except NotAuthorisedException: except NotAuthorisedException:
return jsonify({"error": "User is not authorised to access this content"}), 403 return jsonify({"error": "User is not authorised to access this content"}), 403
except NonExistentDatasetException: except NonExistentDatasetException:
@@ -524,6 +542,7 @@ def get_interaction_analysis(dataset_id):
print(traceback.format_exc()) print(traceback.format_exc())
return jsonify({"error": f"An unexpected error occurred"}), 500 return jsonify({"error": f"An unexpected error occurred"}), 500
@app.route("/dataset/<int:dataset_id>/all", methods=["GET"]) @app.route("/dataset/<int:dataset_id>/all", methods=["GET"])
@jwt_required() @jwt_required()
def get_full_dataset(dataset_id: int): def get_full_dataset(dataset_id: int):
@@ -546,5 +565,6 @@ def get_full_dataset(dataset_id: int):
print(traceback.format_exc()) print(traceback.format_exc())
return jsonify({"error": f"An unexpected error occurred"}), 500 return jsonify({"error": f"An unexpected error occurred"}), 500
if __name__ == "__main__": if __name__ == "__main__":
app.run(debug=True) app.run(debug=True)

View File

@@ -1,6 +1,7 @@
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from dto.post import Post from dto.post import Post
class BaseConnector(ABC): class BaseConnector(ABC):
# Each subclass declares these at the class level # Each subclass declares these at the class level
source_name: str # machine-readable: "reddit", "youtube" source_name: str # machine-readable: "reddit", "youtube"
@@ -14,16 +15,13 @@ class BaseConnector(ABC):
def is_available(cls) -> bool: def is_available(cls) -> bool:
"""Returns True if all required env vars are set.""" """Returns True if all required env vars are set."""
import os import os
return all(os.getenv(var) for var in cls.required_env) return all(os.getenv(var) for var in cls.required_env)
@abstractmethod @abstractmethod
def get_new_posts_by_search(self, def get_new_posts_by_search(
search: str = None, self, search: str = None, category: str = None, post_limit: int = 10
category: str = None, ) -> list[Post]: ...
post_limit: int = 10
) -> list[Post]:
...
@abstractmethod @abstractmethod
def category_exists(self, category: str) -> bool: def category_exists(self, category: str) -> bool: ...
...

View File

@@ -11,9 +11,8 @@ from server.connectors.base import BaseConnector
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
HEADERS = { HEADERS = {"User-Agent": "Mozilla/5.0 (compatible; ForumScraper/1.0)"}
"User-Agent": "Mozilla/5.0 (compatible; ForumScraper/1.0)"
}
class BoardsAPI(BaseConnector): class BoardsAPI(BaseConnector):
source_name: str = "boards.ie" source_name: str = "boards.ie"
@@ -25,10 +24,8 @@ class BoardsAPI(BaseConnector):
def __init__(self): def __init__(self):
self.base_url = "https://www.boards.ie" self.base_url = "https://www.boards.ie"
def get_new_posts_by_search(self, def get_new_posts_by_search(
search: str, self, search: str, category: str, post_limit: int
category: str,
post_limit: int
) -> list[Post]: ) -> list[Post]:
if search: if search:
raise NotImplementedError("Search not compatible with boards.ie") raise NotImplementedError("Search not compatible with boards.ie")
@@ -96,7 +93,9 @@ class BoardsAPI(BaseConnector):
for i, future in enumerate(as_completed(futures)): for i, future in enumerate(as_completed(futures)):
post_url = futures[future] post_url = futures[future]
logger.debug(f"Fetching Post {i + 1} / {len(urls)} details from URL: {post_url}") logger.debug(
f"Fetching Post {i + 1} / {len(urls)} details from URL: {post_url}"
)
try: try:
post = future.result() post = future.result()
posts.append(post) posts.append(post)
@@ -105,7 +104,6 @@ class BoardsAPI(BaseConnector):
return posts return posts
def _fetch_page(self, url: str) -> str: def _fetch_page(self, url: str) -> str:
response = requests.get(url, headers=HEADERS) response = requests.get(url, headers=HEADERS)
response.raise_for_status() response.raise_for_status()
@@ -122,10 +120,16 @@ class BoardsAPI(BaseConnector):
timestamp_tag = soup.select_one(".postbit-header") timestamp_tag = soup.select_one(".postbit-header")
timestamp = None timestamp = None
if timestamp_tag: if timestamp_tag:
match = re.search(r"\d{2}-\d{2}-\d{4}\s+\d{2}:\d{2}[AP]M", timestamp_tag.get_text()) match = re.search(
r"\d{2}-\d{2}-\d{4}\s+\d{2}:\d{2}[AP]M", timestamp_tag.get_text()
)
timestamp = match.group(0) if match else None timestamp = match.group(0) if match else None
# convert to unix epoch # convert to unix epoch
timestamp = datetime.datetime.strptime(timestamp, "%d-%m-%Y %I:%M%p").timestamp() if timestamp else None timestamp = (
datetime.datetime.strptime(timestamp, "%d-%m-%Y %I:%M%p").timestamp()
if timestamp
else None
)
# Post ID # Post ID
post_num = re.search(r"discussion/(\d+)", post_url) post_num = re.search(r"discussion/(\d+)", post_url)
@@ -133,7 +137,9 @@ class BoardsAPI(BaseConnector):
# Content # Content
content_tag = soup.select_one(".Message.userContent") content_tag = soup.select_one(".Message.userContent")
content = content_tag.get_text(separator="\n", strip=True) if content_tag else None content = (
content_tag.get_text(separator="\n", strip=True) if content_tag else None
)
# Title # Title
title_tag = soup.select_one(".PageTitle h1") title_tag = soup.select_one(".PageTitle h1")
@@ -150,7 +156,7 @@ class BoardsAPI(BaseConnector):
url=post_url, url=post_url,
timestamp=timestamp, timestamp=timestamp,
source=self.source_name, source=self.source_name,
comments=comments comments=comments,
) )
return post return post
@@ -168,9 +174,9 @@ class BoardsAPI(BaseConnector):
soup = BeautifulSoup(html, "html.parser") soup = BeautifulSoup(html, "html.parser")
next_link = soup.find("a", class_="Next") next_link = soup.find("a", class_="Next")
if next_link and next_link.get('href'): if next_link and next_link.get("href"):
href = next_link.get('href') href = next_link.get("href")
current_url = href if href.startswith('http') else url + href current_url = href if href.startswith("http") else url + href
else: else:
current_url = None current_url = None
@@ -186,21 +192,29 @@ class BoardsAPI(BaseConnector):
comment_id = tag.get("id") comment_id = tag.get("id")
# Author # Author
user_elem = tag.find('span', class_='userinfo-username-title') user_elem = tag.find("span", class_="userinfo-username-title")
username = user_elem.get_text(strip=True) if user_elem else None username = user_elem.get_text(strip=True) if user_elem else None
# Timestamp # Timestamp
date_elem = tag.find('span', class_='DateCreated') date_elem = tag.find("span", class_="DateCreated")
timestamp = date_elem.get_text(strip=True) if date_elem else None timestamp = date_elem.get_text(strip=True) if date_elem else None
timestamp = datetime.datetime.strptime(timestamp, "%d-%m-%Y %I:%M%p").timestamp() if timestamp else None timestamp = (
datetime.datetime.strptime(timestamp, "%d-%m-%Y %I:%M%p").timestamp()
if timestamp
else None
)
# Content # Content
message_div = tag.find('div', class_='Message userContent') message_div = tag.find("div", class_="Message userContent")
if message_div.blockquote: if message_div.blockquote:
message_div.blockquote.decompose() message_div.blockquote.decompose()
content = message_div.get_text(separator="\n", strip=True) if message_div else None content = (
message_div.get_text(separator="\n", strip=True)
if message_div
else None
)
comment = Comment( comment = Comment(
id=comment_id, id=comment_id,
@@ -209,10 +223,8 @@ class BoardsAPI(BaseConnector):
content=content, content=content,
timestamp=timestamp, timestamp=timestamp,
reply_to=None, reply_to=None,
source=self.source_name source=self.source_name,
) )
comments.append(comment) comments.append(comment)
return comments return comments

View File

@@ -9,6 +9,7 @@ from server.connectors.base import BaseConnector
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class RedditAPI(BaseConnector): class RedditAPI(BaseConnector):
source_name: str = "reddit" source_name: str = "reddit"
display_name: str = "Reddit" display_name: str = "Reddit"
@@ -19,22 +20,18 @@ class RedditAPI(BaseConnector):
self.url = "https://www.reddit.com/" self.url = "https://www.reddit.com/"
# Public Methods # # Public Methods #
def get_new_posts_by_search(self, def get_new_posts_by_search(
search: str, self, search: str, category: str, post_limit: int
category: str,
post_limit: int
) -> list[Post]: ) -> list[Post]:
prefix = f"r/{category}/" if category else "" prefix = f"r/{category}/" if category else ""
params = {'limit': post_limit} params = {"limit": post_limit}
if search: if search:
endpoint = f"{prefix}search.json" endpoint = f"{prefix}search.json"
params.update({ params.update(
'q': search, {"q": search, "sort": "new", "restrict_sr": "on" if category else "off"}
'sort': 'new', )
'restrict_sr': 'on' if category else 'off'
})
else: else:
endpoint = f"{prefix}new.json" endpoint = f"{prefix}new.json"
@@ -43,19 +40,19 @@ class RedditAPI(BaseConnector):
while len(posts) < post_limit: while len(posts) < post_limit:
batch_limit = min(100, post_limit - len(posts)) batch_limit = min(100, post_limit - len(posts))
params['limit'] = batch_limit params["limit"] = batch_limit
if after: if after:
params['after'] = after params["after"] = after
data = self._fetch_post_overviews(endpoint, params) data = self._fetch_post_overviews(endpoint, params)
if not data or 'data' not in data or not data['data'].get('children'): if not data or "data" not in data or not data["data"].get("children"):
break break
batch_posts = self._parse_posts(data) batch_posts = self._parse_posts(data)
posts.extend(batch_posts) posts.extend(batch_posts)
after = data['data'].get('after') after = data["data"].get("after")
if not after: if not after:
break break
@@ -70,21 +67,20 @@ class RedditAPI(BaseConnector):
while len(posts) < limit: while len(posts) < limit:
batch_limit = min(100, limit - len(posts)) batch_limit = min(100, limit - len(posts))
params = { params = {"limit": batch_limit, "after": after}
'limit': batch_limit,
'after': after
}
data = self._fetch_post_overviews(url, params) data = self._fetch_post_overviews(url, params)
batch_posts = self._parse_posts(data) batch_posts = self._parse_posts(data)
logger.debug(f"Fetched {len(batch_posts)} new posts from subreddit {subreddit}") logger.debug(
f"Fetched {len(batch_posts)} new posts from subreddit {subreddit}"
)
if not batch_posts: if not batch_posts:
break break
posts.extend(batch_posts) posts.extend(batch_posts)
after = data['data'].get('after') after = data["data"].get("after")
if not after: if not after:
break break
@@ -99,8 +95,8 @@ class RedditAPI(BaseConnector):
data = self._fetch_post_overviews(f"r/{category}/about.json", {}) data = self._fetch_post_overviews(f"r/{category}/about.json", {})
return ( return (
data is not None data is not None
and 'data' in data and "data" in data
and data['data'].get('id') is not None and data["data"].get("id") is not None
) )
except Exception: except Exception:
return False return False
@@ -109,25 +105,26 @@ class RedditAPI(BaseConnector):
def _parse_posts(self, data) -> list[Post]: def _parse_posts(self, data) -> list[Post]:
posts = [] posts = []
total_num_posts = len(data['data']['children']) total_num_posts = len(data["data"]["children"])
current_index = 0 current_index = 0
for item in data['data']['children']: for item in data["data"]["children"]:
current_index += 1 current_index += 1
logger.debug(f"Parsing post {current_index} of {total_num_posts}") logger.debug(f"Parsing post {current_index} of {total_num_posts}")
post_data = item['data'] post_data = item["data"]
post = Post( post = Post(
id=post_data['id'], id=post_data["id"],
author=post_data['author'], author=post_data["author"],
title=post_data['title'], title=post_data["title"],
content=post_data.get('selftext', ''), content=post_data.get("selftext", ""),
url=post_data['url'], url=post_data["url"],
timestamp=post_data['created_utc'], timestamp=post_data["created_utc"],
source=self.source_name, source=self.source_name,
comments=self._get_post_comments(post_data['id'])) comments=self._get_post_comments(post_data["id"]),
post.subreddit = post_data['subreddit'] )
post.upvotes = post_data['ups'] post.subreddit = post_data["subreddit"]
post.upvotes = post_data["ups"]
posts.append(post) posts.append(post)
return posts return posts
@@ -140,41 +137,39 @@ class RedditAPI(BaseConnector):
if len(data) < 2: if len(data) < 2:
return comments return comments
comment_data = data[1]['data']['children'] comment_data = data[1]["data"]["children"]
def _parse_comment_tree(items, parent_id=None): def _parse_comment_tree(items, parent_id=None):
for item in items: for item in items:
if item['kind'] != 't1': if item["kind"] != "t1":
continue continue
comment_info = item['data'] comment_info = item["data"]
comment = Comment( comment = Comment(
id=comment_info['id'], id=comment_info["id"],
post_id=post_id, post_id=post_id,
author=comment_info['author'], author=comment_info["author"],
content=comment_info.get('body', ''), content=comment_info.get("body", ""),
timestamp=comment_info['created_utc'], timestamp=comment_info["created_utc"],
reply_to=parent_id or comment_info.get('parent_id', None), reply_to=parent_id or comment_info.get("parent_id", None),
source=self.source_name source=self.source_name,
) )
comments.append(comment) comments.append(comment)
# Process replies recursively # Process replies recursively
replies = comment_info.get('replies') replies = comment_info.get("replies")
if replies and isinstance(replies, dict): if replies and isinstance(replies, dict):
reply_items = replies.get('data', {}).get('children', []) reply_items = replies.get("data", {}).get("children", [])
_parse_comment_tree(reply_items, parent_id=comment.id) _parse_comment_tree(reply_items, parent_id=comment.id)
_parse_comment_tree(comment_data) _parse_comment_tree(comment_data)
return comments return comments
def _parse_user(self, data) -> User: def _parse_user(self, data) -> User:
user_data = data['data'] user_data = data["data"]
user = User( user = User(username=user_data["name"], created_utc=user_data["created_utc"])
username=user_data['name'], user.karma = user_data["total_karma"]
created_utc=user_data['created_utc'])
user.karma = user_data['total_karma']
return user return user
def _fetch_post_overviews(self, endpoint: str, params: dict) -> dict: def _fetch_post_overviews(self, endpoint: str, params: dict) -> dict:
@@ -184,12 +179,20 @@ class RedditAPI(BaseConnector):
for attempt in range(max_retries): for attempt in range(max_retries):
try: try:
response = requests.get(url, headers={'User-agent': 'python:ethnography-college-project:0.1 (by /u/ThisBirchWood)'}, params=params) response = requests.get(
url,
headers={
"User-agent": "python:ethnography-college-project:0.1 (by /u/ThisBirchWood)"
},
params=params,
)
if response.status_code == 429: if response.status_code == 429:
wait_time = response.headers.get("Retry-After", backoff) wait_time = response.headers.get("Retry-After", backoff)
logger.warning(f"Rate limited by Reddit API. Retrying in {wait_time} seconds...") logger.warning(
f"Rate limited by Reddit API. Retrying in {wait_time} seconds..."
)
time.sleep(wait_time) time.sleep(wait_time)
backoff *= 2 backoff *= 2

View File

@@ -3,6 +3,7 @@ import importlib
import server.connectors import server.connectors
from server.connectors.base import BaseConnector from server.connectors.base import BaseConnector
def _discover_connectors() -> list[type[BaseConnector]]: def _discover_connectors() -> list[type[BaseConnector]]:
"""Walk the connectors package and collect all BaseConnector subclasses.""" """Walk the connectors package and collect all BaseConnector subclasses."""
for _, module_name, _ in pkgutil.iter_modules(server.connectors.__path__): for _, module_name, _ in pkgutil.iter_modules(server.connectors.__path__):
@@ -11,20 +12,24 @@ def _discover_connectors() -> list[type[BaseConnector]]:
importlib.import_module(f"server.connectors.{module_name}") importlib.import_module(f"server.connectors.{module_name}")
return [ return [
cls for cls in BaseConnector.__subclasses__() cls
for cls in BaseConnector.__subclasses__()
if cls.source_name # guard against abstract intermediaries if cls.source_name # guard against abstract intermediaries
] ]
def get_available_connectors() -> dict[str, type[BaseConnector]]:
    """Map each connector's source_name to its class, keeping only connectors
    that report themselves as usable via is_available()."""
    available = {}
    for connector in _discover_connectors():
        if connector.is_available():
            available[connector.source_name] = connector
    return available
def get_connector_metadata() -> dict[str, dict]:
    """Describe every available connector for API consumers.

    Returns:
        Mapping of connector id to a dict with the fields the frontend
        needs: "id", "label", "search_enabled", "categories_enabled".
    """
    # Renamed the loop variable from `id` to `connector_id`: `id` shadows
    # the builtin. A dict comprehension replaces the manual accumulator.
    return {
        connector_id: {
            "id": connector_id,
            "label": connector.display_name,
            "search_enabled": connector.search_enabled,
            "categories_enabled": connector.categories_enabled,
        }
        for connector_id, connector in get_available_connectors().items()
    }

View File

@@ -12,6 +12,7 @@ load_dotenv()
API_KEY = os.getenv("YOUTUBE_API_KEY") API_KEY = os.getenv("YOUTUBE_API_KEY")
class YouTubeAPI(BaseConnector): class YouTubeAPI(BaseConnector):
source_name: str = "youtube" source_name: str = "youtube"
display_name: str = "YouTube" display_name: str = "YouTube"
@@ -19,36 +20,40 @@ class YouTubeAPI(BaseConnector):
categories_enabled: bool = False categories_enabled: bool = False
def __init__(self):
    """Create the YouTube Data API v3 client used by all fetch helpers."""
    # API_KEY comes from the environment at module import time; an invalid
    # key only surfaces when a request is executed, not here.
    client = build("youtube", "v3", developerKey=API_KEY)
    self.youtube = client
def get_new_posts_by_search(self, def get_new_posts_by_search(
search: str, self, search: str, category: str, post_limit: int
category: str,
post_limit: int
) -> list[Post]: ) -> list[Post]:
videos = self._search_videos(search, post_limit) videos = self._search_videos(search, post_limit)
posts = [] posts = []
for video in videos: for video in videos:
video_id = video['id']['videoId'] video_id = video["id"]["videoId"]
snippet = video['snippet'] snippet = video["snippet"]
title = snippet['title'] title = snippet["title"]
description = snippet['description'] description = snippet["description"]
published_at = datetime.datetime.strptime(snippet['publishedAt'], "%Y-%m-%dT%H:%M:%SZ").timestamp() published_at = datetime.datetime.strptime(
channel_title = snippet['channelTitle'] snippet["publishedAt"], "%Y-%m-%dT%H:%M:%SZ"
).timestamp()
channel_title = snippet["channelTitle"]
comments = [] comments = []
comments_data = self._get_video_comments(video_id) comments_data = self._get_video_comments(video_id)
for comment_thread in comments_data: for comment_thread in comments_data:
comment_snippet = comment_thread['snippet']['topLevelComment']['snippet'] comment_snippet = comment_thread["snippet"]["topLevelComment"][
"snippet"
]
comment = Comment( comment = Comment(
id=comment_thread['id'], id=comment_thread["id"],
post_id=video_id, post_id=video_id,
content=comment_snippet['textDisplay'], content=comment_snippet["textDisplay"],
author=comment_snippet['authorDisplayName'], author=comment_snippet["authorDisplayName"],
timestamp=datetime.datetime.strptime(comment_snippet['publishedAt'], "%Y-%m-%dT%H:%M:%SZ").timestamp(), timestamp=datetime.datetime.strptime(
comment_snippet["publishedAt"], "%Y-%m-%dT%H:%M:%SZ"
).timestamp(),
reply_to=None, reply_to=None,
source=self.source_name source=self.source_name,
) )
comments.append(comment) comments.append(comment)
@@ -61,7 +66,7 @@ class YouTubeAPI(BaseConnector):
url=f"https://www.youtube.com/watch?v={video_id}", url=f"https://www.youtube.com/watch?v={video_id}",
title=title, title=title,
source=self.source_name, source=self.source_name,
comments=comments comments=comments,
) )
posts.append(post) posts.append(post)
@@ -73,19 +78,14 @@ class YouTubeAPI(BaseConnector):
def _search_videos(self, query, limit): def _search_videos(self, query, limit):
request = self.youtube.search().list( request = self.youtube.search().list(
q=query, q=query, part="snippet", type="video", maxResults=limit
part='snippet',
type='video',
maxResults=limit
) )
response = request.execute() response = request.execute()
return response.get('items', []) return response.get("items", [])
def _get_video_comments(self, video_id): def _get_video_comments(self, video_id):
request = self.youtube.commentThreads().list( request = self.youtube.commentThreads().list(
part='snippet', part="snippet", videoId=video_id, textFormat="plainText"
videoId=video_id,
textFormat='plainText'
) )
try: try:
@@ -93,4 +93,4 @@ class YouTubeAPI(BaseConnector):
except HttpError as e: except HttpError as e:
print(f"Error fetching comments for video {video_id}: {e}") print(f"Error fetching comments for video {video_id}: {e}")
return [] return []
return response.get('items', []) return response.get("items", [])

View File

@@ -5,6 +5,7 @@ from flask_bcrypt import Bcrypt
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+") EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
class AuthManager: class AuthManager:
def __init__(self, db: PostgresConnector, bcrypt: Bcrypt): def __init__(self, db: PostgresConnector, bcrypt: Bcrypt):
self.db = db self.db = db
@@ -38,7 +39,7 @@ class AuthManager:
def authenticate_user(self, username, password):
    """Return the stored user record when the credentials check out, else None."""
    record = self.get_user_by_username(username)
    if not record:
        return None
    if not self.bcrypt.check_password_hash(record["password_hash"], password):
        return None
    return record
@@ -48,7 +49,9 @@ class AuthManager:
return result[0] if result else None return result[0] if result else None
def get_user_by_username(self, username) -> dict:
    """Fetch one user row (id, username, email, password_hash) by username.

    Returns None when no matching user exists.
    """
    sql = "SELECT id, username, email, password_hash FROM users WHERE username = %s"
    rows = self.db.execute(sql, (username,), fetch=True)
    if not rows:
        return None
    return rows[0]

View File

@@ -3,6 +3,7 @@ from server.db.database import PostgresConnector
from psycopg2.extras import Json from psycopg2.extras import Json
from server.exceptions import NonExistentDatasetException from server.exceptions import NonExistentDatasetException
class DatasetManager: class DatasetManager:
def __init__(self, db: PostgresConnector): def __init__(self, db: PostgresConnector):
self.db = db self.db = db
@@ -42,7 +43,9 @@ class DatasetManager:
VALUES (%s, %s, %s) VALUES (%s, %s, %s)
RETURNING id RETURNING id
""" """
result = self.db.execute(query, (user_id, dataset_name, Json(topics)), fetch=True) result = self.db.execute(
query, (user_id, dataset_name, Json(topics)), fetch=True
)
return result[0]["id"] if result else None return result[0]["id"] if result else None
def save_dataset_content(self, dataset_id: int, event_data: pd.DataFrame): def save_dataset_content(self, dataset_id: int, event_data: pd.DataFrame):
@@ -113,7 +116,9 @@ class DatasetManager:
self.db.execute_batch(query, values) self.db.execute_batch(query, values)
def set_dataset_status(self, dataset_id: int, status: str, status_message: str | None = None): def set_dataset_status(
self, dataset_id: int, status: str, status_message: str | None = None
):
if status not in ["fetching", "processing", "complete", "error"]: if status not in ["fetching", "processing", "complete", "error"]:
raise ValueError("Invalid status") raise ValueError("Invalid status")

View File

@@ -22,7 +22,9 @@ class PostgresConnector:
database=os.getenv("POSTGRES_DB", "postgres"), database=os.getenv("POSTGRES_DB", "postgres"),
) )
except psycopg2.OperationalError as e: except psycopg2.OperationalError as e:
raise DatabaseNotConfiguredException(f"Ensure database is up and running: {e}") raise DatabaseNotConfiguredException(
f"Ensure database is up and running: {e}"
)
self.connection.autocommit = False self.connection.autocommit = False

View File

@@ -5,6 +5,7 @@ from server.utils import get_env
load_dotenv() load_dotenv()
REDIS_URL = get_env("REDIS_URL") REDIS_URL = get_env("REDIS_URL")
def create_celery(): def create_celery():
celery = Celery( celery = Celery(
"ethnograph", "ethnograph",
@@ -16,6 +17,7 @@ def create_celery():
celery.conf.accept_content = ["json"] celery.conf.accept_content = ["json"]
return celery return celery
celery = create_celery() celery = create_celery()
from server.queue import tasks from server.queue import tasks

View File

@@ -9,6 +9,7 @@ from server.connectors.registry import get_available_connectors
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@celery.task(bind=True, max_retries=3) @celery.task(bind=True, max_retries=3)
def process_dataset(self, dataset_id: int, posts: list, topics: dict): def process_dataset(self, dataset_id: int, posts: list, topics: dict):
db = PostgresConnector() db = PostgresConnector()
@@ -21,15 +22,19 @@ def process_dataset(self, dataset_id: int, posts: list, topics: dict):
enriched_df = processor.enrich() enriched_df = processor.enrich()
dataset_manager.save_dataset_content(dataset_id, enriched_df) dataset_manager.save_dataset_content(dataset_id, enriched_df)
dataset_manager.set_dataset_status(dataset_id, "complete", "NLP Processing Completed Successfully") dataset_manager.set_dataset_status(
dataset_id, "complete", "NLP Processing Completed Successfully"
)
except Exception as e: except Exception as e:
dataset_manager.set_dataset_status(dataset_id, "error", f"An error occurred: {e}") dataset_manager.set_dataset_status(
dataset_id, "error", f"An error occurred: {e}"
)
@celery.task(bind=True, max_retries=3) @celery.task(bind=True, max_retries=3)
def fetch_and_process_dataset(self, def fetch_and_process_dataset(
dataset_id: int, self, dataset_id: int, source_info: list[dict], topics: dict
source_info: list[dict], ):
topics: dict):
connectors = get_available_connectors() connectors = get_available_connectors()
db = PostgresConnector() db = PostgresConnector()
dataset_manager = DatasetManager(db) dataset_manager = DatasetManager(db)
@@ -44,9 +49,7 @@ def fetch_and_process_dataset(self,
connector = connectors[name]() connector = connectors[name]()
raw_posts = connector.get_new_posts_by_search( raw_posts = connector.get_new_posts_by_search(
search=search, search=search, category=category, post_limit=limit
category=category,
post_limit=limit
) )
posts.extend(post.to_dict() for post in raw_posts) posts.extend(post.to_dict() for post in raw_posts)
@@ -56,6 +59,10 @@ def fetch_and_process_dataset(self,
enriched_df = processor.enrich() enriched_df = processor.enrich()
dataset_manager.save_dataset_content(dataset_id, enriched_df) dataset_manager.save_dataset_content(dataset_id, enriched_df)
dataset_manager.set_dataset_status(dataset_id, "complete", "NLP Processing Completed Successfully") dataset_manager.set_dataset_status(
dataset_id, "complete", "NLP Processing Completed Successfully"
)
except Exception as e: except Exception as e:
dataset_manager.set_dataset_status(dataset_id, "error", f"An error occurred: {e}") dataset_manager.set_dataset_status(
dataset_id, "error", f"An error occurred: {e}"
)