refactor(dataset creation): update API methods to return only posts

commit ec91904481
parent 645d2fdfdb
date   2026-02-09 21:20:08 +00:00

6 changed files with 87 additions and 65 deletions
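In short: every public fetch method now returns a single list[Post], with each post's comments attached through a new comments field, instead of a (posts, comments) tuple of parallel lists. A minimal before/after sketch of a call site (the variable and subreddit names are illustrative, not from this repo):

    # Before: comments came back as a separate list, detached from their posts.
    posts, comments = reddit_api.get_new_subreddit_posts("ireland", limit=10)

    # After: each Post carries its own comments.
    posts = reddit_api.get_new_subreddit_posts("ireland", limit=10)
    for post in posts:
        print(post.url, len(post.comments))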


@@ -19,20 +19,20 @@ class BoardsAPI:
         self.url = "https://www.boards.ie"
         self.source_name = "Boards.ie"
 
-    def get_new_category_posts(self, category: str, limit: int = 100) -> tuple[list[Post], list[Comment]]:
+    def get_new_category_posts(self, category: str, post_limit: int, comment_limit: int) -> list[Post]:
         urls = []
         current_page = 1
         logger.info(f"Fetching posts from category: {category}")
 
-        while len(urls) < limit:
+        while len(urls) < post_limit:
             url = f"{self.url}/categories/{category}/p{current_page}"
             html = self._fetch_page(url)
             soup = BeautifulSoup(html, "html.parser")
             logger.debug(f"Processing page {current_page} for category {category}")
 
             for a in soup.select("a.threadbit-threadlink"):
-                if len(urls) >= limit:
+                if len(urls) >= post_limit:
                     break
                 href = a.get("href")
@@ -45,13 +45,11 @@ class BoardsAPI:
         # Fetch post details for each URL and create Post objects
         posts = []
-        comments = []
 
         def fetch_and_parse(post_url):
             html = self._fetch_page(post_url)
-            post = self._parse_thread(html, post_url)
-            comments = self._parse_comments(post_url, post.id, comment_limit=500)
-            return (post, comments)
+            post = self._parse_thread(html, post_url, comment_limit)
+            return post
 
         with ThreadPoolExecutor(max_workers=30) as executor:
             futures = {executor.submit(fetch_and_parse, url): url for url in urls}
@@ -60,13 +58,12 @@ class BoardsAPI:
                 post_url = futures[future]
                 logger.debug(f"Fetching Post {i + 1} / {len(urls)} details from URL: {post_url}")
                 try:
-                    post, post_comments = future.result()
+                    post = future.result()
                     posts.append(post)
-                    comments.extend(post_comments)
                 except Exception as e:
                     logger.error(f"Error fetching post from {post_url}: {e}")
 
-        return posts, comments
+        return posts
 
     def _fetch_page(self, url: str) -> str:
@@ -74,7 +71,7 @@ class BoardsAPI:
         response.raise_for_status()
         return response.text
 
-    def _parse_thread(self, html: str, post_url: str) -> Post:
+    def _parse_thread(self, html: str, post_url: str, comment_limit: int) -> Post:
         soup = BeautifulSoup(html, "html.parser")
 
         # Author
@@ -102,6 +99,9 @@ class BoardsAPI:
         title_tag = soup.select_one(".PageTitle h1")
         title = title_tag.text.strip() if title_tag else None
 
+        # Comments
+        comments = self._parse_comments(post_url, post_num, comment_limit)
+
         post = Post(
             id=post_num,
             author=author,
@@ -109,12 +109,13 @@ class BoardsAPI:
             content=content,
             url=post_url,
             timestamp=timestamp,
-            source=self.source_name
+            source=self.source_name,
+            comments=comments
         )
         return post
 
-    def _parse_comments(self, url: str, post_id: str, comment_limit: int = 500) -> list[Comment]:
+    def _parse_comments(self, url: str, post_id: str, comment_limit: int) -> list[Comment]:
         comments = []
         current_url = url
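Net effect for Boards.ie: _parse_thread now fetches the thread's comments itself, so callers receive fully populated Post objects. A hedged usage sketch (the category slug and limits are made-up values, and a no-argument BoardsAPI() constructor is assumed):

    api = BoardsAPI()
    posts = api.get_new_category_posts("soccer", post_limit=50, comment_limit=500)
    for post in posts:
        # Comments now live on the post instead of in a parallel list.
        print(post.url, len(post.comments))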


@@ -14,7 +14,7 @@ class RedditAPI:
         self.source_name = "Reddit"
 
     # Public Methods #
 
-    def search_new_subreddit_posts(self, search: str, subreddit: str, limit: int = 10) -> tuple[list[Post], list[Comment]]:
+    def search_new_subreddit_posts(self, search: str, subreddit: str, limit: int) -> list[Post]:
         params = {
             'q': search,
             'limit': limit,
@@ -25,27 +25,25 @@ class RedditAPI:
         logger.info(f"Searching subreddit '{subreddit}' for '{search}' with limit {limit}")
         url = f"r/{subreddit}/search.json"
         posts = []
-        comments = []
 
         while len(posts) < limit:
             batch_limit = min(100, limit - len(posts))
             params['limit'] = batch_limit
-            data = self._fetch_data(url, params)
-            batch_posts, batch_comments = self._parse_posts(data)
+            data = self._fetch_post_overviews(url, params)
+            batch_posts = self._parse_posts(data)
-            logger.debug(f"Fetched {len(batch_posts)} posts and {len(batch_comments)} comments from search in subreddit {subreddit}")
+            logger.debug(f"Fetched {len(batch_posts)} posts from search in subreddit {subreddit}")
             if not batch_posts:
                 break
             posts.extend(batch_posts)
-            comments.extend(batch_comments)
 
-        return posts, comments
+        return posts
 
-    def get_new_subreddit_posts(self, subreddit: str, limit: int = 10) -> tuple[list[Post], list[Comment]]:
+    def get_new_subreddit_posts(self, subreddit: str, limit: int = 10) -> list[Post]:
         posts = []
-        comments = []
         after = None
         url = f"r/{subreddit}/new.json"
@@ -58,30 +56,28 @@ class RedditAPI:
                 'after': after
             }
 
-            data = self._fetch_data(url, params)
-            batch_posts, batch_comments = self._parse_posts(data)
+            data = self._fetch_post_overviews(url, params)
+            batch_posts = self._parse_posts(data)
-            logger.debug(f"Fetched {len(batch_posts)} new posts and {len(batch_comments)} comments from subreddit {subreddit}")
+            logger.debug(f"Fetched {len(batch_posts)} new posts from subreddit {subreddit}")
             if not batch_posts:
                 break
             posts.extend(batch_posts)
-            comments.extend(batch_comments)
 
             after = data['data'].get('after')
             if not after:
                 break
 
-        return posts, comments
+        return posts
 
     def get_user(self, username: str) -> User:
-        data = self._fetch_data(f"user/{username}/about.json", {})
+        data = self._fetch_post_overviews(f"user/{username}/about.json", {})
         return self._parse_user(data)
 
     ## Private Methods ##
 
-    def _parse_posts(self, data) -> tuple[list[Post], list[Comment]]:
+    def _parse_posts(self, data) -> list[Post]:
         posts = []
-        comments = []
         total_num_posts = len(data['data']['children'])
         current_index = 0
@@ -98,19 +94,19 @@ class RedditAPI:
                 content=post_data.get('selftext', ''),
                 url=post_data['url'],
                 timestamp=post_data['created_utc'],
-                source=self.source_name)
+                source=self.source_name,
+                comments=self._get_post_comments(post_data['id']))
             post.subreddit = post_data['subreddit']
             post.upvotes = post_data['ups']
             posts.append(post)
-            comments.extend(self._get_post_comments(post.id))
 
-        return posts, comments
+        return posts
 
     def _get_post_comments(self, post_id: str) -> list[Comment]:
         comments: list[Comment] = []
         url = f"comments/{post_id}.json"
-        data = self._fetch_data(url, {})
+        data = self._fetch_post_overviews(url, {})
 
         if len(data) < 2:
             return comments
@@ -151,7 +147,7 @@ class RedditAPI:
             user.karma = user_data['total_karma']
         return user
 
-    def _fetch_data(self, endpoint: str, params: dict) -> dict:
+    def _fetch_post_overviews(self, endpoint: str, params: dict) -> dict:
         url = f"{self.url}{endpoint}"
         max_retries = 15
         backoff = 1  # seconds
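The Reddit client follows the same inversion: _parse_posts attaches each post's comment tree at construction time through _get_post_comments, and _fetch_data is renamed to _fetch_post_overviews. A sketch of the new public surface (the subreddit, query, and no-argument RedditAPI() construction are assumed placeholders):

    api = RedditAPI()
    new_posts = api.get_new_subreddit_posts("ireland", limit=10)
    results = api.search_new_subreddit_posts("housing", subreddit="ireland", limit=25)
    for post in new_posts + results:
        print(post.url, post.upvotes, len(post.comments))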


@@ -40,10 +40,9 @@ class YouTubeAPI:
             return []
         return response.get('items', [])
 
-    def fetch_video_and_comments(self, query, video_limit, comment_limit) -> tuple[list[Post], list[Comment]]:
+    def fetch_videos(self, query, video_limit, comment_limit) -> list[Post]:
         videos = self.search_videos(query, video_limit)
         posts = []
-        comments = []
 
         for video in videos:
             video_id = video['id']['videoId']
@@ -53,16 +52,7 @@ class YouTubeAPI:
             published_at = datetime.datetime.strptime(snippet['publishedAt'], "%Y-%m-%dT%H:%M:%SZ").timestamp()
             channel_title = snippet['channelTitle']
 
-            post = Post(
-                id=video_id,
-                content=f"{title}\n\n{description}",
-                author=channel_title,
-                timestamp=published_at,
-                url=f"https://www.youtube.com/watch?v={video_id}",
-                title=title,
-                source="YouTube"
-            )
+            comments = []
             comments_data = self.get_video_comments(video_id, comment_limit)
             for comment_thread in comments_data:
                 comment_snippet = comment_thread['snippet']['topLevelComment']['snippet']
@@ -77,6 +67,18 @@ class YouTubeAPI:
                 )
                 comments.append(comment)
 
+            post = Post(
+                id=video_id,
+                content=f"{title}\n\n{description}",
+                author=channel_title,
+                timestamp=published_at,
+                url=f"https://www.youtube.com/watch?v={video_id}",
+                title=title,
+                source="YouTube",
+                comments=comments
+            )
             posts.append(post)
 
-        return posts, comments
+        return posts
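For YouTube, the Post construction moves below the comment loop so comments=comments can be passed in, and the renamed fetch_videos likewise returns only posts. A usage sketch (the query string is a placeholder and a no-argument YouTubeAPI() constructor is assumed; the real one likely needs an API key):

    api = YouTubeAPI()
    videos = api.fetch_videos("irish politics", video_limit=5, comment_limit=20)
    for video_post in videos:
        print(video_post.title, len(video_post.comments))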