Files
crosspost/connectors/boards_api.py

106 lines
3.2 KiB
Python

import logging
import re
from concurrent.futures import ThreadPoolExecutor, as_completed
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

from dto.post import Post
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)

# Default HTTP headers sent with every request. The explicit User-Agent
# identifies the scraper politely and avoids some basic bot blocks.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (compatible; ForumScraper/1.0)"
}
class BoardsAPI:
    """Connector that scrapes thread listings and individual posts from Boards.ie.

    Listing pages are paginated under /categories/<category>/p<N>; each thread
    page is fetched and parsed into a Post DTO.
    """

    # Per-request timeout in seconds. Without one, a stalled connection can
    # block a worker thread indefinitely.
    REQUEST_TIMEOUT = 15

    def __init__(self):
        self.url = "https://www.boards.ie"
        self.source_name = "Boards.ie"

    def get_new_category_posts(self, category: str, limit: int = 100) -> list[Post]:
        """Return up to *limit* Post objects from the newest threads in *category*.

        Paginates through the category listing collecting thread URLs, then
        fetches and parses each thread concurrently. Threads that fail to
        fetch or parse are logged and skipped rather than aborting the batch.

        Raises:
            requests.HTTPError: if a listing page itself returns an error status.
        """
        urls: list[str] = []
        current_page = 1
        logger.info("Fetching posts from category: %s", category)
        while len(urls) < limit:
            page_url = f"{self.url}/categories/{category}/p{current_page}"
            html = self._fetch_page(page_url)
            soup = BeautifulSoup(html, "html.parser")
            logger.debug("Processing page %d for category %s", current_page, category)
            found_on_page = 0
            for a in soup.select("a.threadbit-threadlink"):
                if len(urls) >= limit:
                    break
                href = a.get("href")
                if href:
                    # Resolve relative links against the site root; absolute
                    # links pass through urljoin unchanged.
                    urls.append(urljoin(self.url, href))
                    found_on_page += 1
            # BUGFIX: a page with no thread links (past the last page, or an
            # empty category) previously caused an infinite pagination loop.
            if found_on_page == 0:
                logger.debug("No threads found on page %d; stopping pagination.", current_page)
                break
            current_page += 1
        logger.debug("Fetched %d post URLs from category %s", len(urls), category)

        posts: list[Post] = []

        def fetch_and_parse(post_url: str) -> Post:
            # Runs on a worker thread: download the thread page and parse it.
            html = self._fetch_page(post_url)
            return self._parse_thread(html, post_url)

        with ThreadPoolExecutor(max_workers=30) as executor:
            futures = {executor.submit(fetch_and_parse, url): url for url in urls}
            for i, future in enumerate(as_completed(futures)):
                post_url = futures[future]
                logger.debug(
                    "Fetching Post %d / %d details from URL: %s",
                    i + 1, len(urls), post_url,
                )
                # BUGFIX: an unguarded future.result() let a single failed
                # thread fetch raise out of the loop and discard every post
                # already collected. Skip the bad thread instead.
                try:
                    posts.append(future.result())
                except Exception:
                    logger.exception("Failed to fetch/parse %s; skipping.", post_url)
        return posts

    def _fetch_page(self, url: str) -> str:
        """GET *url* with the scraper headers and return the response body.

        Raises:
            requests.HTTPError: on a non-2xx status.
            requests.Timeout: if the request exceeds REQUEST_TIMEOUT.
        """
        # BUGFIX: the original call had no timeout, so a hung connection
        # could stall a ThreadPoolExecutor worker forever.
        response = requests.get(url, headers=HEADERS, timeout=self.REQUEST_TIMEOUT)
        response.raise_for_status()
        return response.text

    def _parse_thread(self, html: str, post_url: str) -> Post:
        """Parse a thread page's HTML into a Post.

        Any field whose selector finds no match is left as None rather than
        raising, so a partially-rendered page still yields a usable Post.
        """
        soup = BeautifulSoup(html, "html.parser")

        # Author of the opening post.
        author_tag = soup.select_one(".userinfo-username-title")
        author = author_tag.text.strip() if author_tag else None

        # Timestamp: extracted from the post header as the raw site-formatted
        # string, e.g. "01-02-2024 09:30AM" (not parsed into a datetime).
        timestamp_tag = soup.select_one(".postbit-header")
        timestamp = None
        if timestamp_tag:
            match = re.search(r"\d{2}-\d{2}-\d{4}\s+\d{2}:\d{2}[AP]M", timestamp_tag.get_text())
            timestamp = match.group(0) if match else None

        # Post ID: the numeric discussion id embedded in the thread URL.
        post_num = re.search(r"discussion/(\d+)", post_url)
        post_num = post_num.group(1) if post_num else None

        # Body text of the opening post, with block boundaries as newlines.
        content_tag = soup.select_one(".Message.userContent")
        content = content_tag.get_text(separator="\n", strip=True) if content_tag else None

        # Thread title.
        title_tag = soup.select_one(".PageTitle h1")
        title = title_tag.text.strip() if title_tag else None

        return Post(
            id=post_num,
            author=author,
            title=title,
            content=content,
            url=post_url,
            timestamp=timestamp,
            source=self.source_name,
        )