From 8f385733ed258b1536dbe573b0ea5f3ab3b22019 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A5l-Kristian=20Hamre?=
Date: Mon, 21 Jul 2025 15:42:55 +0200
Subject: [PATCH] Modularized the tool.

---
 database.py           | 142 ------------------------------------------
 main.py               | 117 ----------------------------------
 sentiment_analyzer.py |  19 ------
 setup_nltk.py         |  11 ----
 ticker_extractor.py   |  65 -------------------
 5 files changed, 354 deletions(-)
 delete mode 100644 database.py
 delete mode 100644 main.py
 delete mode 100644 sentiment_analyzer.py
 delete mode 100644 setup_nltk.py
 delete mode 100644 ticker_extractor.py

diff --git a/database.py b/database.py
deleted file mode 100644
index 21c1255..0000000
--- a/database.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# database.py
-
-import sqlite3
-import time
-
-DB_FILE = "reddit_stocks.db"
-
-def get_db_connection():
-    """Establishes a connection to the SQLite database."""
-    conn = sqlite3.connect(DB_FILE)
-    conn.row_factory = sqlite3.Row
-    return conn
-
-def initialize_db():
-    """
-    Initializes the database and creates the necessary tables if they don't exist.
-    """
-    conn = get_db_connection()
-    cursor = conn.cursor()
-
-    # --- Create tickers table (This is the corrected section) ---
-    cursor.execute("""
-        CREATE TABLE IF NOT EXISTS tickers (
-            id INTEGER PRIMARY KEY AUTOINCREMENT,
-            symbol TEXT NOT NULL UNIQUE,
-            market_cap INTEGER,
-            last_updated INTEGER
-        )
-    """)
-
-    # --- Create subreddits table (This is the corrected section) ---
-    cursor.execute("""
-        CREATE TABLE IF NOT EXISTS subreddits (
-            id INTEGER PRIMARY KEY AUTOINCREMENT,
-            name TEXT NOT NULL UNIQUE
-        )
-    """)
-
-    # --- Create mentions table with sentiment_score column ---
-    cursor.execute("""
-        CREATE TABLE IF NOT EXISTS mentions (
-            id INTEGER PRIMARY KEY AUTOINCREMENT,
-            ticker_id INTEGER,
-            subreddit_id INTEGER,
-            post_id TEXT NOT NULL,
-            mention_timestamp INTEGER NOT NULL,
-            sentiment_score REAL,
-            FOREIGN KEY (ticker_id) REFERENCES tickers (id),
-            FOREIGN KEY (subreddit_id) REFERENCES subreddits (id),
-            UNIQUE(ticker_id, post_id)
-        )
-    """)
-
-    conn.commit()
-    conn.close()
-    print("Database initialized successfully.")
-
-def add_mention(conn, ticker_id, subreddit_id, post_id, timestamp, sentiment):
-    """Adds a new mention with its sentiment score to the database."""
-    cursor = conn.cursor()
-    try:
-        cursor.execute(
-            "INSERT INTO mentions (ticker_id, subreddit_id, post_id, mention_timestamp, sentiment_score) VALUES (?, ?, ?, ?, ?)",
-            (ticker_id, subreddit_id, post_id, timestamp, sentiment)
-        )
-        conn.commit()
-    except sqlite3.IntegrityError:
-        pass  # Ignore duplicate mentions
-
-def get_or_create_entity(conn, table_name, column_name, value):
-    """Generic function to get or create an entity and return its ID."""
-    cursor = conn.cursor()
-    cursor.execute(f"SELECT id FROM {table_name} WHERE {column_name} = ?", (value,))
-    result = cursor.fetchone()
-    if result:
-        return result['id']
-    else:
-        cursor.execute(f"INSERT INTO {table_name} ({column_name}) VALUES (?)", (value,))
-        conn.commit()
-        return cursor.lastrowid
-
-def update_ticker_market_cap(conn, ticker_id, market_cap):
-    """Updates the market cap and timestamp for a specific ticker."""
-    cursor = conn.cursor()
-    current_timestamp = int(time.time())
-    cursor.execute(
-        "UPDATE tickers SET market_cap = ?, last_updated = ? WHERE id = ?",
-        (market_cap, current_timestamp, ticker_id)
-    )
-    conn.commit()
-
-def get_ticker_info(conn, ticker_id):
-    """Retrieves all info for a specific ticker by its ID."""
-    cursor = conn.cursor()
-    cursor.execute("SELECT * FROM tickers WHERE id = ?", (ticker_id,))
-    return cursor.fetchone()
-
-def generate_summary_report():
-    """Queries the DB to generate a summary with market caps and avg. sentiment."""
-    print("\n--- Summary Report ---")
-    conn = get_db_connection()
-    cursor = conn.cursor()
-
-    query = """
-        SELECT
-            t.symbol,
-            t.market_cap,
-            COUNT(m.id) as mention_count,
-            AVG(m.sentiment_score) as avg_sentiment
-        FROM mentions m
-        JOIN tickers t ON m.ticker_id = t.id
-        GROUP BY t.symbol, t.market_cap
-        ORDER BY mention_count DESC
-        LIMIT 20;
-    """
-    results = cursor.execute(query).fetchall()
-
-    print(f"{'Ticker':<10} | {'Mentions':<10} | {'Sentiment':<18} | {'Market Cap':<20}")
-    print("-" * 65)
-
-    for row in results:
-        # Format Market Cap
-        market_cap_str = "N/A"
-        if row['market_cap'] and row['market_cap'] > 0:
-            mc = row['market_cap']
-            if mc >= 1e12: market_cap_str = f"${mc/1e12:.2f}T"
-            elif mc >= 1e9: market_cap_str = f"${mc/1e9:.2f}B"
-            elif mc >= 1e6: market_cap_str = f"${mc/1e6:.2f}M"
-            else: market_cap_str = f"${mc:,}"
-
-        # Determine Sentiment Label
-        sentiment_score = row['avg_sentiment']
-        if sentiment_score is not None:
-            if sentiment_score > 0.1: sentiment_label = f"Bullish ({sentiment_score:+.2f})"
-            elif sentiment_score < -0.1: sentiment_label = f"Bearish ({sentiment_score:+.2f})"
-            else: sentiment_label = f"Neutral ({sentiment_score:+.2f})"
-        else:
-            sentiment_label = "N/A"
-
-        print(f"{row['symbol']:<10} | {row['mention_count']:<10} | {sentiment_label:<18} | {market_cap_str:<20}")
-
-    conn.close()
\ No newline at end of file
diff --git a/main.py b/main.py
deleted file mode 100644
index 99df3e7..0000000
--- a/main.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# main.py
-
-import argparse
-import json
-import os
-import time
-
-import praw
-import yfinance as yf
-from dotenv import load_dotenv
-
-import database
-from ticker_extractor import extract_tickers
-from sentiment_analyzer import get_sentiment_score
-
-load_dotenv()
-MARKET_CAP_REFRESH_INTERVAL = 86400
-
-# (load_subreddits, get_market_cap, get_reddit_instance functions are unchanged)
-def load_subreddits(filepath):
-    try:
-        with open(filepath, 'r') as f:
-            return json.load(f).get("subreddits", [])
-    except (FileNotFoundError, json.JSONDecodeError) as e:
-        print(f"Error loading config: {e}")
-        return None
-
-def get_market_cap(ticker_symbol):
-    try:
-        ticker = yf.Ticker(ticker_symbol)
-        return ticker.fast_info.get('marketCap')
-    except Exception:
-        return None
-
-def get_reddit_instance():
-    client_id = os.getenv("REDDIT_CLIENT_ID")
-    client_secret = os.getenv("REDDIT_CLIENT_SECRET")
-    user_agent = os.getenv("REDDIT_USER_AGENT")
-    if not all([client_id, client_secret, user_agent]):
-        print("Error: Reddit API credentials not found.")
-        return None
-    return praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent=user_agent)
-
-# --- UPDATED: Function now accepts post_limit and comment_limit ---
-def scan_subreddits(reddit, subreddits_list, post_limit=25, comment_limit=100):
-    """Scans subreddits, analyzes posts and comments, and stores results in the database."""
-    conn = database.get_db_connection()
-
-    print(f"\nScanning {len(subreddits_list)} subreddits (Top {post_limit} posts, {comment_limit} comments/post)...")
-    for subreddit_name in subreddits_list:
-        try:
-            subreddit_id = database.get_or_create_entity(conn, 'subreddits', 'name', subreddit_name)
-            subreddit = reddit.subreddit(subreddit_name)
-            print(f"Scanning r/{subreddit_name}...")
-
-            for submission in subreddit.hot(limit=post_limit):
-                # --- 1. Process the Post Title and Body ---
-                post_text = submission.title + " " + submission.selftext
-                tickers_in_post = extract_tickers(post_text)
-                post_sentiment = get_sentiment_score(submission.title)
-
-                for ticker_symbol in set(tickers_in_post):
-                    ticker_id = database.get_or_create_entity(conn, 'tickers', 'symbol', ticker_symbol)
-                    database.add_mention(conn, ticker_id, subreddit_id, submission.id, int(submission.created_utc), post_sentiment)
-                    # (Market cap logic remains the same)
-                    ticker_info = database.get_ticker_info(conn, ticker_id)
-                    current_time = int(time.time())
-                    if not ticker_info['last_updated'] or (current_time - ticker_info['last_updated'] > MARKET_CAP_REFRESH_INTERVAL):
-                        print(f" -> Fetching market cap for {ticker_symbol}...")
-                        market_cap = get_market_cap(ticker_symbol)
-                        database.update_ticker_market_cap(conn, ticker_id, market_cap or ticker_info['market_cap'])
-
-                # --- 2. Process the Comments ---
-                # Expand "MoreComments" objects. limit=None would expand them all, at the
-                # cost of one API request per expansion (PRAW's default limit is 32); we
-                # cap it at 10 to bound the number of requests per post.
-                submission.comments.replace_more(limit=10)
-                comment_count = 0
-                for comment in submission.comments.list():
-                    if comment_count >= comment_limit:
-                        break  # Stop processing comments for this post if we hit our limit
-
-                    tickers_in_comment = extract_tickers(comment.body)
-                    if not tickers_in_comment:
-                        continue  # Skip comments that don't mention any tickers
-
-                    comment_sentiment = get_sentiment_score(comment.body)
-
-                    for ticker_symbol in set(tickers_in_comment):
-                        ticker_id = database.get_or_create_entity(conn, 'tickers', 'symbol', ticker_symbol)
-                        # We use the submission.id as the post_id to group mentions correctly
-                        database.add_mention(conn, ticker_id, subreddit_id, submission.id, int(comment.created_utc), comment_sentiment)
-
-                    comment_count += 1
-
-        except Exception as e:
-            print(f"Could not scan r/{subreddit_name}. Error: {e}")
-
-    conn.close()
-    print("\n--- Scan Complete ---")
-
-def main():
-    parser = argparse.ArgumentParser(description="Analyze stock ticker mentions on Reddit.")
-    parser.add_argument("config_file", help="Path to the JSON file containing subreddits.")
-    args = parser.parse_args()
-
-    database.initialize_db()
-    subreddits = load_subreddits(args.config_file)
-    if not subreddits: return
-    reddit = get_reddit_instance()
-    if not reddit: return
-
-    # We now pass the limits to the scan function
-    scan_subreddits(reddit, subreddits, post_limit=25, comment_limit=100)
-    database.generate_summary_report()
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
diff --git a/sentiment_analyzer.py b/sentiment_analyzer.py
deleted file mode 100644
index 32b08e8..0000000
--- a/sentiment_analyzer.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# sentiment_analyzer.py
-
-from nltk.sentiment.vader import SentimentIntensityAnalyzer
-
-# Initialize the VADER sentiment intensity analyzer.
-# We only need to create one instance of this.
-_analyzer = SentimentIntensityAnalyzer()
-
-def get_sentiment_score(text):
-    """
-    Analyzes a piece of text and returns its sentiment score.
-
-    The 'compound' score is a single metric that summarizes the sentiment.
-    It ranges from -1 (most negative) to +1 (most positive).
-    """
-    # The polarity_scores() method returns a dictionary with 'neg', 'neu', 'pos', and 'compound' scores.
-    # We are most interested in the 'compound' score.
-    scores = _analyzer.polarity_scores(text)
-    return scores['compound']
\ No newline at end of file
diff --git a/setup_nltk.py b/setup_nltk.py
deleted file mode 100644
index bfd5209..0000000
--- a/setup_nltk.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import nltk
-
-# This will download the 'vader_lexicon' dataset.
-# It only needs to be run once.
-try:
-    nltk.data.find('sentiment/vader_lexicon.zip')
-    print("VADER lexicon is already downloaded.")
-except LookupError:
-    print("Downloading VADER lexicon...")
-    nltk.download('vader_lexicon')
-    print("Download complete.")
\ No newline at end of file
diff --git a/ticker_extractor.py b/ticker_extractor.py
deleted file mode 100644
index 7cd19cf..0000000
--- a/ticker_extractor.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# ticker_extractor.py
-
-import re
-
-# A set of common English words and acronyms that look like stock tickers.
-# This helps reduce false positives.
-COMMON_WORDS_BLACKLIST = {
-    "401K", "403B", "457B", "ABOVE", "AI", "ALL", "ALPHA", "AMA", "AMEX",
-    "AND", "ANY", "AR", "ARE", "AROUND", "ASSET", "AT", "ATH", "ATL", "AUD",
-    "BE", "BEAR", "BELOW", "BETA", "BIG", "BIS", "BLEND", "BOE", "BOJ",
-    "BOND", "BRB", "BRL", "BTC", "BTW", "BULL", "BUT", "BUY", "BUZZ", "CAD",
-    "CAN", "CEO", "CFO", "CHF", "CIA", "CNY", "COME", "COST", "COULD", "CPI",
-    "CTB", "CTO", "CYCLE", "CZK", "DAO", "DATE", "DAX", "DAY", "DCA", "DD",
-    "DEBT", "DIA", "DIV", "DJIA", "DKK", "DM", "DO", "DOGE", "DR", "EACH",
-    "EARLY", "EARN", "ECB", "EDGAR", "EDIT", "EPS", "ESG", "ETF", "ETH",
-    "EU", "EUR", "EV", "EVERY", "FAQ", "FAR", "FAST", "FBI", "FDA", "FIHTX",
-    "FINRA", "FINT", "FINTX", "FINTY", "FOMC", "FOMO", "FOR", "FRAUD",
-    "FRG", "FSPSX", "FTSE", "FUD", "FULL", "FUND", "FXAIX", "FXIAX", "FY",
-    "FYI", "FZROX", "GAIN", "GDP", "GET", "GBP", "GO", "GOAL", "GPU", "GRAB",
-    "GTG", "HAS", "HAVE", "HATE", "HEAR", "HEDGE", "HINT", "HKD", "HODL",
-    "HOLD", "HOUR", "HSA", "HUF", "IMHO", "IMO", "IN", "INR", "IPO", "IRA",
-    "IRS", "IS", "ISM", "IT", "IV", "IVV", "IWM", "JPY", "JUST", "KNOW",
-    "KRW", "LARGE", "LAST", "LATE", "LATER", "LBO", "LIKE", "LMAO", "LOL",
-    "LONG", "LOOK", "LOSS", "LOVE", "M&A", "MAKE", "MAX", "MC", "MID", "MIGHT",
-    "MIN", "ML", "MOASS", "MONTH", "MUST", "MXN", "MY", "NATO", "NEAR",
-    "NEED", "NEW", "NEXT", "NFA", "NFT", "NGMI", "NIGHT", "NO", "NOK", "NONE",
-    "NOT", "NOW", "NSA", "NULL", "NZD", "NYSE", "OF", "OK", "OLD", "ON",
-    "OP", "OR", "OTC", "OUGHT", "OUT", "OVER", "PE", "PEAK", "PEG",
-    "PLAN", "PLN", "PMI", "PPI", "PRICE", "PROFIT", "PSA", "Q1", "Q2", "Q3",
-    "Q4", "QQQ", "RBA", "RBNZ", "REIT", "REKT", "RH", "RISK", "ROE", "ROFL",
-    "ROI", "ROTH", "RSD", "RUB", "SAVE", "SCALP", "SCAM", "SCHB", "SEC",
-    "SEE", "SEK", "SELL", "SEP", "SGD", "SHALL", "SHARE", "SHORT", "SO",
-    "SOME", "SOON", "SPAC", "SPEND", "SPLG", "SPX", "SPY", "STILL", "STOCK",
-    "SWING", "TAKE", "TERM", "THE", "THINK", "THIS", "TIME", "TL", "TL;DR",
-    "TLDR", "TODAY", "TO", "TOTAL", "TRADE", "TREND", "TRUE", "TRY", "TTYL",
-    "TWO", "UK", "UNDER", "UP", "US", "USA", "USD", "VTI", "VALUE", "VOO",
-    "VR", "WAGMI", "WANT", "WATCH", "WAY", "WE", "WEB3", "WEEK", "WHO",
-    "WHY", "WILL", "WORTH", "WOULD", "WSB", "YET", "YIELD", "YOLO", "YOU",
-    "ZAR",
-    "KARMA", "OTM", "ITM", "ATM", "JPOW", "OPEN", "CLOSE", "HIGH", "LOW",
-    "RE", "BS", "ASAP", "RULE", "REAL", "LIMIT", "STOP", "END", "START", "BOTS",
-    "UTC", "AH", "PM", "PR", "GMT", "EST", "CST", "PST", "BST", "AEDT", "AEST",
"EST", "CST", "PST", "BST", "AEDT", "AEST", - "CET", "CEST", "EDT", "IST", "JST", "MSK", "PDT", "PST", "YES", "NO", "OWN", - "BOMB", -} -def extract_tickers(text): - """ - Extracts potential stock tickers from a given piece of text. - A ticker is identified as a 1-5 character uppercase word, or a word prefixed with $. - """ - # Regex to find potential tickers: - # 1. Words prefixed with $: $AAPL, $TSLA - # 2. All-caps words between 1 and 5 characters: GME, AMC - ticker_regex = r"\$[A-Z]{1,5}\b|\b[A-Z]{2,5}\b" - - potential_tickers = re.findall(ticker_regex, text) - - # Filter out common words and remove the '$' prefix - tickers = [] - for ticker in potential_tickers: - cleaned_ticker = ticker.replace("$", "").upper() - if cleaned_ticker not in COMMON_WORDS_BLACKLIST: - tickers.append(cleaned_ticker) - - return tickers \ No newline at end of file