# main.py

import argparse
import json
import os
import time

import praw
import yfinance as yf
from dotenv import load_dotenv

# Import local modules
from . import database
from .ticker_extractor import extract_tickers
from .sentiment_analyzer import get_sentiment_score
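
# Third-party dependencies (PyPI package names): praw, yfinance, python-dotenv,
# installable with e.g. `pip install praw yfinance python-dotenv`.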

load_dotenv()

MARKET_CAP_REFRESH_INTERVAL = 86400  # 24 hours, in seconds

# (load_subreddits, get_market_cap, get_reddit_instance functions are unchanged)
def load_subreddits(filepath):
    """Load the list of subreddit names from the JSON config file."""
    try:
        with open(filepath, 'r') as f:
            return json.load(f).get("subreddits", [])
    except (FileNotFoundError, json.JSONDecodeError) as e:
        print(f"Error loading config: {e}")
        return None
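
# load_subreddits expects a config file shaped like this (the subreddit names
# below are illustrative placeholders, not taken from the source):
# {
#     "subreddits": ["stocks", "investing", "pennystocks"]
# }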

def get_market_cap(ticker_symbol):
    """Return the market cap for a ticker via yfinance, or None on any failure."""
    try:
        ticker = yf.Ticker(ticker_symbol)
        return ticker.fast_info.get('marketCap')
    except Exception:
        return None

def get_reddit_instance():
    """Build a read-only PRAW client from credentials in the environment."""
    client_id = os.getenv("REDDIT_CLIENT_ID")
    client_secret = os.getenv("REDDIT_CLIENT_SECRET")
    user_agent = os.getenv("REDDIT_USER_AGENT")
    if not all([client_id, client_secret, user_agent]):
        print("Error: Reddit API credentials not found.")
        return None
    return praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent=user_agent)
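
# Credentials are loaded from a .env file by load_dotenv(). An illustrative
# layout (all values are placeholders):
#   REDDIT_CLIENT_ID=your-client-id
#   REDDIT_CLIENT_SECRET=your-client-secret
#   REDDIT_USER_AGENT=script:reddit-stock-analyzer:v1.0 (by u/your-username)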

# --- UPDATED: Function now accepts post_limit and comment_limit ---
def scan_subreddits(reddit, subreddits_list, post_limit=25, comment_limit=100):
    """Scans subreddits, analyzes posts and comments, and stores results in the database."""
    conn = database.get_db_connection()

    print(f"\nScanning {len(subreddits_list)} subreddits (Top {post_limit} posts, {comment_limit} comments/post)...")
    for subreddit_name in subreddits_list:
        try:
            subreddit_id = database.get_or_create_entity(conn, 'subreddits', 'name', subreddit_name)
            subreddit = reddit.subreddit(subreddit_name)
            print(f"Scanning r/{subreddit_name}...")

            for submission in subreddit.hot(limit=post_limit):
                # --- 1. Process the Post Title and Body ---
                post_text = submission.title + " " + submission.selftext
                tickers_in_post = extract_tickers(post_text)
                post_sentiment = get_sentiment_score(submission.title)

                for ticker_symbol in set(tickers_in_post):
                    ticker_id = database.get_or_create_entity(conn, 'tickers', 'symbol', ticker_symbol)
                    database.add_mention(conn, ticker_id, subreddit_id, submission.id, int(submission.created_utc), post_sentiment)
                    # (Market cap logic remains the same)
                    ticker_info = database.get_ticker_info(conn, ticker_id)
                    current_time = int(time.time())
                    if not ticker_info['last_updated'] or (current_time - ticker_info['last_updated'] > MARKET_CAP_REFRESH_INTERVAL):
                        print(f" -> Fetching market cap for {ticker_symbol}...")
                        market_cap = get_market_cap(ticker_symbol)
                        database.update_ticker_market_cap(conn, ticker_id, market_cap or ticker_info['market_cap'])

                # --- 2. Process the Comments ---
                # Expand "MoreComments" placeholders before flattening the tree.
                # limit=None would expand every placeholder (one API call each, slow
                # on large threads); limit=10 caps the expansion to keep scans fast.
                submission.comments.replace_more(limit=10)
                comment_count = 0
                for comment in submission.comments.list():
                    if comment_count >= comment_limit:
                        break  # Stop processing comments for this post once we hit our limit

                    tickers_in_comment = extract_tickers(comment.body)
                    if not tickers_in_comment:
                        continue  # Skip comments that don't mention any tickers

                    comment_sentiment = get_sentiment_score(comment.body)

                    for ticker_symbol in set(tickers_in_comment):
                        ticker_id = database.get_or_create_entity(conn, 'tickers', 'symbol', ticker_symbol)
                        # We use the submission.id as the post_id to group mentions correctly
                        database.add_mention(conn, ticker_id, subreddit_id, submission.id, int(comment.created_utc), comment_sentiment)

                    comment_count += 1

        except Exception as e:
            print(f"Could not scan r/{subreddit_name}. Error: {e}")

    conn.close()
    print("\n--- Scan Complete ---")

def main():
    """Main function to run the Reddit stock analysis tool."""
    parser = argparse.ArgumentParser(
        description="Analyze stock ticker mentions on Reddit.",
        formatter_class=argparse.RawTextHelpFormatter  # Preserve the \n line breaks in help text
    )

    # --- Existing Argument ---
    parser.add_argument("config_file", help="Path to the JSON file containing subreddits.")

    # --- NEW Arguments ---
    parser.add_argument(
        "-p", "--posts",
        type=int,
        default=25,
        help="Number of posts to scan per subreddit.\n(Default: 25)"
    )
    parser.add_argument(
        "-c", "--comments",
        type=int,
        default=100,
        help="Number of comments to scan per post.\n(Default: 100)"
    )
    parser.add_argument(
        "-l", "--limit",
        type=int,
        default=20,
        help="Number of tickers to show in the final report.\n(Default: 20)"
    )
    args = parser.parse_args()

    # --- Initialize and Run ---
    database.initialize_db()
    database.clean_stale_tickers()

    subreddits = load_subreddits(args.config_file)
    if not subreddits:
        return

    reddit = get_reddit_instance()
    if not reddit:
        return

    # Pass the command-line arguments through to the scan and the report
    scan_subreddits(reddit, subreddits, post_limit=args.posts, comment_limit=args.comments)
    database.generate_summary_report(limit=args.limit)


if __name__ == "__main__":
    main()
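
# Because of the relative imports above, this file must be run as a module
# from the package root, not as a standalone script. An illustrative
# invocation (the package name "reddit_stock_analyzer" is a placeholder,
# not from the source):
#   python -m reddit_stock_analyzer.main subreddits.json --posts 50 --comments 200 --limit 10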