Format all code.

2025-07-25 23:22:37 +02:00
parent f940470de3
commit 56e0965a5f
19 changed files with 850 additions and 353 deletions

View File

@@ -8,6 +8,7 @@ from playwright.sync_api import sync_playwright
# Define the output directory as a constant
OUTPUT_DIR = "images"


def export_image(url_path, filename_prefix):
    """
    Launches a headless browser, navigates to a URL path, and screenshots
@@ -45,7 +46,9 @@ def export_image(url_path, filename_prefix):
    except Exception as e:
        print(f"\nAn error occurred during export: {e}")
        print(
            "Please ensure the 'rstat-dashboard' server is running in another terminal."
        )


if __name__ == "__main__":
@@ -53,9 +56,16 @@ if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Export subreddit sentiment images.")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-s", "--subreddit", help="The name of the subreddit to export.")
    group.add_argument(
        "-o", "--overall", action="store_true", help="Export the overall summary image."
    )
    parser.add_argument(
        "-w",
        "--weekly",
        action="store_true",
        help="Export the weekly view instead of the daily view (only for --subreddit).",
    )
    args = parser.parse_args()

    # Determine the correct URL path and filename based on arguments
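The screenshot logic itself sits outside these hunks; a minimal sketch of the Playwright flow the script wraps, with the URL and output path assumed rather than taken from the commit:

from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch()
    page = browser.new_page()
    # Assumed: the rstat-dashboard Flask server on its default port, image mode on.
    page.goto("http://127.0.0.1:5000/?image=true")
    page.screenshot(path="images/overall_summary_example.png", full_page=True)
    browser.close()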

View File

@@ -21,11 +21,10 @@ if __name__ == "__main__":
        # This uses a different internal code path that we have proven is stable.
        ticker = yf.Ticker(ticker_symbol)
        data = ticker.history(period="2d", auto_adjust=False)

        closing_price = None
        if not data.empty:
            last_close_raw = data["Close"].iloc[-1]
            if pd.notna(last_close_raw):
                closing_price = float(last_close_raw)

View File

@@ -17,7 +17,7 @@ if __name__ == "__main__":
    try:
        # Directly get the market cap
        market_cap = yf.Ticker(ticker_symbol).info.get("marketCap")

        # On success, print JSON to stdout and exit cleanly
        print(json.dumps({"market_cap": market_cap}))

View File

@@ -10,6 +10,7 @@ import socket
# --- IMPORTANT: Ensure this matches the "redirect uri" in your Reddit App settings ---
REDIRECT_URI = "http://localhost:5000"


def main():
    print("--- RSTAT Refresh Token Generator ---")
    load_dotenv()
@@ -17,7 +18,9 @@ def main():
    client_secret = os.getenv("REDDIT_CLIENT_SECRET")

    if not all([client_id, client_secret]):
        print(
            "Error: REDDIT_CLIENT_ID and REDDIT_CLIENT_SECRET must be set in your .env file."
        )
        return

    # 1. Initialize PRAW
@@ -25,7 +28,7 @@ def main():
        client_id=client_id,
        client_secret=client_secret,
        redirect_uri=REDIRECT_URI,
        user_agent="rstat_token_fetcher (by u/YourUsername)",  # Can be anything
    )

    # 2. Generate the authorization URL
@@ -37,11 +40,17 @@ def main():
print("\nStep 1: Open this URL in your browser:\n") print("\nStep 1: Open this URL in your browser:\n")
print(auth_url) print(auth_url)
print("\nStep 2: Log in to Reddit, click 'Allow', and you'll be redirected to a 'page not found'.") print(
print("Step 3: Copy the ENTIRE URL from your browser's address bar after the redirect.") "\nStep 2: Log in to Reddit, click 'Allow', and you'll be redirected to a 'page not found'."
)
print(
"Step 3: Copy the ENTIRE URL from your browser's address bar after the redirect."
)
# 3. Get the redirected URL from the user # 3. Get the redirected URL from the user
redirected_url = input("\nStep 4: Paste the full redirected URL here and press Enter:\n> ") redirected_url = input(
"\nStep 4: Paste the full redirected URL here and press Enter:\n> "
)
# 4. Exchange the authorization code for a refresh token # 4. Exchange the authorization code for a refresh token
try: try:
@@ -57,12 +66,17 @@ def main():
print("\n--- SUCCESS! ---") print("\n--- SUCCESS! ---")
print("Your Refresh Token is:\n") print("Your Refresh Token is:\n")
print(refresh_token) print(refresh_token)
print("\nStep 5: Copy this token and add it to your .env file as REDDIT_REFRESH_TOKEN.") print(
print("Step 6: You can now delete your REDDIT_USERNAME and REDDIT_PASSWORD from the .env file.") "\nStep 5: Copy this token and add it to your .env file as REDDIT_REFRESH_TOKEN."
)
print(
"Step 6: You can now delete your REDDIT_USERNAME and REDDIT_PASSWORD from the .env file."
)
except Exception as e: except Exception as e:
print(f"\nAn error occurred: {e}") print(f"\nAn error occurred: {e}")
print("Please make sure you copied the full URL.") print("Please make sure you copied the full URL.")
if __name__ == "__main__": if __name__ == "__main__":
main() main()
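The exchange inside the try block falls between the hunks above; a minimal sketch of how PRAW typically completes it (the query-string parsing is an assumption, while reddit.auth.authorize is the standard PRAW call):

from urllib.parse import parse_qs, urlparse

# Pull the one-time authorization code out of the pasted redirect URL (assumed parsing).
code = parse_qs(urlparse(redirected_url).query)["code"][0]

# PRAW exchanges the code and returns the permanent refresh token.
refresh_token = reddit.auth.authorize(code)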

View File

@@ -11,10 +11,11 @@ from pathlib import Path
# --- CONFIGURATION ---
IMAGE_DIR = "images"


def get_reddit_instance():
    """Initializes and returns a PRAW Reddit instance using OAuth2 refresh token."""
    env_path = Path(__file__).parent / ".env"
    load_dotenv(dotenv_path=env_path)

    client_id = os.getenv("REDDIT_CLIENT_ID")
@@ -23,16 +24,19 @@ def get_reddit_instance():
    refresh_token = os.getenv("REDDIT_REFRESH_TOKEN")

    if not all([client_id, client_secret, user_agent, refresh_token]):
        print(
            "Error: Reddit API credentials (including REDDIT_REFRESH_TOKEN) must be set in .env file."
        )
        return None

    return praw.Reddit(
        client_id=client_id,
        client_secret=client_secret,
        user_agent=user_agent,
        refresh_token=refresh_token,
    )


def find_latest_image(pattern):
    """Finds the most recent file in the IMAGE_DIR that matches a given pattern."""
    try:
@@ -47,12 +51,29 @@ def find_latest_image(pattern):
print(f"Error finding image file: {e}") print(f"Error finding image file: {e}")
return None return None
def main(): def main():
"""Main function to find an image and post it to Reddit.""" """Main function to find an image and post it to Reddit."""
parser = argparse.ArgumentParser(description="Find the latest sentiment image and post it to a subreddit.") parser = argparse.ArgumentParser(
parser.add_argument("-s", "--subreddit", help="The source subreddit of the image to post. (Defaults to overall summary)") description="Find the latest sentiment image and post it to a subreddit."
parser.add_argument("-w", "--weekly", action="store_true", help="Post the weekly summary instead of the daily one.") )
parser.add_argument("-t", "--target-subreddit", default="rstat", help="The subreddit to post the image to. (Default: rstat)") parser.add_argument(
"-s",
"--subreddit",
help="The source subreddit of the image to post. (Defaults to overall summary)",
)
parser.add_argument(
"-w",
"--weekly",
action="store_true",
help="Post the weekly summary instead of the daily one.",
)
parser.add_argument(
"-t",
"--target-subreddit",
default="rstat",
help="The subreddit to post the image to. (Default: rstat)",
)
args = parser.parse_args() args = parser.parse_args()
# --- 1. Determine filename pattern and post title --- # --- 1. Determine filename pattern and post title ---
@@ -65,9 +86,13 @@ def main():
    else:
        # Default to the overall summary
        if args.weekly:
            print(
                "Warning: --weekly flag has no effect for overall summary. Posting overall daily image."
            )
        filename_pattern = "overall_summary_*.png"
        post_title = (
            f"Overall Top 10 Ticker Mentions Across Reddit ({current_date_str})"
        )

    print(f"Searching for image pattern: {filename_pattern}")
@@ -75,7 +100,9 @@ def main():
    image_to_post = find_latest_image(filename_pattern)

    if not image_to_post:
        print(
            f"Error: No image found matching the pattern '{filename_pattern}'. Please run the scraper and exporter first."
        )
        return

    print(f"Found image: {image_to_post}")
@@ -92,7 +119,7 @@ def main():
        submission = target_sub.submit_image(
            title=post_title,
            image_path=image_to_post,
            flair_id=None,  # Optional: You can add a flair ID here if you want
        )
        print("\n--- Post Successful! ---")

View File

@@ -3,27 +3,34 @@
import argparse
from . import database
from .logger_setup import setup_logging, logger as log

# We can't reuse load_subreddits from main anymore if it's not in the same file
# So we will duplicate it here. It's small and keeps this script self-contained.
import json


def load_subreddits(filepath):
    """Loads a list of subreddits from a JSON file."""
    try:
        with open(filepath, "r") as f:
            data = json.load(f)
            return data.get("subreddits", [])
    except (FileNotFoundError, json.JSONDecodeError) as e:
        log.error(f"Error loading config file '{filepath}': {e}")
        return None


def run_cleanup():
    """Main function for the cleanup tool."""
    parser = argparse.ArgumentParser(
        description="A tool to clean stale data from the RSTAT database.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "--tickers",
        action="store_true",
        help="Clean tickers that are in the blacklist.",
    )

    # --- UPDATED ARGUMENT DEFINITION ---
    # nargs='?': Makes the argument optional.
@@ -31,14 +38,18 @@ def run_cleanup():
    # default=None: The value if the flag is not present at all.
    parser.add_argument(
        "--subreddits",
        nargs="?",
        const="subreddits.json",
        default=None,
        help="Clean data from subreddits NOT in the specified config file.\n(Defaults to 'subreddits.json' if flag is used without a value).",
    )
    parser.add_argument(
        "--all", action="store_true", help="Run all available cleanup tasks."
    )
    parser.add_argument(
        "--stdout", action="store_true", help="Print all log messages to the console."
    )

    args = parser.parse_args()
@@ -57,7 +68,7 @@ def run_cleanup():
    if args.all or args.subreddits is not None:
        run_any_task = True
        # If --all is used, default to 'subreddits.json' if --subreddits wasn't also specified
        config_file = args.subreddits or "subreddits.json"
        log.info(f"\nCleaning subreddits based on active list in: {config_file}")
        active_subreddits = load_subreddits(config_file)
        if active_subreddits is not None:
@@ -65,10 +76,13 @@ def run_cleanup():
    if not run_any_task:
        parser.print_help()
        log.error(
            "\nError: Please provide at least one cleanup option (e.g., --tickers, --subreddits, --all)."
        )
        return

    log.critical("\nCleanup finished.")


if __name__ == "__main__":
    run_cleanup()
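To make the nargs='?' behavior documented above concrete, a self-contained demo of how the three invocation forms parse:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--subreddits", nargs="?", const="subreddits.json", default=None)

print(parser.parse_args([]).subreddits)                           # None (flag absent -> default)
print(parser.parse_args(["--subreddits"]).subreddits)             # subreddits.json (bare flag -> const)
print(parser.parse_args(["--subreddits", "my.json"]).subreddits)  # my.json (explicit value)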

View File

@@ -9,12 +9,13 @@ from .database import (
    get_daily_summary_for_subreddit,
    get_weekly_summary_for_subreddit,
    get_overall_daily_summary,  # Now correctly imported
    get_overall_weekly_summary,  # Now correctly imported
)

app = Flask(__name__, template_folder="../templates")


@app.template_filter("format_mc")
def format_market_cap(mc):
    """Formats a large number into a readable market cap string."""
    if mc is None or mc == 0:
@@ -28,18 +29,20 @@ def format_market_cap(mc):
    else:
        return f"${mc:,}"


@app.context_processor
def inject_subreddits():
    """Makes the list of all subreddits available to every template for the navbar."""
    return dict(all_subreddits=get_all_scanned_subreddits())


@app.route("/")
def overall_dashboard():
    """Handler for the main, overall dashboard."""
    view_type = request.args.get("view", "daily")
    is_image_mode = request.args.get("image") == "true"

    if view_type == "weekly":
        tickers, start, end = get_overall_weekly_summary()
        date_string = f"{start.strftime('%b %d')} - {end.strftime('%b %d, %Y')}"
        subtitle = "All Subreddits - Top 10 Weekly"
@@ -57,16 +60,17 @@ def overall_dashboard():
        view_type=view_type,
        subreddit_name=None,
        is_image_mode=is_image_mode,
        base_url="/",
    )


@app.route("/subreddit/<name>")
def subreddit_dashboard(name):
    """Handler for per-subreddit dashboards."""
    view_type = request.args.get("view", "daily")
    is_image_mode = request.args.get("image") == "true"

    if view_type == "weekly":
        today = datetime.now(timezone.utc)
        target_date = today - timedelta(days=7)
        tickers, start, end = get_weekly_summary_for_subreddit(name, target_date)
@@ -86,9 +90,10 @@ def subreddit_dashboard(name):
        view_type=view_type,
        subreddit_name=name,
        is_image_mode=is_image_mode,
        base_url=f"/subreddit/{name}",
    )


@app.route("/deep-dive/<symbol>")
def deep_dive(symbol):
    """The handler for the deep-dive page for a specific ticker."""
@@ -96,6 +101,7 @@ def deep_dive(symbol):
    posts = get_deep_dive_details(symbol)
    return render_template("deep_dive.html", posts=posts, symbol=symbol)


def start_dashboard():
    """The main function called by the 'rstat-dashboard' command."""
    log.info("Starting Flask server...")
@@ -103,5 +109,6 @@ def start_dashboard():
log.info("Press CTRL+C to stop the server.") log.info("Press CTRL+C to stop the server.")
app.run(debug=True) app.run(debug=True)
if __name__ == "__main__": if __name__ == "__main__":
start_dashboard() start_dashboard()
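The middle tiers of format_market_cap fall between the two hunks above; a plausible reconstruction of the tiering implied by its visible endpoints (the thresholds and "N/A" fallback are assumptions, not from the commit):

def format_market_cap(mc):
    """Formats a large number into a readable market cap string."""
    if mc is None or mc == 0:
        return "N/A"  # assumed fallback
    if mc >= 1e12:
        return f"${mc / 1e12:.2f}T"  # trillions (assumed threshold)
    elif mc >= 1e9:
        return f"${mc / 1e9:.2f}B"  # billions
    elif mc >= 1e6:
        return f"${mc / 1e6:.2f}M"  # millions
    else:
        return f"${mc:,}"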

View File

@@ -9,6 +9,7 @@ from datetime import datetime, timedelta, timezone
DB_FILE = "reddit_stocks.db" DB_FILE = "reddit_stocks.db"
MARKET_CAP_REFRESH_INTERVAL = 86400 MARKET_CAP_REFRESH_INTERVAL = 86400
def clean_stale_tickers(): def clean_stale_tickers():
""" """
Removes tickers and their associated mentions from the database Removes tickers and their associated mentions from the database
@@ -18,7 +19,7 @@ def clean_stale_tickers():
    conn = get_db_connection()
    cursor = conn.cursor()

    placeholders = ",".join("?" for _ in COMMON_WORDS_BLACKLIST)
    query = f"SELECT id, symbol FROM tickers WHERE symbol IN ({placeholders})"
    cursor.execute(query, tuple(COMMON_WORDS_BLACKLIST))
@@ -30,8 +31,8 @@ def clean_stale_tickers():
        return

    for ticker in stale_tickers:
        ticker_id = ticker["id"]
        ticker_symbol = ticker["symbol"]
        log.info(f"Removing stale ticker '{ticker_symbol}' (ID: {ticker_id})...")
        cursor.execute("DELETE FROM mentions WHERE ticker_id = ?", (ticker_id,))
        cursor.execute("DELETE FROM tickers WHERE id = ?", (ticker_id,))
@@ -41,6 +42,7 @@ def clean_stale_tickers():
    conn.close()
    log.info(f"Cleanup complete. Removed {deleted_count} records.")


def clean_stale_subreddits(active_subreddits):
    """
    Removes all data associated with subreddits that are NOT in the active list.
@@ -57,9 +59,9 @@ def clean_stale_subreddits(active_subreddits):
    db_subreddits = cursor.fetchall()
    stale_sub_ids = []
    for sub in db_subreddits:
        if sub["name"] not in active_subreddits_lower:
            log.info(f"Found stale subreddit to remove: r/{sub['name']}")
            stale_sub_ids.append(sub["id"])

    if not stale_sub_ids:
        log.info("No stale subreddits to clean.")
        conn.close()
@@ -73,15 +75,18 @@ def clean_stale_subreddits(active_subreddits):
    conn.close()
    log.info("Stale subreddit cleanup complete.")


def get_db_connection():
    conn = sqlite3.connect(DB_FILE)
    conn.row_factory = sqlite3.Row
    return conn


def initialize_db():
    conn = get_db_connection()
    cursor = conn.cursor()
    cursor.execute(
        """
        CREATE TABLE IF NOT EXISTS tickers (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            symbol TEXT NOT NULL UNIQUE,
@@ -89,14 +94,18 @@ def initialize_db():
            closing_price REAL,
            last_updated INTEGER
        )
        """
    )
    cursor.execute(
        """
        CREATE TABLE IF NOT EXISTS subreddits (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT NOT NULL UNIQUE
        )
        """
    )
    cursor.execute(
        """
        CREATE TABLE IF NOT EXISTS mentions (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            ticker_id INTEGER,
@@ -109,8 +118,10 @@ def initialize_db():
            FOREIGN KEY (ticker_id) REFERENCES tickers (id),
            FOREIGN KEY (subreddit_id) REFERENCES subreddits (id)
        )
        """
    )
    cursor.execute(
        """
        CREATE TABLE IF NOT EXISTS posts (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            post_id TEXT NOT NULL UNIQUE,
@@ -122,12 +133,23 @@ def initialize_db():
            avg_comment_sentiment REAL,
            FOREIGN KEY (subreddit_id) REFERENCES subreddits (id)
        )
        """
    )
    conn.commit()
    conn.close()
    log.info("Database initialized successfully.")


def add_mention(
    conn,
    ticker_id,
    subreddit_id,
    post_id,
    mention_type,
    timestamp,
    mention_sentiment,
    post_avg_sentiment=None,
):
    cursor = conn.cursor()
    try:
        cursor.execute(
@@ -135,40 +157,52 @@ def add_mention(conn, ticker_id, subreddit_id, post_id, mention_type, timestamp,
            INSERT INTO mentions (ticker_id, subreddit_id, post_id, mention_type, mention_timestamp, mention_sentiment, post_avg_sentiment)
            VALUES (?, ?, ?, ?, ?, ?, ?)
            """,
            (
                ticker_id,
                subreddit_id,
                post_id,
                mention_type,
                timestamp,
                mention_sentiment,
                post_avg_sentiment,
            ),
        )
        conn.commit()
    except sqlite3.IntegrityError:
        pass


def get_or_create_entity(conn, table_name, column_name, value):
    """Generic function to get or create an entity and return its ID."""
    cursor = conn.cursor()
    cursor.execute(f"SELECT id FROM {table_name} WHERE {column_name} = ?", (value,))
    result = cursor.fetchone()
    if result:
        return result["id"]
    else:
        cursor.execute(f"INSERT INTO {table_name} ({column_name}) VALUES (?)", (value,))
        conn.commit()
        return cursor.lastrowid


def update_ticker_financials(conn, ticker_id, market_cap, closing_price):
    """Updates the financials and timestamp for a specific ticker."""
    cursor = conn.cursor()
    current_timestamp = int(time.time())
    cursor.execute(
        "UPDATE tickers SET market_cap = ?, closing_price = ?, last_updated = ? WHERE id = ?",
        (market_cap, closing_price, current_timestamp, ticker_id),
    )
    conn.commit()


def get_ticker_info(conn, ticker_id):
    """Retrieves all info for a specific ticker by its ID."""
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM tickers WHERE id = ?", (ticker_id,))
    return cursor.fetchone()


def get_week_start_end(for_date):
    """
    Calculates the start (Monday, 00:00:00) and end (Sunday, 23:59:59)
@@ -185,6 +219,7 @@ def get_week_start_end(for_date):
    return start_of_week, end_of_week


def add_or_update_post_analysis(conn, post_data):
    """
    Inserts a new post analysis record or updates an existing one.
@@ -200,10 +235,11 @@ def add_or_update_post_analysis(conn, post_data):
            comment_count = excluded.comment_count,
            avg_comment_sentiment = excluded.avg_comment_sentiment;
        """,
        post_data,
    )
    conn.commit()


def get_overall_summary(limit=10):
    """
    Gets the top tickers across all subreddits from the LAST 24 HOURS.
@@ -226,6 +262,7 @@ def get_overall_summary(limit=10):
    conn.close()
    return results


def get_subreddit_summary(subreddit_name, limit=10):
    """
    Gets the top tickers for a specific subreddit from the LAST 24 HOURS.
@@ -244,10 +281,13 @@ def get_subreddit_summary(subreddit_name, limit=10):
        GROUP BY t.symbol, t.market_cap, t.closing_price
        ORDER BY mention_count DESC LIMIT ?;
    """
    results = conn.execute(
        query, (subreddit_name, one_day_ago_timestamp, limit)
    ).fetchall()
    conn.close()
    return results


def get_daily_summary_for_subreddit(subreddit_name):
    """Gets a summary for the DAILY image view (last 24 hours)."""
    conn = get_db_connection()
@@ -268,6 +308,7 @@ def get_daily_summary_for_subreddit(subreddit_name):
    conn.close()
    return results


def get_weekly_summary_for_subreddit(subreddit_name, for_date):
    """Gets a summary for the WEEKLY image view (full week)."""
    conn = get_db_connection()
@@ -285,10 +326,13 @@ def get_weekly_summary_for_subreddit(subreddit_name, for_date):
        GROUP BY t.symbol, t.market_cap, t.closing_price
        ORDER BY total_mentions DESC LIMIT 10;
    """
    results = conn.execute(
        query, (subreddit_name, start_timestamp, end_timestamp)
    ).fetchall()
    conn.close()
    return results, start_of_week, end_of_week


def get_overall_image_view_summary():
    """
    Gets a summary of top tickers across ALL subreddits for the DAILY image view (last 24 hours).
@@ -311,6 +355,7 @@ def get_overall_image_view_summary():
    conn.close()
    return results


def get_overall_daily_summary():
    """
    Gets the top tickers across all subreddits from the LAST 24 HOURS.
@@ -332,13 +377,16 @@ def get_overall_daily_summary():
    conn.close()
    return results


def get_overall_weekly_summary():
    """
    Gets the top tickers across all subreddits for the LAST 7 DAYS.
    """
    conn = get_db_connection()
    today = datetime.now(timezone.utc)
    start_of_week, end_of_week = get_week_start_end(
        today - timedelta(days=7)
    )  # Get last week's boundaries
    start_timestamp = int(start_of_week.timestamp())
    end_timestamp = int(end_of_week.timestamp())

    query = """
@@ -354,6 +402,7 @@ def get_overall_weekly_summary():
    conn.close()
    return results, start_of_week, end_of_week


def get_deep_dive_details(ticker_symbol):
    """Gets all analyzed posts that mention a specific ticker."""
    conn = get_db_connection()
@@ -367,12 +416,16 @@ def get_deep_dive_details(ticker_symbol):
    conn.close()
    return results


def get_all_scanned_subreddits():
    """Gets a unique list of all subreddits we have data for."""
    conn = get_db_connection()
    results = conn.execute(
        "SELECT DISTINCT name FROM subreddits ORDER BY name ASC;"
    ).fetchall()
    conn.close()
    return [row["name"] for row in results]


def get_all_tickers():
    """Retrieves the ID and symbol of every ticker in the database."""
@@ -381,6 +434,7 @@ def get_all_tickers():
    conn.close()
    return results


def get_ticker_by_symbol(symbol):
    """
    Retrieves a single ticker's ID and symbol from the database.
@@ -388,11 +442,14 @@ def get_ticker_by_symbol(symbol):
""" """
conn = get_db_connection() conn = get_db_connection()
cursor = conn.cursor() cursor = conn.cursor()
cursor.execute("SELECT id, symbol FROM tickers WHERE LOWER(symbol) = LOWER(?)", (symbol,)) cursor.execute(
"SELECT id, symbol FROM tickers WHERE LOWER(symbol) = LOWER(?)", (symbol,)
)
result = cursor.fetchone() result = cursor.fetchone()
conn.close() conn.close()
return result return result
def get_top_daily_ticker_symbols(): def get_top_daily_ticker_symbols():
"""Gets a simple list of the Top 10 ticker symbols from the last 24 hours.""" """Gets a simple list of the Top 10 ticker symbols from the last 24 hours."""
conn = get_db_connection() conn = get_db_connection()
@@ -405,7 +462,8 @@ def get_top_daily_ticker_symbols():
""" """
results = conn.execute(query, (one_day_ago_timestamp,)).fetchall() results = conn.execute(query, (one_day_ago_timestamp,)).fetchall()
conn.close() conn.close()
return [row['symbol'] for row in results] # Return a simple list of strings return [row["symbol"] for row in results] # Return a simple list of strings
def get_top_weekly_ticker_symbols(): def get_top_weekly_ticker_symbols():
"""Gets a simple list of the Top 10 ticker symbols from the last 7 days.""" """Gets a simple list of the Top 10 ticker symbols from the last 7 days."""
@@ -419,7 +477,8 @@ def get_top_weekly_ticker_symbols():
""" """
results = conn.execute(query, (seven_days_ago_timestamp,)).fetchall() results = conn.execute(query, (seven_days_ago_timestamp,)).fetchall()
conn.close() conn.close()
return [row['symbol'] for row in results] # Return a simple list of strings return [row["symbol"] for row in results] # Return a simple list of strings
def get_top_daily_ticker_symbols_for_subreddit(subreddit_name): def get_top_daily_ticker_symbols_for_subreddit(subreddit_name):
"""Gets a list of the Top 10 daily ticker symbols for a specific subreddit.""" """Gets a list of the Top 10 daily ticker symbols for a specific subreddit."""
@@ -432,9 +491,16 @@ def get_top_daily_ticker_symbols_for_subreddit(subreddit_name):
        WHERE LOWER(s.name) = LOWER(?) AND m.mention_timestamp >= ?
        GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
    """
    results = conn.execute(
        query,
        (
            subreddit_name,
            one_day_ago_timestamp,
        ),
    ).fetchall()
    conn.close()
    return [row["symbol"] for row in results]


def get_top_weekly_ticker_symbols_for_subreddit(subreddit_name):
    """Gets a list of the Top 10 weekly ticker symbols for a specific subreddit."""
@@ -447,6 +513,12 @@ def get_top_weekly_ticker_symbols_for_subreddit(subreddit_name):
        WHERE LOWER(s.name) = LOWER(?) AND m.mention_timestamp >= ?
        GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
    """
    results = conn.execute(
        query,
        (
            subreddit_name,
            seven_days_ago_timestamp,
        ),
    ).fetchall()
    conn.close()
    return [row["symbol"] for row in results]
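get_week_start_end's body also sits between hunks; a minimal sketch matching its docstring (Monday 00:00:00 through Sunday 23:59:59; implementation assumed):

from datetime import timedelta

def get_week_start_end(for_date):
    # weekday() is 0 for Monday, so subtracting it lands on that week's Monday.
    start_of_week = (for_date - timedelta(days=for_date.weekday())).replace(
        hour=0, minute=0, second=0, microsecond=0
    )
    end_of_week = start_of_week + timedelta(days=6, hours=23, minutes=59, seconds=59)
    return start_of_week, end_of_week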

View File

@@ -149,6 +149,7 @@ def format_and_print_list(word_set, words_per_line=10):
    # 4. Print the closing brace
    print("}")


# --- Main execution ---
if __name__ == "__main__":
    format_and_print_list(COMMON_WORDS_BLACKLIST)

View File

@@ -5,6 +5,7 @@ import sys
logger = logging.getLogger("rstat_app")


def setup_logging(console_verbose=False, debug_mode=False):
    """
    Configures the application's logger with a new DEBUG level.
@@ -18,15 +19,17 @@ def setup_logging(console_verbose=False, debug_mode=False):
    logger.handlers.clear()

    # File Handler (Always verbose at INFO level or higher)
    file_handler = logging.FileHandler("rstat.log", mode="a")
    file_handler.setLevel(logging.INFO)  # We don't need debug spam in the file usually
    file_formatter = logging.Formatter(
        "%(asctime)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
    )
    file_handler.setFormatter(file_formatter)
    logger.addHandler(file_handler)

    # Console Handler (Verbosity is controlled)
    console_handler = logging.StreamHandler(sys.stdout)
    console_formatter = logging.Formatter("%(message)s")
    console_handler.setFormatter(console_formatter)

    if debug_mode:

View File

@@ -16,18 +16,20 @@ from .ticker_extractor import extract_tickers
from .sentiment_analyzer import get_sentiment_score
from .logger_setup import setup_logging, logger as log


def load_subreddits(filepath):
    """Loads a list of subreddits from a JSON file."""
    try:
        with open(filepath, "r") as f:
            return json.load(f).get("subreddits", [])
    except (FileNotFoundError, json.JSONDecodeError) as e:
        log.error(f"Error loading config file '{filepath}': {e}")
        return None


def get_reddit_instance():
    """Initializes and returns a PRAW Reddit instance."""
    env_path = Path(__file__).parent.parent / ".env"
    load_dotenv(dotenv_path=env_path)

    client_id = os.getenv("REDDIT_CLIENT_ID")
@@ -36,7 +38,10 @@ def get_reddit_instance():
    if not all([client_id, client_secret, user_agent]):
        log.error("Error: Reddit API credentials not found in .env file.")
        return None

    return praw.Reddit(
        client_id=client_id, client_secret=client_secret, user_agent=user_agent
    )


def get_financial_data_via_fetcher(ticker_symbol):
    """
@@ -48,26 +53,33 @@ def get_financial_data_via_fetcher(ticker_symbol):
    # --- Call 1: Get Market Cap ---
    try:
        mc_script_path = project_root / "fetch_market_cap.py"
        command_mc = [sys.executable, str(mc_script_path), ticker_symbol]
        result_mc = subprocess.run(
            command_mc, capture_output=True, text=True, check=True, timeout=30
        )
        financials.update(json.loads(result_mc.stdout))
    except Exception as e:
        log.warning(f"Market cap fetcher failed for {ticker_symbol}: {e}")

    # --- Call 2: Get Closing Price ---
    try:
        cp_script_path = project_root / "fetch_close_price.py"
        command_cp = [sys.executable, str(cp_script_path), ticker_symbol]
        result_cp = subprocess.run(
            command_cp, capture_output=True, text=True, check=True, timeout=30
        )
        financials.update(json.loads(result_cp.stdout))
    except Exception as e:
        log.warning(f"Closing price fetcher failed for {ticker_symbol}: {e}")

    return financials


# --- HELPER FUNCTION: Contains all the optimized logic for one post ---
def _process_submission(
    submission, subreddit_id, conn, comment_limit, fetch_financials
):
    """
    Processes a single Reddit submission with optimized logic.
    - Uses a single loop over comments.
@@ -101,98 +113,208 @@ def _process_submission(submission, subreddit_id, conn, comment_limit, fetch_fin
            # If the title has tickers, every comment is a mention for them
            for ticker_symbol in tickers_in_title:
                if ticker_symbol not in ticker_id_cache:
                    ticker_id_cache[ticker_symbol] = database.get_or_create_entity(
                        conn, "tickers", "symbol", ticker_symbol
                    )
                ticker_id = ticker_id_cache[ticker_symbol]
                database.add_mention(
                    conn,
                    ticker_id,
                    subreddit_id,
                    submission.id,
                    "comment",
                    int(comment.created_utc),
                    comment_sentiment,
                )
        else:
            # If no title tickers, only direct mentions in comments count
            for ticker_symbol in tickers_in_comment:
                if ticker_symbol not in ticker_id_cache:
                    ticker_id_cache[ticker_symbol] = database.get_or_create_entity(
                        conn, "tickers", "symbol", ticker_symbol
                    )
                ticker_id = ticker_id_cache[ticker_symbol]
                database.add_mention(
                    conn,
                    ticker_id,
                    subreddit_id,
                    submission.id,
                    "comment",
                    int(comment.created_utc),
                    comment_sentiment,
                )

    # 3. Process title mentions (if any)
    if tickers_in_title:
        log.info(
            f" -> Title Mention(s): {', '.join(tickers_in_title)}. Attributing all comments."
        )
        post_sentiment = get_sentiment_score(submission.title)
        for ticker_symbol in tickers_in_title:
            if ticker_symbol not in ticker_id_cache:
                ticker_id_cache[ticker_symbol] = database.get_or_create_entity(
                    conn, "tickers", "symbol", ticker_symbol
                )
            ticker_id = ticker_id_cache[ticker_symbol]
            database.add_mention(
                conn,
                ticker_id,
                subreddit_id,
                submission.id,
                "post",
                int(submission.created_utc),
                post_sentiment,
            )

    # 4. Fetch financial data if enabled
    if fetch_financials:
        for ticker_symbol in all_tickers_found_in_post:
            ticker_id = ticker_id_cache[ticker_symbol]  # Guaranteed to be in cache
            ticker_info = database.get_ticker_info(conn, ticker_id)
            if not ticker_info["last_updated"] or (
                current_time - ticker_info["last_updated"]
                > database.MARKET_CAP_REFRESH_INTERVAL
            ):
                log.info(f" -> Fetching financial data for {ticker_symbol}...")
                financials = get_financial_data_via_fetcher(ticker_symbol)
                database.update_ticker_financials(
                    conn,
                    ticker_id,
                    financials.get("market_cap"),
                    financials.get("closing_price"),
                )

    # 5. Save deep dive analysis
    avg_sentiment = (
        sum(all_comment_sentiments) / len(all_comment_sentiments)
        if all_comment_sentiments
        else 0
    )
    post_analysis_data = {
        "post_id": submission.id,
        "title": submission.title,
        "post_url": f"https://reddit.com{submission.permalink}",
        "subreddit_id": subreddit_id,
        "post_timestamp": int(submission.created_utc),
        "comment_count": len(all_comments),
        "avg_comment_sentiment": avg_sentiment,
    }
    database.add_or_update_post_analysis(conn, post_analysis_data)


def scan_subreddits(
    reddit,
    subreddits_list,
    post_limit=100,
    comment_limit=100,
    days_to_scan=1,
    fetch_financials=True,
):
    conn = database.get_db_connection()
    post_age_limit = days_to_scan * 86400
    current_time = time.time()

    log.info(
        f"Scanning {len(subreddits_list)} subreddit(s) for NEW posts in the last {days_to_scan} day(s)..."
    )
    if not fetch_financials:
        log.warning("NOTE: Financial data fetching is disabled for this run.")

    for subreddit_name in subreddits_list:
        try:
            normalized_sub_name = subreddit_name.lower()
            subreddit_id = database.get_or_create_entity(
                conn, "subreddits", "name", normalized_sub_name
            )
            subreddit = reddit.subreddit(normalized_sub_name)
            log.info(f"Scanning r/{normalized_sub_name}...")

            for submission in subreddit.new(limit=post_limit):
                if (current_time - submission.created_utc) > post_age_limit:
                    log.info(
                        f" -> Reached posts older than the {days_to_scan}-day limit."
                    )
                    break

                # Call the new helper function for each post
                _process_submission(
                    submission, subreddit_id, conn, comment_limit, fetch_financials
                )

        except Exception as e:
            log.error(
                f"Could not scan r/{normalized_sub_name}. Error: {e}", exc_info=True
            )

    conn.close()
    log.critical("\n--- Scan Complete ---")


def main():
    """Main function to run the Reddit stock analysis tool."""
    parser = argparse.ArgumentParser(
        description="Analyze stock ticker mentions on Reddit.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "-f",
        "--config",
        default="subreddits.json",
        help="Path to the JSON file for scanning. (Default: subreddits.json)",
    )
    parser.add_argument(
        "-s", "--subreddit", help="Scan a single subreddit, ignoring the config file."
    )
    parser.add_argument(
        "-d",
        "--days",
        type=int,
        default=1,
        help="Number of past days to scan for new posts. (Default: 1)",
    )
    parser.add_argument(
        "-p",
        "--posts",
        type=int,
        default=200,
        help="Max posts to check per subreddit. (Default: 200)",
    )
    parser.add_argument(
        "-c",
        "--comments",
        type=int,
        default=100,
        help="Number of comments to scan per post. (Default: 100)",
    )
    parser.add_argument(
        "-n",
        "--no-financials",
        action="store_true",
        help="Disable fetching of financial data during the Reddit scan.",
    )
    parser.add_argument(
        "--update-top-tickers",
        action="store_true",
        help="Update financial data only for tickers currently in the Top 10 daily/weekly dashboards.",
    )
    parser.add_argument(
        "-u",
        "--update-financials-only",
        nargs="?",
        const="ALL_TICKERS",  # A special value to signify "update all"
        default=None,
        metavar="TICKER",
        help="Update financials. Provide a ticker symbol to update just one,\nor use the flag alone to update all tickers in the database.",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Enable detailed debug logging to the console.",
    )
    parser.add_argument(
        "--stdout", action="store_true", help="Print all log messages to the console."
    )
    args = parser.parse_args()

    setup_logging(console_verbose=args.stdout, debug_mode=args.debug)
@@ -214,11 +336,17 @@ def main():
        # 3. Get all subreddits and loop through them
        all_subreddits = database.get_all_scanned_subreddits()
        log.info(
            f"-> Checking top tickers for {len(all_subreddits)} individual subreddit(s)..."
        )
        for sub_name in all_subreddits:
            log.debug(f" -> Checking r/{sub_name}...")
            top_daily_sub = database.get_top_daily_ticker_symbols_for_subreddit(
                sub_name
            )
            top_weekly_sub = database.get_top_weekly_ticker_symbols_for_subreddit(
                sub_name
            )
            tickers_to_update.update(top_daily_sub)
            tickers_to_update.update(top_weekly_sub)
@@ -227,18 +355,21 @@ def main():
        if not unique_top_tickers:
            log.info("No top tickers found in the last week. Nothing to update.")
        else:
            log.info(
                f"Found {len(unique_top_tickers)} unique top tickers to update: {', '.join(unique_top_tickers)}"
            )
            conn = database.get_db_connection()
            for ticker_symbol in unique_top_tickers:
                # 4. Find the ticker's ID to perform the update
                ticker_info = database.get_ticker_by_symbol(ticker_symbol)
                if ticker_info:
                    log.info(f" -> Updating financials for {ticker_info['symbol']}...")
                    financials = get_financial_data_via_fetcher(ticker_info["symbol"])
                    database.update_ticker_financials(
                        conn,
                        ticker_info["id"],
                        financials.get("market_cap"),
                        financials.get("closing_price"),
                    )
            conn.close()
@@ -253,31 +384,37 @@ def main():
log.info(f"Found {len(all_tickers)} tickers in the database to update.") log.info(f"Found {len(all_tickers)} tickers in the database to update.")
conn = database.get_db_connection() conn = database.get_db_connection()
for ticker in all_tickers: for ticker in all_tickers:
symbol = ticker['symbol'] symbol = ticker["symbol"]
log.info(f" -> Updating financials for {symbol}...") log.info(f" -> Updating financials for {symbol}...")
financials = get_financial_data_via_fetcher(symbol) financials = get_financial_data_via_fetcher(symbol)
database.update_ticker_financials( database.update_ticker_financials(
conn, ticker['id'], conn,
financials.get('market_cap'), ticker["id"],
financials.get('closing_price') financials.get("market_cap"),
financials.get("closing_price"),
) )
conn.close() conn.close()
else: else:
ticker_symbol_to_update = update_mode ticker_symbol_to_update = update_mode
log.critical(f"--- Starting Financial Data Update for single ticker: {ticker_symbol_to_update} ---") log.critical(
f"--- Starting Financial Data Update for single ticker: {ticker_symbol_to_update} ---"
)
ticker_info = database.get_ticker_by_symbol(ticker_symbol_to_update) ticker_info = database.get_ticker_by_symbol(ticker_symbol_to_update)
if ticker_info: if ticker_info:
conn = database.get_db_connection() conn = database.get_db_connection()
log.info(f" -> Updating financials for {ticker_info['symbol']}...") log.info(f" -> Updating financials for {ticker_info['symbol']}...")
financials = get_financial_data_via_fetcher(ticker_info['symbol']) financials = get_financial_data_via_fetcher(ticker_info["symbol"])
database.update_ticker_financials( database.update_ticker_financials(
conn, ticker_info['id'], conn,
financials.get('market_cap'), ticker_info["id"],
financials.get('closing_price') financials.get("market_cap"),
financials.get("closing_price"),
) )
conn.close() conn.close()
else: else:
log.error(f"Ticker '{ticker_symbol_to_update}' not found in the database.") log.error(
f"Ticker '{ticker_symbol_to_update}' not found in the database."
)
log.critical("--- Financial Data Update Complete ---") log.critical("--- Financial Data Update Complete ---")
else: else:
@@ -295,7 +432,8 @@ def main():
            return

        reddit = get_reddit_instance()
        if not reddit:
            return

        scan_subreddits(
            reddit,
@@ -303,8 +441,9 @@ def main():
            post_limit=args.posts,
            comment_limit=args.comments,
            days_to_scan=args.days,
            fetch_financials=(not args.no_financials),
        )


if __name__ == "__main__":
    main()
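The update paths above only assume that get_financial_data_via_fetcher() returns a dict carrying "market_cap" and "closing_price". A hypothetical sketch of that contract — the script names and the subprocess-plus-JSON transport are assumptions for illustration, not the project's actual implementation:

import json
import subprocess

def get_financial_data_via_fetcher(symbol):
    # Merge the output of one or more fetcher scripts that print JSON to stdout.
    result = {"market_cap": None, "closing_price": None}
    for script in ("fetch_market_cap.py", "fetch_close_price.py"):  # hypothetical names
        proc = subprocess.run(["python", script, symbol], capture_output=True, text=True)
        if proc.returncode == 0 and proc.stdout.strip():
            result.update(json.loads(proc.stdout))
    return result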

View File

@@ -16,4 +16,4 @@ def get_sentiment_score(text):
    # The polarity_scores() method returns a dictionary with 'neg', 'neu', 'pos', and 'compound' scores.
    # We are most interested in the 'compound' score.
    scores = _analyzer.polarity_scores(text)
    return scores["compound"]
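For context, this is how the scorer behaves end to end. A minimal sketch, assuming the VADER lexicon is already downloaded and that _analyzer is a module-level SentimentIntensityAnalyzer (NLTK's class and its polarity_scores() API are real; the module layout here is an assumption):

from nltk.sentiment.vader import SentimentIntensityAnalyzer

_analyzer = SentimentIntensityAnalyzer()

def get_sentiment_score(text):
    # 'compound' is VADER's normalized summary score in [-1.0, 1.0].
    return _analyzer.polarity_scores(text)["compound"]

print(get_sentiment_score("This stock is going to the moon!"))  # positive, roughly 0.6
print(get_sentiment_score("Terrible earnings, I'm out."))       # negative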

View File

@@ -3,9 +3,9 @@ import nltk
# This will download the 'vader_lexicon' dataset
# It only needs to be run once
try:
    nltk.data.find("sentiment/vader_lexicon.zip")
    print("VADER lexicon is already downloaded.")
except LookupError:
    print("Downloading VADER lexicon...")
    nltk.download("vader_lexicon")
    print("Download complete.")

View File

@@ -2,24 +2,24 @@
from setuptools import setup, find_packages

with open("requirements.txt") as f:
    requirements = f.read().splitlines()

setup(
    name="reddit-stock-analyzer",
    version="0.0.1",
    author="Pål-Kristian Hamre",
    author_email="its@pkhamre.com",
    description="A command-line tool to analyze stock ticker mentions on Reddit.",
    # This now correctly finds your 'rstat_tool' package
    packages=find_packages(),
    install_requires=requirements,
    entry_points={
        "console_scripts": [
            # The path is now 'package_name.module_name:function_name'
            "rstat=rstat_tool.main:main",
            "rstat-dashboard=rstat_tool.dashboard:start_dashboard",
            "rstat-cleanup=rstat_tool.cleanup:run_cleanup",
        ],
    },
)
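Once the package is installed, the three console scripts declared above are discoverable from Python itself. A small sketch using only the standard library (the group= keyword requires Python 3.10+):

from importlib.metadata import entry_points

for ep in entry_points(group="console_scripts"):
    if ep.name.startswith("rstat"):
        print(ep.name, "->", ep.value)  # e.g. rstat -> rstat_tool.main:main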

View File

@@ -1,5 +1,6 @@
<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
@@ -8,21 +9,64 @@
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin> <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700;800&display=swap" rel="stylesheet"> <link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700;800&display=swap" rel="stylesheet">
<style> <style>
body { margin: 0; padding: 2rem; font-family: 'Inter', sans-serif; background: #1a1a1a; display: flex; flex-direction: column; align-items: center; min-height: 100vh; } body {
.navbar { width: 100%; max-width: 1200px; background-color: rgba(45, 55, 72, 0.5); padding: 1rem 2rem; border-radius: 12px; margin-bottom: 2rem; display: flex; flex-wrap: wrap; gap: 1rem; align-items: center;} margin: 0;
.navbar a { color: #cbd5e0; text-decoration: none; font-weight: 600; padding: 0.5rem 1rem; border-radius: 6px; transition: background-color 0.2s, color 0.2s; } padding: 2rem;
.navbar a.active, .navbar a:hover { background-color: #4a5568; color: #ffffff; } font-family: 'Inter', sans-serif;
.view-switcher { margin-left: auto; display: flex; gap: 0.5rem; } background: #1a1a1a;
display: flex;
flex-direction: column;
align-items: center;
min-height: 100vh;
}
.navbar {
width: 100%;
max-width: 1200px;
background-color: rgba(45, 55, 72, 0.5);
padding: 1rem 2rem;
border-radius: 12px;
margin-bottom: 2rem;
display: flex;
flex-wrap: wrap;
gap: 1rem;
align-items: center;
}
.navbar a {
color: #cbd5e0;
text-decoration: none;
font-weight: 600;
padding: 0.5rem 1rem;
border-radius: 6px;
transition: background-color 0.2s, color 0.2s;
}
.navbar a.active,
.navbar a:hover {
background-color: #4a5568;
color: #ffffff;
}
.view-switcher {
margin-left: auto;
display: flex;
gap: 0.5rem;
}
.dropdown { .dropdown {
position: relative; /* Establishes a positioning context for the menu */ position: relative;
/* Establishes a positioning context for the menu */
display: inline-block; display: inline-block;
} }
.dropdown { .dropdown {
position: relative; position: relative;
display: inline-block; display: inline-block;
/* Remove the padding that was causing the misalignment */ /* Remove the padding that was causing the misalignment */
/* padding-bottom: 0.5rem; */ /* padding-bottom: 0.5rem; */
} }
.dropdown-button { .dropdown-button {
color: #cbd5e0; color: #cbd5e0;
font-weight: 600; font-weight: 600;
@@ -30,12 +74,16 @@
      border-radius: 6px;
      cursor: pointer;
      transition: background-color 0.2s, color 0.2s;
      display: block;
      /* Ensures it behaves predictably with padding */
    }

    .dropdown-button.active,
    .dropdown:hover .dropdown-button {
      background-color: #4a5568;
      color: #ffffff;
    }

    .dropdown-menu {
      visibility: hidden;
      opacity: 0;
@@ -51,6 +99,7 @@
      left: 0;
      transition: opacity 0.2s ease-in-out, visibility 0.2s ease-in-out;
    }

    .dropdown-menu a {
      color: #e2e8f0;
      padding: 0.75rem 1.5rem;
@@ -58,69 +107,220 @@
      display: block;
      text-align: left;
    }

    .dropdown-menu a:hover {
      background-color: #4a5568;
    }

    .dropdown:hover .dropdown-menu {
      visibility: visible;
      opacity: 1;
    }

    .image-container {
      width: 750px;
      background: linear-gradient(145deg, #2d3748, #1a202c);
      color: #ffffff;
      border-radius: 16px;
      padding: 2.5rem;
      box-shadow: 0 10px 30px rgba(0, 0, 0, 0.5);
      text-align: center;
    }

    header {
      display: flex;
      justify-content: space-between;
      align-items: flex-start;
      margin-bottom: 2rem;
    }

    .header-action {
      display: flex;
      align-items: center;
      gap: 1rem;
    }

    .header-action .icon-link svg {
      color: #a0aec0;
      transition: color 0.2s;
    }

    .header-action .icon-link:hover svg {
      color: #ffffff;
    }

    .title-block {
      text-align: left;
    }

    .title-block h1 {
      font-size: 2.5rem;
      font-weight: 800;
      margin: 0;
      line-height: 1;
    }

    .title-block h2 {
      font-size: 1.25rem;
      font-weight: 600;
      margin: 0.5rem 0 0;
      color: #a0aec0;
    }

    .date {
      font-size: 1.1rem;
      font-weight: 600;
      color: #a0aec0;
      letter-spacing: 0.02em;
    }

    table {
      width: 100%;
      border-collapse: collapse;
      text-align: left;
    }

    th,
    td {
      padding: 1rem 0.5rem;
      border-bottom: 1px solid rgba(255, 255, 255, 0.1);
    }

    th {
      font-weight: 700;
      text-transform: uppercase;
      font-size: 0.75rem;
      color: #718096;
      letter-spacing: 0.05em;
    }

    th.mentions,
    th.sentiment {
      text-align: center;
    }

    th.financials {
      text-align: right;
    }

    td {
      font-size: 1.1rem;
      font-weight: 600;
    }

    tr:last-child td {
      border-bottom: none;
    }

    td.rank {
      font-weight: 700;
      color: #cbd5e0;
      width: 5%;
    }

    td.ticker {
      width: 15%;
    }

    td.financials {
      text-align: right;
      width: 20%;
    }

    td.mentions {
      text-align: center;
      width: 15%;
    }

    td.sentiment {
      text-align: center;
      width: 20%;
    }

    .sentiment-bullish {
      color: #48bb78;
      font-weight: 700;
    }

    .sentiment-bearish {
      color: #f56565;
      font-weight: 700;
    }

    .sentiment-neutral {
      color: #a0aec0;
      font-weight: 600;
    }

    footer {
      margin-top: 2.5rem;
    }

    .brand-name {
      font-size: 1.75rem;
      font-weight: 800;
      letter-spacing: -1px;
    }

    .brand-subtitle {
      font-size: 1rem;
      color: #a0aec0;
    }

    /* Style for the ticker link in interactive mode */
    td.ticker a {
      color: inherit;
      text-decoration: none;
      display: inline-block;
      transition: transform 0.1s ease-in-out;
    }

    td.ticker a:hover {
      text-decoration: underline;
      transform: scale(1.05);
    }

    /* Ticker coloring (used in both modes) */
    tr:nth-child(1) td.ticker {
      color: #d8b4fe;
    }

    tr:nth-child(6) td.ticker {
      color: #fca5a5;
    }

    tr:nth-child(2) td.ticker {
      color: #a3e635;
    }

    tr:nth-child(7) td.ticker {
      color: #fdba74;
    }

    tr:nth-child(3) td.ticker {
      color: #67e8f9;
    }

    tr:nth-child(8) td.ticker {
      color: #6ee7b7;
    }

    tr:nth-child(4) td.ticker {
      color: #fde047;
    }

    tr:nth-child(9) td.ticker {
      color: #93c5fd;
    }

    tr:nth-child(5) td.ticker {
      color: #fcd34d;
    }

    tr:nth-child(10) td.ticker {
      color: #d1d5db;
    }
  </style>
</head>

<body>
  {% if not is_image_mode %}
  <nav class="navbar">
@@ -149,4 +349,5 @@
    {% block content %}{% endblock %}
  </main>
</body>

</html>

View File

@@ -16,7 +16,8 @@
<!-- Only show the icon if we are NOT already in image mode -->
{% if not is_image_mode %}
<a href="{{ base_url }}?view={{ view_type }}&image=true" class="icon-link" title="View as Shareable Image">
  <svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" viewBox="0 0 24 24" fill="none"
    stroke="currentColor" stroke-width="2.5" stroke-linecap="round" stroke-linejoin="round">
    <path d="M23 19a2 2 0 0 1-2 2H3a2 2 0 0 1-2-2V8a2 2 0 0 1 2-2h4l2-3h6l2 3h4a2 2 0 0 1 2 2z"></path>
    <circle cx="12" cy="13" r="4"></circle>
  </svg>

View File

@@ -15,8 +15,8 @@
<span>Avg. Sentiment:
  {% if post.avg_comment_sentiment > 0.1 %}
  <span class="sentiment-bullish">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
  {% elif post.avg_comment_sentiment < -0.1 %}
  <span class="sentiment-bearish">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
  {% else %}
  <span class="sentiment-neutral">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
  {% endif %}
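The template buckets the compound score with ±0.1 thresholds. The same rule in Python, as a sketch (the function name is illustrative):

def sentiment_label(score):
    # Mirrors the template: > 0.1 bullish, < -0.1 bearish, otherwise neutral.
    if score > 0.1:
        return "bullish"
    if score < -0.1:
        return "bearish"
    return "neutral"

assert sentiment_label(0.42) == "bullish"
assert sentiment_label(-0.3) == "bearish"
assert sentiment_label(0.05) == "neutral"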

View File

@@ -6,8 +6,7 @@ import logging
# Set up a simple logger to see detailed error tracebacks
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
# A list of tickers to test. One very common one, and two from your logs.
@@ -20,31 +19,41 @@ for ticker_symbol in TICKERS_TO_TEST:
    # --- Test 1: The Ticker().info method ---
    try:
        logging.info(
            f"Attempting to create Ticker object and get .info for {ticker_symbol}..."
        )
        ticker_obj = yf.Ticker(ticker_symbol)
        market_cap = ticker_obj.info.get("marketCap")
        if market_cap is not None:
            logging.info(f"SUCCESS: Got market cap for {ticker_symbol}: {market_cap}")
        else:
            logging.warning(
                f"PARTIAL SUCCESS: .info call for {ticker_symbol} worked, but no market cap was found."
            )
    except Exception:
        logging.error(
            f"FAILURE: An error occurred during the Ticker().info call for {ticker_symbol}.",
            exc_info=True,
        )

    # --- Test 2: The yf.download() method ---
    try:
        logging.info(f"Attempting yf.download() for {ticker_symbol}...")
        data = yf.download(
            ticker_symbol, period="2d", progress=False, auto_adjust=False
        )
        if not data.empty:
            logging.info(
                f"SUCCESS: yf.download() for {ticker_symbol} returned {len(data)} rows of data."
            )
        else:
            logging.warning(
                f"PARTIAL SUCCESS: yf.download() for {ticker_symbol} worked, but returned no data (likely delisted)."
            )
    except Exception:
        logging.error(
            f"FAILURE: An error occurred during the yf.download() call for {ticker_symbol}.",
            exc_info=True,
        )
print("\n--- YFINANCE Diagnostic Test Complete ---") print("\n--- YFINANCE Diagnostic Test Complete ---")