Format all code.
@@ -8,6 +8,7 @@ from playwright.sync_api import sync_playwright
# Define the output directory as a constant
OUTPUT_DIR = "images"


def export_image(url_path, filename_prefix):
"""
Launches a headless browser, navigates to a URL path, and screenshots
@@ -45,7 +46,9 @@ def export_image(url_path, filename_prefix):

except Exception as e:
print(f"\nAn error occurred during export: {e}")
print("Please ensure the 'rstat-dashboard' server is running in another terminal.")
print(
"Please ensure the 'rstat-dashboard' server is running in another terminal."
)


if __name__ == "__main__":
@@ -53,9 +56,16 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Export subreddit sentiment images.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-s", "--subreddit", help="The name of the subreddit to export.")
group.add_argument("-o", "--overall", action="store_true", help="Export the overall summary image.")
group.add_argument(
"-o", "--overall", action="store_true", help="Export the overall summary image."
)

parser.add_argument("-w", "--weekly", action="store_true", help="Export the weekly view instead of the daily view (only for --subreddit).")
parser.add_argument(
"-w",
"--weekly",
action="store_true",
help="Export the weekly view instead of the daily view (only for --subreddit).",
)
args = parser.parse_args()

# Determine the correct URL path and filename based on arguments
@@ -21,11 +21,10 @@ if __name__ == "__main__":
# This uses a different internal code path that we have proven is stable.
ticker = yf.Ticker(ticker_symbol)
data = ticker.history(period="2d", auto_adjust=False)
# --- END OF FIX ---

closing_price = None
if not data.empty:
last_close_raw = data['Close'].iloc[-1]
last_close_raw = data["Close"].iloc[-1]
if pd.notna(last_close_raw):
closing_price = float(last_close_raw)
@@ -17,7 +17,7 @@ if __name__ == "__main__":

try:
# Directly get the market cap
market_cap = yf.Ticker(ticker_symbol).info.get('marketCap')
market_cap = yf.Ticker(ticker_symbol).info.get("marketCap")

# On success, print JSON to stdout and exit cleanly
print(json.dumps({"market_cap": market_cap}))
@@ -10,6 +10,7 @@ import socket
# --- IMPORTANT: Ensure this matches the "redirect uri" in your Reddit App settings ---
REDIRECT_URI = "http://localhost:5000"


def main():
print("--- RSTAT Refresh Token Generator ---")
load_dotenv()
@@ -17,7 +18,9 @@ def main():
client_secret = os.getenv("REDDIT_CLIENT_SECRET")

if not all([client_id, client_secret]):
print("Error: REDDIT_CLIENT_ID and REDDIT_CLIENT_SECRET must be set in your .env file.")
print(
"Error: REDDIT_CLIENT_ID and REDDIT_CLIENT_SECRET must be set in your .env file."
)
return

# 1. Initialize PRAW
@@ -25,7 +28,7 @@ def main():
client_id=client_id,
client_secret=client_secret,
redirect_uri=REDIRECT_URI,
user_agent="rstat_token_fetcher (by u/YourUsername)" # Can be anything
user_agent="rstat_token_fetcher (by u/YourUsername)", # Can be anything
)

# 2. Generate the authorization URL
@@ -37,11 +40,17 @@ def main():
print("\nStep 1: Open this URL in your browser:\n")
print(auth_url)

print("\nStep 2: Log in to Reddit, click 'Allow', and you'll be redirected to a 'page not found'.")
print("Step 3: Copy the ENTIRE URL from your browser's address bar after the redirect.")
print(
"\nStep 2: Log in to Reddit, click 'Allow', and you'll be redirected to a 'page not found'."
)
print(
"Step 3: Copy the ENTIRE URL from your browser's address bar after the redirect."
)

# 3. Get the redirected URL from the user
redirected_url = input("\nStep 4: Paste the full redirected URL here and press Enter:\n> ")
redirected_url = input(
"\nStep 4: Paste the full redirected URL here and press Enter:\n> "
)

# 4. Exchange the authorization code for a refresh token
try:
@@ -57,12 +66,17 @@ def main():
print("\n--- SUCCESS! ---")
print("Your Refresh Token is:\n")
print(refresh_token)
print("\nStep 5: Copy this token and add it to your .env file as REDDIT_REFRESH_TOKEN.")
print("Step 6: You can now delete your REDDIT_USERNAME and REDDIT_PASSWORD from the .env file.")
print(
"\nStep 5: Copy this token and add it to your .env file as REDDIT_REFRESH_TOKEN."
)
print(
"Step 6: You can now delete your REDDIT_USERNAME and REDDIT_PASSWORD from the .env file."
)

except Exception as e:
print(f"\nAn error occurred: {e}")
print("Please make sure you copied the full URL.")


if __name__ == "__main__":
main()
@@ -11,10 +11,11 @@ from pathlib import Path
# --- CONFIGURATION ---
IMAGE_DIR = "images"


def get_reddit_instance():
"""Initializes and returns a PRAW Reddit instance using OAuth2 refresh token."""

env_path = Path(__file__).parent / '.env'
env_path = Path(__file__).parent / ".env"
load_dotenv(dotenv_path=env_path)

client_id = os.getenv("REDDIT_CLIENT_ID")
@@ -23,16 +24,19 @@ def get_reddit_instance():
refresh_token = os.getenv("REDDIT_REFRESH_TOKEN")

if not all([client_id, client_secret, user_agent, refresh_token]):
print("Error: Reddit API credentials (including REDDIT_REFRESH_TOKEN) must be set in .env file.")
print(
"Error: Reddit API credentials (including REDDIT_REFRESH_TOKEN) must be set in .env file."
)
return None

return praw.Reddit(
client_id=client_id,
client_secret=client_secret,
user_agent=user_agent,
refresh_token=refresh_token
refresh_token=refresh_token,
)


def find_latest_image(pattern):
"""Finds the most recent file in the IMAGE_DIR that matches a given pattern."""
try:
@@ -47,12 +51,29 @@ def find_latest_image(pattern):
print(f"Error finding image file: {e}")
return None


def main():
"""Main function to find an image and post it to Reddit."""
parser = argparse.ArgumentParser(description="Find the latest sentiment image and post it to a subreddit.")
parser.add_argument("-s", "--subreddit", help="The source subreddit of the image to post. (Defaults to overall summary)")
parser.add_argument("-w", "--weekly", action="store_true", help="Post the weekly summary instead of the daily one.")
parser.add_argument("-t", "--target-subreddit", default="rstat", help="The subreddit to post the image to. (Default: rstat)")
parser = argparse.ArgumentParser(
description="Find the latest sentiment image and post it to a subreddit."
)
parser.add_argument(
"-s",
"--subreddit",
help="The source subreddit of the image to post. (Defaults to overall summary)",
)
parser.add_argument(
"-w",
"--weekly",
action="store_true",
help="Post the weekly summary instead of the daily one.",
)
parser.add_argument(
"-t",
"--target-subreddit",
default="rstat",
help="The subreddit to post the image to. (Default: rstat)",
)
args = parser.parse_args()

# --- 1. Determine filename pattern and post title ---
@@ -65,9 +86,13 @@ def main():
else:
# Default to the overall summary
if args.weekly:
print("Warning: --weekly flag has no effect for overall summary. Posting overall daily image.")
print(
"Warning: --weekly flag has no effect for overall summary. Posting overall daily image."
)
filename_pattern = "overall_summary_*.png"
post_title = f"Overall Top 10 Ticker Mentions Across Reddit ({current_date_str})"
post_title = (
f"Overall Top 10 Ticker Mentions Across Reddit ({current_date_str})"
)

print(f"Searching for image pattern: {filename_pattern}")

@@ -75,7 +100,9 @@ def main():
image_to_post = find_latest_image(filename_pattern)

if not image_to_post:
print(f"Error: No image found matching the pattern '{filename_pattern}'. Please run the scraper and exporter first.")
print(
f"Error: No image found matching the pattern '{filename_pattern}'. Please run the scraper and exporter first."
)
return

print(f"Found image: {image_to_post}")
@@ -92,7 +119,7 @@ def main():
submission = target_sub.submit_image(
title=post_title,
image_path=image_to_post,
flair_id=None # Optional: You can add a flair ID here if you want
flair_id=None, # Optional: You can add a flair ID here if you want
)

print("\n--- Post Successful! ---")
@@ -3,27 +3,34 @@
import argparse
from . import database
from .logger_setup import setup_logging, logger as log

# We can't reuse load_subreddits from main anymore if it's not in the same file
# So we will duplicate it here. It's small and keeps this script self-contained.
import json


def load_subreddits(filepath):
"""Loads a list of subreddits from a JSON file."""
try:
with open(filepath, 'r') as f:
with open(filepath, "r") as f:
data = json.load(f)
return data.get("subreddits", [])
except (FileNotFoundError, json.JSONDecodeError) as e:
log.error(f"Error loading config file '{filepath}': {e}")
return None


def run_cleanup():
"""Main function for the cleanup tool."""
parser = argparse.ArgumentParser(
description="A tool to clean stale data from the RSTAT database.",
formatter_class=argparse.RawTextHelpFormatter
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--tickers",
action="store_true",
help="Clean tickers that are in the blacklist.",
)
parser.add_argument("--tickers", action="store_true", help="Clean tickers that are in the blacklist.")

# --- UPDATED ARGUMENT DEFINITION ---
# nargs='?': Makes the argument optional.
@@ -31,14 +38,18 @@ def run_cleanup():
# default=None: The value if the flag is not present at all.
parser.add_argument(
"--subreddits",
nargs='?',
const='subreddits.json',
nargs="?",
const="subreddits.json",
default=None,
help="Clean data from subreddits NOT in the specified config file.\n(Defaults to 'subreddits.json' if flag is used without a value)."
help="Clean data from subreddits NOT in the specified config file.\n(Defaults to 'subreddits.json' if flag is used without a value).",
)

parser.add_argument("--all", action="store_true", help="Run all available cleanup tasks.")
parser.add_argument("--stdout", action="store_true", help="Print all log messages to the console.")
parser.add_argument(
"--all", action="store_true", help="Run all available cleanup tasks."
)
parser.add_argument(
"--stdout", action="store_true", help="Print all log messages to the console."
)

args = parser.parse_args()

@@ -57,7 +68,7 @@ def run_cleanup():
if args.all or args.subreddits is not None:
run_any_task = True
# If --all is used, default to 'subreddits.json' if --subreddits wasn't also specified
config_file = args.subreddits or 'subreddits.json'
config_file = args.subreddits or "subreddits.json"
log.info(f"\nCleaning subreddits based on active list in: {config_file}")
active_subreddits = load_subreddits(config_file)
if active_subreddits is not None:
@@ -65,10 +76,13 @@ def run_cleanup():

if not run_any_task:
parser.print_help()
log.error("\nError: Please provide at least one cleanup option (e.g., --tickers, --subreddits, --all).")
log.error(
"\nError: Please provide at least one cleanup option (e.g., --tickers, --subreddits, --all)."
)
return

log.critical("\nCleanup finished.")


if __name__ == "__main__":
run_cleanup()
@@ -9,12 +9,13 @@ from .database import (
get_daily_summary_for_subreddit,
get_weekly_summary_for_subreddit,
get_overall_daily_summary, # Now correctly imported
get_overall_weekly_summary # Now correctly imported
get_overall_weekly_summary, # Now correctly imported
)

app = Flask(__name__, template_folder='../templates')
app = Flask(__name__, template_folder="../templates")

@app.template_filter('format_mc')

@app.template_filter("format_mc")
def format_market_cap(mc):
"""Formats a large number into a readable market cap string."""
if mc is None or mc == 0:
@@ -28,18 +29,20 @@ def format_market_cap(mc):
else:
return f"${mc:,}"


@app.context_processor
def inject_subreddits():
"""Makes the list of all subreddits available to every template for the navbar."""
return dict(all_subreddits=get_all_scanned_subreddits())


@app.route("/")
def overall_dashboard():
"""Handler for the main, overall dashboard."""
view_type = request.args.get('view', 'daily')
is_image_mode = request.args.get('image') == 'true'
view_type = request.args.get("view", "daily")
is_image_mode = request.args.get("image") == "true"

if view_type == 'weekly':
if view_type == "weekly":
tickers, start, end = get_overall_weekly_summary()
date_string = f"{start.strftime('%b %d')} - {end.strftime('%b %d, %Y')}"
subtitle = "All Subreddits - Top 10 Weekly"
@@ -57,16 +60,17 @@ def overall_dashboard():
view_type=view_type,
subreddit_name=None,
is_image_mode=is_image_mode,
base_url="/"
base_url="/",
)


@app.route("/subreddit/<name>")
def subreddit_dashboard(name):
"""Handler for per-subreddit dashboards."""
view_type = request.args.get('view', 'daily')
is_image_mode = request.args.get('image') == 'true'
view_type = request.args.get("view", "daily")
is_image_mode = request.args.get("image") == "true"

if view_type == 'weekly':
if view_type == "weekly":
today = datetime.now(timezone.utc)
target_date = today - timedelta(days=7)
tickers, start, end = get_weekly_summary_for_subreddit(name, target_date)
@@ -86,9 +90,10 @@ def subreddit_dashboard(name):
view_type=view_type,
subreddit_name=name,
is_image_mode=is_image_mode,
base_url=f"/subreddit/{name}"
base_url=f"/subreddit/{name}",
)


@app.route("/deep-dive/<symbol>")
def deep_dive(symbol):
"""The handler for the deep-dive page for a specific ticker."""
@@ -96,6 +101,7 @@ def deep_dive(symbol):
posts = get_deep_dive_details(symbol)
return render_template("deep_dive.html", posts=posts, symbol=symbol)


def start_dashboard():
"""The main function called by the 'rstat-dashboard' command."""
log.info("Starting Flask server...")
@@ -103,5 +109,6 @@ def start_dashboard():
log.info("Press CTRL+C to stop the server.")
app.run(debug=True)


if __name__ == "__main__":
start_dashboard()
@@ -9,6 +9,7 @@ from datetime import datetime, timedelta, timezone
DB_FILE = "reddit_stocks.db"
MARKET_CAP_REFRESH_INTERVAL = 86400


def clean_stale_tickers():
"""
Removes tickers and their associated mentions from the database
@@ -18,7 +19,7 @@ def clean_stale_tickers():
conn = get_db_connection()
cursor = conn.cursor()

placeholders = ','.join('?' for _ in COMMON_WORDS_BLACKLIST)
placeholders = ",".join("?" for _ in COMMON_WORDS_BLACKLIST)
query = f"SELECT id, symbol FROM tickers WHERE symbol IN ({placeholders})"

cursor.execute(query, tuple(COMMON_WORDS_BLACKLIST))
@@ -30,8 +31,8 @@ def clean_stale_tickers():
return

for ticker in stale_tickers:
ticker_id = ticker['id']
ticker_symbol = ticker['symbol']
ticker_id = ticker["id"]
ticker_symbol = ticker["symbol"]
log.info(f"Removing stale ticker '{ticker_symbol}' (ID: {ticker_id})...")
cursor.execute("DELETE FROM mentions WHERE ticker_id = ?", (ticker_id,))
cursor.execute("DELETE FROM tickers WHERE id = ?", (ticker_id,))
@@ -41,6 +42,7 @@ def clean_stale_tickers():
conn.close()
log.info(f"Cleanup complete. Removed {deleted_count} records.")


def clean_stale_subreddits(active_subreddits):
"""
Removes all data associated with subreddits that are NOT in the active list.
@@ -57,9 +59,9 @@ def clean_stale_subreddits(active_subreddits):
db_subreddits = cursor.fetchall()
stale_sub_ids = []
for sub in db_subreddits:
if sub['name'] not in active_subreddits_lower:
if sub["name"] not in active_subreddits_lower:
log.info(f"Found stale subreddit to remove: r/{sub['name']}")
stale_sub_ids.append(sub['id'])
stale_sub_ids.append(sub["id"])
if not stale_sub_ids:
log.info("No stale subreddits to clean.")
conn.close()
@@ -73,15 +75,18 @@ def clean_stale_subreddits(active_subreddits):
conn.close()
log.info("Stale subreddit cleanup complete.")


def get_db_connection():
conn = sqlite3.connect(DB_FILE)
conn.row_factory = sqlite3.Row
return conn


def initialize_db():
conn = get_db_connection()
cursor = conn.cursor()
cursor.execute("""
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS tickers (
id INTEGER PRIMARY KEY AUTOINCREMENT,
symbol TEXT NOT NULL UNIQUE,
@@ -89,14 +94,18 @@ def initialize_db():
closing_price REAL,
last_updated INTEGER
)
""")
cursor.execute("""
"""
)
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS subreddits (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE
)
""")
cursor.execute("""
"""
)
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS mentions (
id INTEGER PRIMARY KEY AUTOINCREMENT,
ticker_id INTEGER,
@@ -109,8 +118,10 @@ def initialize_db():
FOREIGN KEY (ticker_id) REFERENCES tickers (id),
FOREIGN KEY (subreddit_id) REFERENCES subreddits (id)
)
""")
cursor.execute("""
"""
)
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS posts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
post_id TEXT NOT NULL UNIQUE,
@@ -122,12 +133,23 @@ def initialize_db():
avg_comment_sentiment REAL,
FOREIGN KEY (subreddit_id) REFERENCES subreddits (id)
)
""")
"""
)
conn.commit()
conn.close()
log.info("Database initialized successfully.")
def add_mention(conn, ticker_id, subreddit_id, post_id, mention_type, timestamp, mention_sentiment, post_avg_sentiment=None):

def add_mention(
conn,
ticker_id,
subreddit_id,
post_id,
mention_type,
timestamp,
mention_sentiment,
post_avg_sentiment=None,
):
cursor = conn.cursor()
try:
cursor.execute(
@@ -135,40 +157,52 @@ def add_mention(conn, ticker_id, subreddit_id, post_id, mention_type, timestamp,
INSERT INTO mentions (ticker_id, subreddit_id, post_id, mention_type, mention_timestamp, mention_sentiment, post_avg_sentiment)
VALUES (?, ?, ?, ?, ?, ?, ?)
""",
(ticker_id, subreddit_id, post_id, mention_type, timestamp, mention_sentiment, post_avg_sentiment)
(
ticker_id,
subreddit_id,
post_id,
mention_type,
timestamp,
mention_sentiment,
post_avg_sentiment,
),
)
conn.commit()
except sqlite3.IntegrityError:
pass


def get_or_create_entity(conn, table_name, column_name, value):
"""Generic function to get or create an entity and return its ID."""
cursor = conn.cursor()
cursor.execute(f"SELECT id FROM {table_name} WHERE {column_name} = ?", (value,))
result = cursor.fetchone()
if result:
return result['id']
return result["id"]
else:
cursor.execute(f"INSERT INTO {table_name} ({column_name}) VALUES (?)", (value,))
conn.commit()
return cursor.lastrowid


def update_ticker_financials(conn, ticker_id, market_cap, closing_price):
"""Updates the financials and timestamp for a specific ticker."""
cursor = conn.cursor()
current_timestamp = int(time.time())
cursor.execute(
"UPDATE tickers SET market_cap = ?, closing_price = ?, last_updated = ? WHERE id = ?",
(market_cap, closing_price, current_timestamp, ticker_id)
(market_cap, closing_price, current_timestamp, ticker_id),
)
conn.commit()


def get_ticker_info(conn, ticker_id):
"""Retrieves all info for a specific ticker by its ID."""
cursor = conn.cursor()
cursor.execute("SELECT * FROM tickers WHERE id = ?", (ticker_id,))
return cursor.fetchone()


def get_week_start_end(for_date):
"""
Calculates the start (Monday, 00:00:00) and end (Sunday, 23:59:59)
@@ -185,6 +219,7 @@ def get_week_start_end(for_date):

return start_of_week, end_of_week


def add_or_update_post_analysis(conn, post_data):
"""
Inserts a new post analysis record or updates an existing one.
@@ -200,10 +235,11 @@ def add_or_update_post_analysis(conn, post_data):
comment_count = excluded.comment_count,
avg_comment_sentiment = excluded.avg_comment_sentiment;
""",
post_data
post_data,
)
conn.commit()


def get_overall_summary(limit=10):
"""
Gets the top tickers across all subreddits from the LAST 24 HOURS.
@@ -226,6 +262,7 @@ def get_overall_summary(limit=10):
conn.close()
return results


def get_subreddit_summary(subreddit_name, limit=10):
"""
Gets the top tickers for a specific subreddit from the LAST 24 HOURS.
@@ -244,12 +281,15 @@ def get_subreddit_summary(subreddit_name, limit=10):
GROUP BY t.symbol, t.market_cap, t.closing_price
ORDER BY mention_count DESC LIMIT ?;
"""
results = conn.execute(query, (subreddit_name, one_day_ago_timestamp, limit)).fetchall()
results = conn.execute(
query, (subreddit_name, one_day_ago_timestamp, limit)
).fetchall()
conn.close()
return results


def get_daily_summary_for_subreddit(subreddit_name):
""" Gets a summary for the DAILY image view (last 24 hours). """
"""Gets a summary for the DAILY image view (last 24 hours)."""
conn = get_db_connection()
one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
one_day_ago_timestamp = int(one_day_ago.timestamp())
@@ -268,8 +308,9 @@ def get_daily_summary_for_subreddit(subreddit_name):
conn.close()
return results


def get_weekly_summary_for_subreddit(subreddit_name, for_date):
""" Gets a summary for the WEEKLY image view (full week). """
"""Gets a summary for the WEEKLY image view (full week)."""
conn = get_db_connection()
start_of_week, end_of_week = get_week_start_end(for_date)
start_timestamp = int(start_of_week.timestamp())
@@ -285,10 +326,13 @@ def get_weekly_summary_for_subreddit(subreddit_name, for_date):
GROUP BY t.symbol, t.market_cap, t.closing_price
ORDER BY total_mentions DESC LIMIT 10;
"""
results = conn.execute(query, (subreddit_name, start_timestamp, end_timestamp)).fetchall()
results = conn.execute(
query, (subreddit_name, start_timestamp, end_timestamp)
).fetchall()
conn.close()
return results, start_of_week, end_of_week
def get_overall_image_view_summary():
"""
Gets a summary of top tickers across ALL subreddits for the DAILY image view (last 24 hours).
@@ -311,6 +355,7 @@ def get_overall_image_view_summary():
conn.close()
return results


def get_overall_daily_summary():
"""
Gets the top tickers across all subreddits from the LAST 24 HOURS.
@@ -332,13 +377,16 @@ def get_overall_daily_summary():
conn.close()
return results


def get_overall_weekly_summary():
"""
Gets the top tickers across all subreddits for the LAST 7 DAYS.
"""
conn = get_db_connection()
today = datetime.now(timezone.utc)
start_of_week, end_of_week = get_week_start_end(today - timedelta(days=7)) # Get last week's boundaries
start_of_week, end_of_week = get_week_start_end(
today - timedelta(days=7)
) # Get last week's boundaries
start_timestamp = int(start_of_week.timestamp())
end_timestamp = int(end_of_week.timestamp())
query = """
@@ -354,8 +402,9 @@ def get_overall_weekly_summary():
conn.close()
return results, start_of_week, end_of_week


def get_deep_dive_details(ticker_symbol):
""" Gets all analyzed posts that mention a specific ticker. """
"""Gets all analyzed posts that mention a specific ticker."""
conn = get_db_connection()
query = """
SELECT DISTINCT p.*, s.name as subreddit_name FROM posts p
@@ -367,12 +416,16 @@ def get_deep_dive_details(ticker_symbol):
conn.close()
return results


def get_all_scanned_subreddits():
""" Gets a unique list of all subreddits we have data for. """
"""Gets a unique list of all subreddits we have data for."""
conn = get_db_connection()
results = conn.execute("SELECT DISTINCT name FROM subreddits ORDER BY name ASC;").fetchall()
results = conn.execute(
"SELECT DISTINCT name FROM subreddits ORDER BY name ASC;"
).fetchall()
conn.close()
return [row['name'] for row in results]
return [row["name"] for row in results]


def get_all_tickers():
"""Retrieves the ID and symbol of every ticker in the database."""
@@ -381,6 +434,7 @@ def get_all_tickers():
conn.close()
return results


def get_ticker_by_symbol(symbol):
"""
Retrieves a single ticker's ID and symbol from the database.
@@ -388,11 +442,14 @@ def get_ticker_by_symbol(symbol):
"""
conn = get_db_connection()
cursor = conn.cursor()
cursor.execute("SELECT id, symbol FROM tickers WHERE LOWER(symbol) = LOWER(?)", (symbol,))
cursor.execute(
"SELECT id, symbol FROM tickers WHERE LOWER(symbol) = LOWER(?)", (symbol,)
)
result = cursor.fetchone()
conn.close()
return result


def get_top_daily_ticker_symbols():
"""Gets a simple list of the Top 10 ticker symbols from the last 24 hours."""
conn = get_db_connection()
@@ -405,7 +462,8 @@ def get_top_daily_ticker_symbols():
"""
results = conn.execute(query, (one_day_ago_timestamp,)).fetchall()
conn.close()
return [row['symbol'] for row in results] # Return a simple list of strings
return [row["symbol"] for row in results] # Return a simple list of strings


def get_top_weekly_ticker_symbols():
"""Gets a simple list of the Top 10 ticker symbols from the last 7 days."""
@@ -419,7 +477,8 @@ def get_top_weekly_ticker_symbols():
"""
results = conn.execute(query, (seven_days_ago_timestamp,)).fetchall()
conn.close()
return [row['symbol'] for row in results] # Return a simple list of strings
return [row["symbol"] for row in results] # Return a simple list of strings


def get_top_daily_ticker_symbols_for_subreddit(subreddit_name):
"""Gets a list of the Top 10 daily ticker symbols for a specific subreddit."""
@@ -432,9 +491,16 @@ def get_top_daily_ticker_symbols_for_subreddit(subreddit_name):
WHERE LOWER(s.name) = LOWER(?) AND m.mention_timestamp >= ?
GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
"""
results = conn.execute(query, (subreddit_name, one_day_ago_timestamp,)).fetchall()
results = conn.execute(
query,
(
subreddit_name,
one_day_ago_timestamp,
),
).fetchall()
conn.close()
return [row['symbol'] for row in results]
return [row["symbol"] for row in results]


def get_top_weekly_ticker_symbols_for_subreddit(subreddit_name):
"""Gets a list of the Top 10 weekly ticker symbols for a specific subreddit."""
@@ -447,6 +513,12 @@ def get_top_weekly_ticker_symbols_for_subreddit(subreddit_name):
WHERE LOWER(s.name) = LOWER(?) AND m.mention_timestamp >= ?
GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
"""
results = conn.execute(query, (subreddit_name, seven_days_ago_timestamp,)).fetchall()
results = conn.execute(
query,
(
subreddit_name,
seven_days_ago_timestamp,
),
).fetchall()
conn.close()
return [row['symbol'] for row in results]
return [row["symbol"] for row in results]
@@ -130,7 +130,7 @@ def format_and_print_list(word_set, words_per_line=10):
# 3. Iterate through the sorted list and print words, respecting the line limit
for i in range(0, len(sorted_words), words_per_line):
# Get a chunk of words for the current line
line_chunk = sorted_words[i:i + words_per_line]
line_chunk = sorted_words[i : i + words_per_line]

# Format each word with double quotes
formatted_words = [f'"{word}"' for word in line_chunk]
@@ -149,6 +149,7 @@ def format_and_print_list(word_set, words_per_line=10):
# 4. Print the closing brace
print("}")


# --- Main execution ---
if __name__ == "__main__":
format_and_print_list(COMMON_WORDS_BLACKLIST)
@@ -5,6 +5,7 @@ import sys

logger = logging.getLogger("rstat_app")


def setup_logging(console_verbose=False, debug_mode=False):
"""
Configures the application's logger with a new DEBUG level.
@@ -18,15 +19,17 @@ def setup_logging(console_verbose=False, debug_mode=False):
logger.handlers.clear()

# File Handler (Always verbose at INFO level or higher)
file_handler = logging.FileHandler("rstat.log", mode='a')
file_handler = logging.FileHandler("rstat.log", mode="a")
file_handler.setLevel(logging.INFO) # We don't need debug spam in the file usually
file_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
file_formatter = logging.Formatter(
"%(asctime)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)

# Console Handler (Verbosity is controlled)
console_handler = logging.StreamHandler(sys.stdout)
console_formatter = logging.Formatter('%(message)s')
console_formatter = logging.Formatter("%(message)s")
console_handler.setFormatter(console_formatter)

if debug_mode:
@@ -16,18 +16,20 @@ from .ticker_extractor import extract_tickers
from .sentiment_analyzer import get_sentiment_score
from .logger_setup import setup_logging, logger as log


def load_subreddits(filepath):
"""Loads a list of subreddits from a JSON file."""
try:
with open(filepath, 'r') as f:
with open(filepath, "r") as f:
return json.load(f).get("subreddits", [])
except (FileNotFoundError, json.JSONDecodeError) as e:
log.error(f"Error loading config file '{filepath}': {e}")
return None


def get_reddit_instance():
"""Initializes and returns a PRAW Reddit instance."""
env_path = Path(__file__).parent.parent / '.env'
env_path = Path(__file__).parent.parent / ".env"
load_dotenv(dotenv_path=env_path)

client_id = os.getenv("REDDIT_CLIENT_ID")
@@ -36,7 +38,10 @@ def get_reddit_instance():
if not all([client_id, client_secret, user_agent]):
log.error("Error: Reddit API credentials not found in .env file.")
return None
return praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent=user_agent)
return praw.Reddit(
client_id=client_id, client_secret=client_secret, user_agent=user_agent
)


def get_financial_data_via_fetcher(ticker_symbol):
"""
@@ -48,26 +53,33 @@ def get_financial_data_via_fetcher(ticker_symbol):

# --- Call 1: Get Market Cap ---
try:
mc_script_path = project_root / 'fetch_market_cap.py'
mc_script_path = project_root / "fetch_market_cap.py"
command_mc = [sys.executable, str(mc_script_path), ticker_symbol]
result_mc = subprocess.run(command_mc, capture_output=True, text=True, check=True, timeout=30)
result_mc = subprocess.run(
command_mc, capture_output=True, text=True, check=True, timeout=30
)
financials.update(json.loads(result_mc.stdout))
except Exception as e:
log.warning(f"Market cap fetcher failed for {ticker_symbol}: {e}")

# --- Call 2: Get Closing Price ---
try:
cp_script_path = project_root / 'fetch_close_price.py'
cp_script_path = project_root / "fetch_close_price.py"
command_cp = [sys.executable, str(cp_script_path), ticker_symbol]
result_cp = subprocess.run(command_cp, capture_output=True, text=True, check=True, timeout=30)
result_cp = subprocess.run(
command_cp, capture_output=True, text=True, check=True, timeout=30
)
financials.update(json.loads(result_cp.stdout))
except Exception as e:
log.warning(f"Closing price fetcher failed for {ticker_symbol}: {e}")

return financials


# --- HELPER FUNCTION: Contains all the optimized logic for one post ---
def _process_submission(submission, subreddit_id, conn, comment_limit, fetch_financials):
def _process_submission(
submission, subreddit_id, conn, comment_limit, fetch_financials
):
"""
Processes a single Reddit submission with optimized logic.
- Uses a single loop over comments.
@@ -101,98 +113,208 @@ def _process_submission(submission, subreddit_id, conn, comment_limit, fetch_fin
# If the title has tickers, every comment is a mention for them
for ticker_symbol in tickers_in_title:
if ticker_symbol not in ticker_id_cache:
ticker_id_cache[ticker_symbol] = database.get_or_create_entity(conn, 'tickers', 'symbol', ticker_symbol)
ticker_id_cache[ticker_symbol] = database.get_or_create_entity(
conn, "tickers", "symbol", ticker_symbol
)
ticker_id = ticker_id_cache[ticker_symbol]
database.add_mention(conn, ticker_id, subreddit_id, submission.id, 'comment', int(comment.created_utc), comment_sentiment)
database.add_mention(
conn,
ticker_id,
subreddit_id,
submission.id,
"comment",
int(comment.created_utc),
comment_sentiment,
)
else:
# If no title tickers, only direct mentions in comments count
for ticker_symbol in tickers_in_comment:
if ticker_symbol not in ticker_id_cache:
ticker_id_cache[ticker_symbol] = database.get_or_create_entity(conn, 'tickers', 'symbol', ticker_symbol)
ticker_id_cache[ticker_symbol] = database.get_or_create_entity(
conn, "tickers", "symbol", ticker_symbol
)
ticker_id = ticker_id_cache[ticker_symbol]
database.add_mention(conn, ticker_id, subreddit_id, submission.id, 'comment', int(comment.created_utc), comment_sentiment)
database.add_mention(
conn,
ticker_id,
subreddit_id,
submission.id,
"comment",
int(comment.created_utc),
comment_sentiment,
)

# 3. Process title mentions (if any)
if tickers_in_title:
log.info(f" -> Title Mention(s): {', '.join(tickers_in_title)}. Attributing all comments.")
log.info(
f" -> Title Mention(s): {', '.join(tickers_in_title)}. Attributing all comments."
)
post_sentiment = get_sentiment_score(submission.title)
for ticker_symbol in tickers_in_title:
if ticker_symbol not in ticker_id_cache:
ticker_id_cache[ticker_symbol] = database.get_or_create_entity(conn, 'tickers', 'symbol', ticker_symbol)
ticker_id_cache[ticker_symbol] = database.get_or_create_entity(
conn, "tickers", "symbol", ticker_symbol
)
ticker_id = ticker_id_cache[ticker_symbol]
database.add_mention(conn, ticker_id, subreddit_id, submission.id, 'post', int(submission.created_utc), post_sentiment)
database.add_mention(
conn,
ticker_id,
subreddit_id,
submission.id,
"post",
int(submission.created_utc),
post_sentiment,
)

# 4. Fetch financial data if enabled
if fetch_financials:
for ticker_symbol in all_tickers_found_in_post:
ticker_id = ticker_id_cache[ticker_symbol] # Guaranteed to be in cache
ticker_info = database.get_ticker_info(conn, ticker_id)
if not ticker_info['last_updated'] or (current_time - ticker_info['last_updated'] > database.MARKET_CAP_REFRESH_INTERVAL):
if not ticker_info["last_updated"] or (
current_time - ticker_info["last_updated"]
> database.MARKET_CAP_REFRESH_INTERVAL
):
log.info(f" -> Fetching financial data for {ticker_symbol}...")
financials = get_financial_data_via_fetcher(ticker_symbol)
database.update_ticker_financials(conn, ticker_id, financials.get('market_cap'), financials.get('closing_price'))
database.update_ticker_financials(
conn,
ticker_id,
financials.get("market_cap"),
financials.get("closing_price"),
)

# 5. Save deep dive analysis
avg_sentiment = sum(all_comment_sentiments) / len(all_comment_sentiments) if all_comment_sentiments else 0
avg_sentiment = (
sum(all_comment_sentiments) / len(all_comment_sentiments)
if all_comment_sentiments
else 0
)
post_analysis_data = {
"post_id": submission.id, "title": submission.title,
"post_url": f"https://reddit.com{submission.permalink}", "subreddit_id": subreddit_id,
"post_timestamp": int(submission.created_utc), "comment_count": len(all_comments),
"avg_comment_sentiment": avg_sentiment
"post_id": submission.id,
"title": submission.title,
"post_url": f"https://reddit.com{submission.permalink}",
"subreddit_id": subreddit_id,
"post_timestamp": int(submission.created_utc),
"comment_count": len(all_comments),
"avg_comment_sentiment": avg_sentiment,
}
database.add_or_update_post_analysis(conn, post_analysis_data)
def scan_subreddits(reddit, subreddits_list, post_limit=100, comment_limit=100, days_to_scan=1, fetch_financials=True):

def scan_subreddits(
reddit,
subreddits_list,
post_limit=100,
comment_limit=100,
days_to_scan=1,
fetch_financials=True,
):
conn = database.get_db_connection()
post_age_limit = days_to_scan * 86400
current_time = time.time()

log.info(f"Scanning {len(subreddits_list)} subreddit(s) for NEW posts in the last {days_to_scan} day(s)...")
log.info(
f"Scanning {len(subreddits_list)} subreddit(s) for NEW posts in the last {days_to_scan} day(s)..."
)
if not fetch_financials:
log.warning("NOTE: Financial data fetching is disabled for this run.")

for subreddit_name in subreddits_list:
try:
normalized_sub_name = subreddit_name.lower()
subreddit_id = database.get_or_create_entity(conn, 'subreddits', 'name', normalized_sub_name)
subreddit_id = database.get_or_create_entity(
conn, "subreddits", "name", normalized_sub_name
)
subreddit = reddit.subreddit(normalized_sub_name)
log.info(f"Scanning r/{normalized_sub_name}...")

for submission in subreddit.new(limit=post_limit):
if (current_time - submission.created_utc) > post_age_limit:
log.info(f" -> Reached posts older than the {days_to_scan}-day limit.")
log.info(
f" -> Reached posts older than the {days_to_scan}-day limit."
)
break

# Call the new helper function for each post
_process_submission(submission, subreddit_id, conn, comment_limit, fetch_financials)
_process_submission(
submission, subreddit_id, conn, comment_limit, fetch_financials
)

except Exception as e:
log.error(f"Could not scan r/{normalized_sub_name}. Error: {e}", exc_info=True)
log.error(
f"Could not scan r/{normalized_sub_name}. Error: {e}", exc_info=True
)

conn.close()
log.critical("\n--- Scan Complete ---")


def main():
"""Main function to run the Reddit stock analysis tool."""
parser = argparse.ArgumentParser(description="Analyze stock ticker mentions on Reddit.", formatter_class=argparse.RawTextHelpFormatter)
parser = argparse.ArgumentParser(
description="Analyze stock ticker mentions on Reddit.",
formatter_class=argparse.RawTextHelpFormatter,
)

parser.add_argument("-f", "--config", default="subreddits.json", help="Path to the JSON file for scanning. (Default: subreddits.json)")
parser.add_argument("-s", "--subreddit", help="Scan a single subreddit, ignoring the config file.")
parser.add_argument("-d", "--days", type=int, default=1, help="Number of past days to scan for new posts. (Default: 1)")
parser.add_argument("-p", "--posts", type=int, default=200, help="Max posts to check per subreddit. (Default: 200)")
parser.add_argument("-c", "--comments", type=int, default=100, help="Number of comments to scan per post. (Default: 100)")
parser.add_argument("-n", "--no-financials", action="store_true", help="Disable fetching of financial data during the Reddit scan.")
parser.add_argument("--update-top-tickers", action="store_true", help="Update financial data only for tickers currently in the Top 10 daily/weekly dashboards.")
parser.add_argument(
"-u", "--update-financials-only",
nargs='?',
"-f",
"--config",
default="subreddits.json",
help="Path to the JSON file for scanning. (Default: subreddits.json)",
)
parser.add_argument(
"-s", "--subreddit", help="Scan a single subreddit, ignoring the config file."
)
parser.add_argument(
"-d",
"--days",
type=int,
default=1,
help="Number of past days to scan for new posts. (Default: 1)",
)
parser.add_argument(
"-p",
"--posts",
type=int,
default=200,
help="Max posts to check per subreddit. (Default: 200)",
)
parser.add_argument(
"-c",
"--comments",
type=int,
default=100,
help="Number of comments to scan per post. (Default: 100)",
)
parser.add_argument(
"-n",
"--no-financials",
action="store_true",
help="Disable fetching of financial data during the Reddit scan.",
)
parser.add_argument(
"--update-top-tickers",
action="store_true",
help="Update financial data only for tickers currently in the Top 10 daily/weekly dashboards.",
)
parser.add_argument(
"-u",
"--update-financials-only",
nargs="?",
const="ALL_TICKERS", # A special value to signify "update all"
default=None,
metavar='TICKER',
help="Update financials. Provide a ticker symbol to update just one,\nor use the flag alone to update all tickers in the database."
metavar="TICKER",
help="Update financials. Provide a ticker symbol to update just one,\nor use the flag alone to update all tickers in the database.",
)
parser.add_argument(
"--debug",
action="store_true",
help="Enable detailed debug logging to the console.",
)
parser.add_argument(
"--stdout", action="store_true", help="Print all log messages to the console."
)
parser.add_argument("--debug", action="store_true", help="Enable detailed debug logging to the console.")
parser.add_argument("--stdout", action="store_true", help="Print all log messages to the console.")

args = parser.parse_args()
setup_logging(console_verbose=args.stdout, debug_mode=args.debug)
@@ -214,11 +336,17 @@ def main():

# 3. Get all subreddits and loop through them
all_subreddits = database.get_all_scanned_subreddits()
log.info(f"-> Checking top tickers for {len(all_subreddits)} individual subreddit(s)...")
log.info(
f"-> Checking top tickers for {len(all_subreddits)} individual subreddit(s)..."
)
for sub_name in all_subreddits:
log.debug(f" -> Checking r/{sub_name}...")
top_daily_sub = database.get_top_daily_ticker_symbols_for_subreddit(sub_name)
top_weekly_sub = database.get_top_weekly_ticker_symbols_for_subreddit(sub_name)
top_daily_sub = database.get_top_daily_ticker_symbols_for_subreddit(
sub_name
)
top_weekly_sub = database.get_top_weekly_ticker_symbols_for_subreddit(
sub_name
)
tickers_to_update.update(top_daily_sub)
tickers_to_update.update(top_weekly_sub)

@@ -227,18 +355,21 @@ def main():
if not unique_top_tickers:
log.info("No top tickers found in the last week. Nothing to update.")
else:
log.info(f"Found {len(unique_top_tickers)} unique top tickers to update: {', '.join(unique_top_tickers)}")
log.info(
f"Found {len(unique_top_tickers)} unique top tickers to update: {', '.join(unique_top_tickers)}"
)
conn = database.get_db_connection()
for ticker_symbol in unique_top_tickers:
# 4. Find the ticker's ID to perform the update
ticker_info = database.get_ticker_by_symbol(ticker_symbol)
if ticker_info:
log.info(f" -> Updating financials for {ticker_info['symbol']}...")
financials = get_financial_data_via_fetcher(ticker_info['symbol'])
financials = get_financial_data_via_fetcher(ticker_info["symbol"])
database.update_ticker_financials(
conn, ticker_info['id'],
financials.get('market_cap'),
financials.get('closing_price')
conn,
ticker_info["id"],
financials.get("market_cap"),
financials.get("closing_price"),
)
conn.close()

@@ -253,31 +384,37 @@ def main():
log.info(f"Found {len(all_tickers)} tickers in the database to update.")
conn = database.get_db_connection()
for ticker in all_tickers:
symbol = ticker['symbol']
symbol = ticker["symbol"]
log.info(f" -> Updating financials for {symbol}...")
financials = get_financial_data_via_fetcher(symbol)
database.update_ticker_financials(
conn, ticker['id'],
financials.get('market_cap'),
financials.get('closing_price')
conn,
ticker["id"],
financials.get("market_cap"),
financials.get("closing_price"),
)
conn.close()
else:
ticker_symbol_to_update = update_mode
log.critical(f"--- Starting Financial Data Update for single ticker: {ticker_symbol_to_update} ---")
log.critical(
f"--- Starting Financial Data Update for single ticker: {ticker_symbol_to_update} ---"
)
ticker_info = database.get_ticker_by_symbol(ticker_symbol_to_update)
if ticker_info:
conn = database.get_db_connection()
log.info(f" -> Updating financials for {ticker_info['symbol']}...")
financials = get_financial_data_via_fetcher(ticker_info['symbol'])
financials = get_financial_data_via_fetcher(ticker_info["symbol"])
database.update_ticker_financials(
conn, ticker_info['id'],
financials.get('market_cap'),
financials.get('closing_price')
conn,
ticker_info["id"],
financials.get("market_cap"),
financials.get("closing_price"),
)
conn.close()
else:
log.error(f"Ticker '{ticker_symbol_to_update}' not found in the database.")
log.error(
f"Ticker '{ticker_symbol_to_update}' not found in the database."
)
log.critical("--- Financial Data Update Complete ---")

else:
@@ -295,7 +432,8 @@ def main():
return

reddit = get_reddit_instance()
if not reddit: return
if not reddit:
return

scan_subreddits(
reddit,
@@ -303,8 +441,9 @@ def main():
post_limit=args.posts,
comment_limit=args.comments,
days_to_scan=args.days,
fetch_financials=(not args.no_financials)
fetch_financials=(not args.no_financials),
)


if __name__ == "__main__":
main()
@@ -16,4 +16,4 @@ def get_sentiment_score(text):
# The polarity_scores() method returns a dictionary with 'neg', 'neu', 'pos', and 'compound' scores.
# We are most interested in the 'compound' score.
scores = _analyzer.polarity_scores(text)
return scores['compound']
return scores["compound"]
@@ -3,9 +3,9 @@ import nltk
# This will download the 'vader_lexicon' dataset
# It only needs to be run once
try:
nltk.data.find('sentiment/vader_lexicon.zip')
nltk.data.find("sentiment/vader_lexicon.zip")
print("VADER lexicon is already downloaded.")
except LookupError:
print("Downloading VADER lexicon...")
nltk.download('vader_lexicon')
nltk.download("vader_lexicon")
print("Download complete.")
setup.py
@@ -2,24 +2,24 @@

from setuptools import setup, find_packages

with open('requirements.txt') as f:
with open("requirements.txt") as f:
requirements = f.read().splitlines()

setup(
name='reddit-stock-analyzer',
version='0.0.1',
author='Pål-Kristian Hamre',
author_email='its@pkhamre.com',
description='A command-line tool to analyze stock ticker mentions on Reddit.',
name="reddit-stock-analyzer",
version="0.0.1",
author="Pål-Kristian Hamre",
author_email="its@pkhamre.com",
description="A command-line tool to analyze stock ticker mentions on Reddit.",
# This now correctly finds your 'rstat_tool' package
packages=find_packages(),
install_requires=requirements,
entry_points={
'console_scripts': [
"console_scripts": [
# The path is now 'package_name.module_name:function_name'
'rstat=rstat_tool.main:main',
'rstat-dashboard=rstat_tool.dashboard:start_dashboard',
'rstat-cleanup=rstat_tool.cleanup:run_cleanup',
"rstat=rstat_tool.main:main",
"rstat-dashboard=rstat_tool.dashboard:start_dashboard",
"rstat-cleanup=rstat_tool.cleanup:run_cleanup",
],
},
)
@@ -1,5 +1,6 @@
<!DOCTYPE html>
<html lang="en">

<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
@@ -8,21 +9,64 @@
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700;800&display=swap" rel="stylesheet">
<style>
body { margin: 0; padding: 2rem; font-family: 'Inter', sans-serif; background: #1a1a1a; display: flex; flex-direction: column; align-items: center; min-height: 100vh; }
.navbar { width: 100%; max-width: 1200px; background-color: rgba(45, 55, 72, 0.5); padding: 1rem 2rem; border-radius: 12px; margin-bottom: 2rem; display: flex; flex-wrap: wrap; gap: 1rem; align-items: center;}
.navbar a { color: #cbd5e0; text-decoration: none; font-weight: 600; padding: 0.5rem 1rem; border-radius: 6px; transition: background-color 0.2s, color 0.2s; }
.navbar a.active, .navbar a:hover { background-color: #4a5568; color: #ffffff; }
.view-switcher { margin-left: auto; display: flex; gap: 0.5rem; }
body {
margin: 0;
padding: 2rem;
font-family: 'Inter', sans-serif;
background: #1a1a1a;
display: flex;
flex-direction: column;
align-items: center;
min-height: 100vh;
}

.navbar {
width: 100%;
max-width: 1200px;
background-color: rgba(45, 55, 72, 0.5);
padding: 1rem 2rem;
border-radius: 12px;
margin-bottom: 2rem;
display: flex;
flex-wrap: wrap;
gap: 1rem;
align-items: center;
}

.navbar a {
color: #cbd5e0;
text-decoration: none;
font-weight: 600;
padding: 0.5rem 1rem;
border-radius: 6px;
transition: background-color 0.2s, color 0.2s;
}

.navbar a.active,
.navbar a:hover {
background-color: #4a5568;
color: #ffffff;
}

.view-switcher {
margin-left: auto;
display: flex;
gap: 0.5rem;
}

.dropdown {
position: relative; /* Establishes a positioning context for the menu */
position: relative;
/* Establishes a positioning context for the menu */
display: inline-block;
}

.dropdown {
position: relative;
display: inline-block;
/* Remove the padding that was causing the misalignment */
/* padding-bottom: 0.5rem; */
}

.dropdown-button {
color: #cbd5e0;
font-weight: 600;
@@ -30,19 +74,23 @@
border-radius: 6px;
cursor: pointer;
transition: background-color 0.2s, color 0.2s;
display: block; /* Ensures it behaves predictably with padding */
display: block;
/* Ensures it behaves predictably with padding */
}
.dropdown-button.active, .dropdown:hover .dropdown-button {

.dropdown-button.active,
.dropdown:hover .dropdown-button {
background-color: #4a5568;
color: #ffffff;
}

.dropdown-menu {
visibility: hidden;
opacity: 0;
position: absolute;
background-color: #2d3748;
min-width: 200px;
box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.4);
box-shadow: 0px 8px 16px 0px rgba(0, 0, 0, 0.4);
z-index: 1;
border-radius: 8px;
padding: 0.5rem 0;
@@ -51,6 +99,7 @@
left: 0;
transition: opacity 0.2s ease-in-out, visibility 0.2s ease-in-out;
}

.dropdown-menu a {
color: #e2e8f0;
padding: 0.75rem 1.5rem;
@@ -58,73 +107,224 @@
display: block;
text-align: left;
}

.dropdown-menu a:hover {
background-color: #4a5568;
}

.dropdown:hover .dropdown-menu {
visibility: visible;
opacity: 1;
}
.image-container { width: 750px; background: linear-gradient(145deg, #2d3748, #1a202c); color: #ffffff; border-radius: 16px; padding: 2.5rem; box-shadow: 0 10px 30px rgba(0,0,0,0.5); text-align: center; }
header { display: flex; justify-content: space-between; align-items: flex-start; margin-bottom: 2rem; }

.image-container {
width: 750px;
background: linear-gradient(145deg, #2d3748, #1a202c);
color: #ffffff;
border-radius: 16px;
padding: 2.5rem;
box-shadow: 0 10px 30px rgba(0, 0, 0, 0.5);
text-align: center;
}

header {
display: flex;
justify-content: space-between;
align-items: flex-start;
margin-bottom: 2rem;
}

.header-action {
display: flex;
align-items: center;
gap: 1rem;
}

.header-action .icon-link svg {
color: #a0aec0;
transition: color 0.2s;
}

.header-action .icon-link:hover svg {
color: #ffffff;
}
.title-block { text-align: left; }
.title-block h1 { font-size: 2.5rem; font-weight: 800; margin: 0; line-height: 1; }
.title-block h2 { font-size: 1.25rem; font-weight: 600; margin: 0.5rem 0 0; color: #a0aec0; }
.date { font-size: 1.1rem; font-weight: 600; color: #a0aec0; letter-spacing: 0.02em; }
table { width: 100%; border-collapse: collapse; text-align: left; }
th, td { padding: 1rem 0.5rem; border-bottom: 1px solid rgba(255, 255, 255, 0.1); }
th { font-weight: 700; text-transform: uppercase; font-size: 0.75rem; color: #718096; letter-spacing: 0.05em; }
th.mentions, th.sentiment { text-align: center; }
th.financials { text-align: right; }
td { font-size: 1.1rem; font-weight: 600; }
tr:last-child td { border-bottom: none; }
td.rank { font-weight: 700; color: #cbd5e0; width: 5%; }
td.ticker { width: 15%; }
td.financials { text-align: right; width: 20%; }
td.mentions { text-align: center; width: 15%; }
td.sentiment { text-align: center; width: 20%; }
.sentiment-bullish { color: #48bb78; font-weight: 700; }
.sentiment-bearish { color: #f56565; font-weight: 700; }
.sentiment-neutral { color: #a0aec0; font-weight: 600; }
footer { margin-top: 2.5rem; }
.brand-name { font-size: 1.75rem; font-weight: 800; letter-spacing: -1px; }
.brand-subtitle { font-size: 1rem; color: #a0aec0; }
/* Style for the ticker link in interactive mode */
.title-block {
text-align: left;
}

.title-block h1 {
font-size: 2.5rem;
font-weight: 800;
margin: 0;
line-height: 1;
}

.title-block h2 {
font-size: 1.25rem;
font-weight: 600;
margin: 0.5rem 0 0;
color: #a0aec0;
}

.date {
font-size: 1.1rem;
font-weight: 600;
color: #a0aec0;
letter-spacing: 0.02em;
}

table {
width: 100%;
border-collapse: collapse;
text-align: left;
}

th,
td {
padding: 1rem 0.5rem;
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
}

th {
font-weight: 700;
text-transform: uppercase;
font-size: 0.75rem;
color: #718096;
letter-spacing: 0.05em;
}

th.mentions,
th.sentiment {
text-align: center;
}

th.financials {
text-align: right;
}

td {
font-size: 1.1rem;
font-weight: 600;
}

tr:last-child td {
border-bottom: none;
}

td.rank {
font-weight: 700;
color: #cbd5e0;
width: 5%;
}

td.ticker {
width: 15%;
}

td.financials {
text-align: right;
width: 20%;
}

td.mentions {
text-align: center;
width: 15%;
}

td.sentiment {
text-align: center;
width: 20%;
}

.sentiment-bullish {
color: #48bb78;
font-weight: 700;
}

.sentiment-bearish {
color: #f56565;
font-weight: 700;
}

.sentiment-neutral {
color: #a0aec0;
font-weight: 600;
}

footer {
margin-top: 2.5rem;
}

.brand-name {
font-size: 1.75rem;
font-weight: 800;
letter-spacing: -1px;
}

.brand-subtitle {
font-size: 1rem;
color: #a0aec0;
}
td.ticker a {
color: inherit; /* Make the link color the same as the text */
color: inherit;
text-decoration: none;
display: inline-block;
transition: transform 0.1s ease-in-out;
}

td.ticker a:hover {
text-decoration: underline;
transform: scale(1.05);
}

/* Ticker coloring (used in both modes) */
tr:nth-child(1) td.ticker { color: #d8b4fe; } tr:nth-child(6) td.ticker { color: #fca5a5; }
tr:nth-child(2) td.ticker { color: #a3e635; } tr:nth-child(7) td.ticker { color: #fdba74; }
tr:nth-child(3) td.ticker { color: #67e8f9; } tr:nth-child(8) td.ticker { color: #6ee7b7; }
tr:nth-child(4) td.ticker { color: #fde047; } tr:nth-child(9) td.ticker { color: #93c5fd; }
tr:nth-child(5) td.ticker { color: #fcd34d; } tr:nth-child(10) td.ticker { color: #d1d5db; }
tr:nth-child(1) td.ticker {
color: #d8b4fe;
}

tr:nth-child(6) td.ticker {
color: #fca5a5;
}

tr:nth-child(2) td.ticker {
color: #a3e635;
}

tr:nth-child(7) td.ticker {
color: #fdba74;
}

tr:nth-child(3) td.ticker {
color: #67e8f9;
}

tr:nth-child(8) td.ticker {
color: #6ee7b7;
}

tr:nth-child(4) td.ticker {
color: #fde047;
}

tr:nth-child(9) td.ticker {
color: #93c5fd;
}

tr:nth-child(5) td.ticker {
color: #fcd34d;
}

tr:nth-child(10) td.ticker {
color: #d1d5db;
}
</style>
</head>

<body>
{% if not is_image_mode %}
<nav class="navbar">
<a href="/" {% if not subreddit_name %}class="active"{% endif %}>Overall</a>
<a href="/" {% if not subreddit_name %}class="active" {% endif %}>Overall</a>

<!-- --- THIS IS THE NEW HTML STRUCTURE FOR THE DROPDOWN --- -->
<div class="dropdown">
@@ -140,8 +340,8 @@
<!-- --- END OF NEW HTML STRUCTURE --- -->

<div class="view-switcher">
<a href="?view=daily" {% if view_type == 'daily' %}class="active"{% endif %}>Daily</a>
<a href="?view=weekly" {% if view_type == 'weekly' %}class="active"{% endif %}>Weekly</a>
<a href="?view=daily" {% if view_type=='daily' %}class="active" {% endif %}>Daily</a>
<a href="?view=weekly" {% if view_type=='weekly' %}class="active" {% endif %}>Weekly</a>
</div>
</nav>
{% endif %}
@@ -149,4 +349,5 @@
{% block content %}{% endblock %}
</main>
</body>

</html>
@@ -16,7 +16,8 @@
<!-- Only show the icon if we are NOT already in image mode -->
{% if not is_image_mode %}
<a href="{{ base_url }}?view={{ view_type }}&image=true" class="icon-link" title="View as Shareable Image">
<svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2.5" stroke-linecap="round" stroke-linejoin="round">
<svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" viewBox="0 0 24 24" fill="none"
stroke="currentColor" stroke-width="2.5" stroke-linecap="round" stroke-linejoin="round">
<path d="M23 19a2 2 0 0 1-2 2H3a2 2 0 0 1-2-2V8a2 2 0 0 1 2-2h4l2-3h6l2 3h4a2 2 0 0 1 2 2z"></path>
<circle cx="12" cy="13" r="4"></circle>
</svg>

@@ -3,11 +3,11 @@
{% block title %}Deep Dive: {{ symbol }}{% endblock %}

{% block content %}
<h1>Deep Dive Analysis for: <strong>{{ symbol }}</strong></h1>
<p>Showing posts that mention {{ symbol }}, sorted by most recent.</p>
<h1>Deep Dive Analysis for: <strong>{{ symbol }}</strong></h1>
<p>Showing posts that mention {{ symbol }}, sorted by most recent.</p>

{% for post in posts %}
<div class="post-card">
{% for post in posts %}
<div class="post-card">
<h3><a href="{{ post.post_url }}" target="_blank">{{ post.title }}</a></h3>
<div class="post-meta">
<span>r/{{ post.subreddit_name }}</span> |
@@ -15,15 +15,15 @@
<span>Avg. Sentiment:
{% if post.avg_comment_sentiment > 0.1 %}
<span class="sentiment-bullish">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
{% elif post.avg_comment_sentiment < -0.1 %}
<span class="sentiment-bearish">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
{% elif post.avg_comment_sentiment < -0.1 %} <span class="sentiment-bearish">{{
"%.2f"|format(post.avg_comment_sentiment) }}</span>
{% else %}
<span class="sentiment-neutral">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
{% endif %}
</span>
</div>
</div>
{% else %}
<p>No analyzed posts found for this ticker. Run the 'rstat' scraper to gather data.</p>
{% endfor %}
</div>
{% else %}
<p>No analyzed posts found for this ticker. Run the 'rstat' scraper to gather data.</p>
{% endfor %}
{% endblock %}

@@ -6,8 +6,7 @@ import logging

# Set up a simple logger to see detailed error tracebacks
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)

# A list of tickers to test. One very common one, and two from your logs.
@@ -20,31 +19,41 @@ for ticker_symbol in TICKERS_TO_TEST:

# --- Test 1: The Ticker().info method ---
try:
logging.info(f"Attempting to create Ticker object and get .info for {ticker_symbol}...")
logging.info(
f"Attempting to create Ticker object and get .info for {ticker_symbol}..."
)
ticker_obj = yf.Ticker(ticker_symbol)
market_cap = ticker_obj.info.get('marketCap')
market_cap = ticker_obj.info.get("marketCap")
if market_cap is not None:
logging.info(f"SUCCESS: Got market cap for {ticker_symbol}: {market_cap}")
else:
logging.warning(f"PARTIAL SUCCESS: .info call for {ticker_symbol} worked, but no market cap was found.")
logging.warning(
f"PARTIAL SUCCESS: .info call for {ticker_symbol} worked, but no market cap was found."
)
except Exception:
logging.error(f"FAILURE: An error occurred during the Ticker().info call for {ticker_symbol}.", exc_info=True)

logging.error(
f"FAILURE: An error occurred during the Ticker().info call for {ticker_symbol}.",
exc_info=True,
)

# --- Test 2: The yf.download() method ---
try:
logging.info(f"Attempting yf.download() for {ticker_symbol}...")
data = yf.download(
ticker_symbol,
period="2d",
progress=False,
auto_adjust=False
ticker_symbol, period="2d", progress=False, auto_adjust=False
)
if not data.empty:
logging.info(f"SUCCESS: yf.download() for {ticker_symbol} returned {len(data)} rows of data.")
logging.info(
f"SUCCESS: yf.download() for {ticker_symbol} returned {len(data)} rows of data."
)
else:
logging.warning(f"PARTIAL SUCCESS: yf.download() for {ticker_symbol} worked, but returned no data (likely delisted).")
logging.warning(
f"PARTIAL SUCCESS: yf.download() for {ticker_symbol} worked, but returned no data (likely delisted)."
)
except Exception:
logging.error(f"FAILURE: An error occurred during the yf.download() call for {ticker_symbol}.", exc_info=True)
logging.error(
f"FAILURE: An error occurred during the yf.download() call for {ticker_symbol}.",
exc_info=True,
)

print("\n--- YFINANCE Diagnostic Test Complete ---")