Compare commits: 728fe43571...main (103 commits)

Commit SHA1s:

```
cdce16c29e, 8238ca5352, 35577770dd, 1c974595ef, 6c444962ac, ba9d81f453, b9767e4cfc, 61c9aba952,
4012ff7bea, 537a62988a, 9628883fa7, 673bc9cd6f, 2aaab6673f, c392fbc366, 1aaa2d70a0, 7fec7ec740,
8ebaaf8b36, 2f43380970, 437f7b055f, 262b4ee3cc, ef288d565b, aa0a383b9c, a17767f6e4, 703f52c217,
d85d372a65, 885ddada71, 8b6ad157dc, 474431084e, 051e24878a, 0fc88c17ef, 3beadf57a3, ff0f528d36,
0623770d10, 81338563a2, 1cc7f556c4, 30899d35b2, 26011eb170, 9329c591b5, bae0d1902b, 9e5455592b,
8a80df5946, 712b12dc7f, 84486adb83, f7faebfc0d, d05b3a0cc7, 5d3e510f6b, c792ea0bf8, 319ee0f402,
f314d57453, 5ec49a53b5, e0fe761c3d, 2aa378f23b, 0acb8470c5, f6536761bc, 776c8ff688, ac7ae5e34a,
f3d01e296f, 6d610c7c31, 6b2004cb27, 6611999b5f, a2459745c1, e92a508be3, 3c2a38d1a1, 55ea5d187f,
5319bc554a, 9f49660970, afcba995f1, 3499cecb8b, 8448ff1897, 56e0965a5f, f940470de3, 38e42efdef,
0dab12eb8c, 44fc3ef533, 67f627e7ea, c5a91c9d72, eb6de197f0, 0d6d9516d7, 841f6a5305, c9e754c9c9,
07c1fd3841, de57a5b26b, bd27db49e7, fa7eddf02f, bd59092674, 2cb32bc1cb, 161502e214, ab44bc0e96,
2688a7df44, 45818046a2, 38e9ed9b01, f248500d76, b573b9d2f3, f6ea86fa91, d4ed76e153, afe3cecb4f,
966ef45916, 6b4fb136e2, 0bc4f1a93c, 67d195d5d7, fb1b2c1b20, ef91b735b7, d330f31950
```
.dockerignore (new file, 94 lines)

```
# Git
.git
.gitignore
.gitattributes

# CI
.codeclimate.yml
.travis.yml
.taskcluster.yml

# Docker
docker-compose.yml
Dockerfile
.docker
.dockerignore

# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml

# Translations
*.mo
*.pot

# Ignore Node.js dependencies (they will be installed inside the container)
node_modules/

# Ignore database and log files
*.db
*.log
*.db-journal

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Virtual environment
.env
.venv/
venv/

# PyCharm
.idea

# Python mode for VIM
.ropeproject
**/.ropeproject

# Vim swap files
**/*.swp

# VS Code
.vscode/
```
.gitignore (vendored)

```diff
@@ -6,3 +6,7 @@ __pycache__/
 *.db
 *.log
 reddit_stock_analyzer.egg-info/
+images/
+public/
+config/certbot/
+node_modules/
```
Dockerfile (new file, 31 lines)

```dockerfile
FROM node:24.5.0-bookworm-slim AS builder

WORKDIR /usr/src/build

COPY package.json package-lock.json ./

RUN npm install

COPY tailwind.config.js ./
COPY templates/ ./templates/
COPY static/css/input.css ./static/css/input.css

RUN npx tailwindcss -i ./static/css/input.css -o ./static/css/style.css --minify

FROM python:3.13.6-slim

EXPOSE 5000

WORKDIR /usr/src/app

COPY requirements.txt .

RUN python3 -m pip install --upgrade pip
RUN python3 -m pip install --no-cache-dir -r requirements.txt

COPY . .
COPY --from=builder /usr/src/build/static/css/style.css ./static/css/style.css

RUN python3 -m pip install -e .

CMD [ "gunicorn", "--config", "rstat_tool/gunicorn-cfg.py", "-k", "sync", "rstat_tool.dashboard:app" ]
```
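The `CMD` line references `rstat_tool/gunicorn-cfg.py`, which is not included in this diff. For orientation, here is a minimal sketch of what a gunicorn config module like that could contain; every value below is an assumption inferred from `EXPOSE 5000`, not the repo's actual settings (gunicorn config files are plain Python):

```python
# Hypothetical gunicorn-cfg.py sketch; the real file is not shown in this diff.
# Gunicorn imports this module and reads top-level names as settings.

bind = "0.0.0.0:5000"  # assumed: matches EXPOSE 5000 in the Dockerfile
workers = 2            # assumed worker count for the sync worker class
timeout = 60           # assumed: restart workers silent for more than 60s
accesslog = "-"        # "-" sends access logs to stdout for `docker logs`
errorlog = "-"         # same for the error log
```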
README.md (rewritten, 158 -> 308 lines)

Previous version (removed):

````markdown
# rstat - Reddit Stock Analyzer

A powerful, installable command-line tool and web dashboard to scan Reddit for stock ticker mentions, perform sentiment analysis, generate insightful reports, and create shareable summary images.

## Key Features

* **Dual-Interface:** Use a flexible command-line tool (`rstat`) for data collection and a simple web dashboard (`rstat-dashboard`) for data visualization.
* **Flexible Data Scraping:**
    * Scan subreddits from a config file or target a single subreddit on the fly.
    * Configure the time window to scan posts from the last 24 hours (for daily cron jobs) or back-fill data from several past days (e.g., last 7 days).
    * Fetches from `/new` to capture the most recent discussions.
* **Deep Analysis & Storage:**
    * Scans both post titles and comments, differentiating between the two.
    * Performs a "deep dive" analysis on posts to calculate the average sentiment of the entire comment section.
    * Persists all data in a local SQLite database (`reddit_stocks.db`) to track trends over time.
* **Rich Data Enrichment:**
    * Calculates sentiment (Bullish, Bearish, Neutral) for every mention using NLTK.
    * Fetches and stores daily closing prices and market capitalization from Yahoo Finance.
* **Interactive Web Dashboard:**
    * View Top 10 tickers across all subreddits or on a per-subreddit basis.
    * Click any ticker to get a "Deep Dive" page, showing every post it was mentioned in.
* **Shareable Summary Images:**
    * Generate clean, dark-mode summary images for both daily and weekly sentiment for any subreddit, perfect for sharing.
* **High-Quality Data:**
    * Uses a configurable blacklist and smart filtering to reduce false positives.
    * Automatically cleans the database of invalid tickers if the blacklist is updated.

## Project Structure

```
reddit_stock_analyzer/
├── .env                # Your secret API keys
├── requirements.txt    # Project dependencies
├── setup.py            # Installation script for the tool
├── subreddits.json     # Default list of subreddits to scan
├── templates/          # HTML templates for the web dashboard
│   ├── base.html
│   ├── index.html
│   ├── subreddit.html
│   ├── deep_dive.html
│   ├── image_view.html
│   └── weekly_image_view.html
└── rstat_tool/         # The main source code package
    ├── __init__.py
    ├── main.py         # Scraper entry point and CLI logic
    ├── dashboard.py    # Web dashboard entry point (Flask app)
    ├── database.py     # All SQLite database functions
    └── ...
```

## Setup and Installation

Follow these steps to set up the project on your local machine.

### 1. Prerequisites
* Python 3.7+
* Git

### 2. Clone the Repository
```bash
git clone <your-repository-url>
cd reddit_stock_analyzer
```

### 3. Set Up a Python Virtual Environment
It is highly recommended to use a virtual environment to manage dependencies.

**On macOS / Linux:**
```bash
python3 -m venv .venv
source .venv/bin/activate
```

**On Windows:**
```bash
python -m venv .venv
.\.venv\Scripts\activate
```

### 4. Install Dependencies
```bash
pip install -r requirements.txt
```

### 5. Configure Reddit API Credentials
1. Go to the [Reddit Apps preferences page](https://www.reddit.com/prefs/apps) and create a new "script" app.
2. Create a file named `.env` in the root of the project directory.
3. Add your credentials to the `.env` file like this:

```
REDDIT_CLIENT_ID=your_client_id_from_reddit
REDDIT_CLIENT_SECRET=your_client_secret_from_reddit
REDDIT_USER_AGENT=A custom user agent string (e.g., python:rstat:v1.2)
```

### 6. Set Up NLTK
Run the included setup script **once** to download the required `vader_lexicon` for sentiment analysis.
```bash
python rstat_tool/setup_nltk.py
```

### 7. Build and Install the Commands
Install the tool in "editable" mode. This creates the `rstat` and `rstat-dashboard` commands in your virtual environment and links them to your source code.

```bash
pip install -e .
```
The installation is now complete.

---

## Usage

The tool is split into two commands: one for gathering data and one for viewing it.

### 1. The Scraper (`rstat`)

This is the command-line tool you will use to populate the database. It is highly flexible.

**Common Commands:**

* **Run a daily scan (for cron jobs):** Scans subreddits from `subreddits.json` for posts in the last 24 hours.
    ```bash
    rstat --config subreddits.json --days 1
    ```

* **Scan a single subreddit:** Ignores the config file and scans just one subreddit.
    ```bash
    rstat --subreddit wallstreetbets --days 1
    ```

* **Back-fill data for last week:** Scans a specific subreddit for all new posts in the last 7 days.
    ```bash
    rstat --subreddit Tollbugatabets --days 7
    ```

* **Get help and see all options:**
    ```bash
    rstat --help
    ```

### 2. The Web Dashboard (`rstat-dashboard`)

This command starts a local web server to let you explore the data you've collected.

**How to Run:**
1. Make sure you have run the `rstat` scraper at least once to populate the database.
2. Start the web server:
    ```bash
    rstat-dashboard
    ```
3. Open your web browser and navigate to **http://127.0.0.1:5000**.

**Dashboard Features:**
* **Main Page:** Shows the Top 10 most mentioned tickers across all scanned subreddits.
* **Subreddit Pages:** Click any subreddit in the navigation bar to see a dashboard specific to that community.
* **Deep Dive:** In any table, click on a ticker's symbol to see a detailed breakdown of every post it was mentioned in.
* **Shareable Images:** On a subreddit's page, click "(View Daily Image)" or "(View Weekly Image)" to generate a polished, shareable summary card.
````

New version (added):

````markdown
<div align="center">

# RSTAT — Reddit Stock Analyzer

Scan Reddit for stock ticker mentions, score sentiment, enrich with price/market cap, and explore the results in a clean web dashboard. Automate shareable images and post them to Reddit.

</div>

## Highlights

- CLI + Web UI: Collect data with `rstat`, browse it with `rstat-dashboard`.
- Smart ticker parsing: Prefer $TSLA/$AAPL “golden” matches; fall back to filtered ALL-CAPS words.
- Sentiment: VADER (NLTK) scores for titles and comments; “deep dive” averages per post.
- Storage: Local SQLite database `reddit_stocks.db` with de-duped mentions and post analytics.
- Enrichment: Yahoo Finance market cap + latest close fetched in batch and on-demand.
- Images: Export polished daily/weekly summary PNGs for subreddits or “overall”.
- Automation: Optional cron job plus one-command posting to Reddit with OAuth refresh tokens.

## Repository layout

```
.
├── Dockerfile                 # Multi-stage build (Tailwind -> Python + gunicorn)
├── docker-compose.yml         # Prod (nginx + varnish optional) + dashboard
├── docker-compose-dev.yml     # Dev compose (local nginx)
├── requirements.txt           # Python deps
├── setup.py                   # Installs console scripts
├── subreddits.json            # Default subreddits list
├── reddit_stocks.db           # SQLite database (generated/updated by CLI)
├── export_image.py            # Generate shareable PNGs (Playwright)
├── post_to_reddit.py          # Post latest PNG to Reddit
├── get_refresh_token.py       # One-time OAuth2 refresh token helper
├── fetch_close_price.py       # Utility for closing price (yfinance)
├── fetch_market_cap.py        # Utility for market cap (yfinance)
├── rstat_tool/
│   ├── main.py                # CLI entry (rstat)
│   ├── dashboard.py           # Flask app entry (rstat-dashboard)
│   ├── database.py            # SQLite schema + queries
│   ├── ticker_extractor.py    # Ticker parsing + blacklist
│   ├── sentiment_analyzer.py  # VADER sentiment
│   ├── cleanup.py             # Cleanup utilities (rstat-cleanup)
│   ├── flair_finder.py        # Fetch subreddit flair IDs (rstat-flairs)
│   ├── logger_setup.py        # Logging
│   └── setup_nltk.py          # One-time VADER download
├── templates/                 # Jinja2 templates (Tailwind 4 styling)
└── static/                    # Favicon + generated CSS (style.css)
```

## Requirements

- Python 3.10+ (Docker image uses Python 3.13-slim)
- Reddit API app (script type) for read + submit
- For optional image export: Playwright browsers
- For UI development (optional): Node 18+ to rebuild Tailwind CSS

## Setup

1) Clone and enter the repo

```bash
git clone <your-repo>
cd reddit_stock_analyzer
```

2) Create and activate a virtualenv

- bash/zsh:
```bash
python3 -m venv .venv
source .venv/bin/activate
```

- fish:
```fish
python3 -m venv .venv
source .venv/bin/activate.fish
```

3) Install Python dependencies and commands

```bash
pip install -r requirements.txt
pip install -e .
```

4) Configure environment

Create a `.env` file in the repo root with your Reddit app credentials:

```
REDDIT_CLIENT_ID=your_client_id
REDDIT_CLIENT_SECRET=your_client_secret
REDDIT_USER_AGENT=python:rstat:v1.0 (by u/yourname)
```

Optional (after OAuth step below):

```
REDDIT_REFRESH_TOKEN=your_refresh_token
```

5) One-time NLTK setup

```bash
python rstat_tool/setup_nltk.py
```

6) Configure subreddits (optional)

Edit `subreddits.json` to your liking. It ships with a sane default list.

## CLI usage (rstat)

The `rstat` command collects Reddit data and updates the database. Credentials are read from `.env`.

Common flags (see `rstat --help`):

- `--config FILE` Use a JSON file with `{"subreddits": [ ... ]}` (default: `subreddits.json`)
- `--subreddit NAME` Scan a single subreddit instead of the config
- `--days N` Only scan posts from the last N days (default 1)
- `--posts N` Max posts per subreddit to check (default 200)
- `--comments N` Max comments per post to scan (default 100)
- `--no-financials` Skip Yahoo Finance during the scan (faster)
- `--update-top-tickers` Update financials for tickers that are currently top daily/weekly
- `--update-financials-only [TICKER]` Update all or a single ticker’s market cap/close
- `--stdout` Log to console as well as file; `--debug` for verbose

Examples:

```bash
# Scan configured subs for last 24h, including financials
rstat --days 1

# Target a single subreddit for the past week, scan more comments
rstat --subreddit wallstreetbets --days 7 --comments 250

# Skip financials during scan, then update only top tickers
rstat --no-financials
rstat --update-top-tickers

# Update financials for all tickers in DB
rstat --update-financials-only

# Update a single ticker (case-insensitive)
rstat --update-financials-only TSLA
```

How mentions are detected:

- If a post contains any $TICKER (e.g., `$TSLA`) anywhere, we use “golden-only” mode: only $-prefixed tickers are considered.
- Otherwise, we fall back to filtered ALL-CAPS 2–5 letter words, excluding a large blacklist to avoid false positives.
- Title tickers attribute all comments in the thread; otherwise, we scan comments directly for mentions.

## Web dashboard (rstat-dashboard)

Start the dashboard and open http://127.0.0.1:5000

```bash
rstat-dashboard
```

Features:

- Overall top 10 (daily/weekly) across all subs
- Per-subreddit dashboards (daily/weekly)
- Deep Dive pages listing posts analyzed for a ticker
- Shareable image-friendly views (UI hides nav when `?image=true`)

The dashboard reads from `reddit_stocks.db`. Run `rstat` first so you have data.

## Image export (export_image.py)

Exports a high-res PNG of the dashboard views via Playwright. Note: the script currently uses `https://rstat.net` as its base URL.

```bash
# Overall daily image
python export_image.py --overall

# Subreddit daily image
python export_image.py --subreddit wallstreetbets

# Weekly view
python export_image.py --subreddit wallstreetbets --weekly
```

Output files are saved into the `images/` folder, e.g. `overall_summary_daily_1700000000.png`.

Tip: If you want to export from a local dashboard instead of rstat.net, edit `base_url` in `export_image.py`.

## Post images to Reddit (post_to_reddit.py)

One-time OAuth2 step to obtain a refresh token:

1) In your Reddit app settings, set the redirect URI to exactly `http://localhost:5000` (matches the script).
2) Run:

```bash
python get_refresh_token.py
```

Follow the on-screen steps: open the generated URL, allow, copy the redirected URL, paste back. Add the printed token to `.env` as `REDDIT_REFRESH_TOKEN`.

Now you can post:

```bash
# Post the most recent overall image to r/rstat
python post_to_reddit.py

# Post the most recent daily image for a subreddit
python post_to_reddit.py --subreddit wallstreetbets

# Post weekly image for a subreddit
python post_to_reddit.py --subreddit wallstreetbets --weekly

# Choose a target subreddit and (optionally) a flair ID
python post_to_reddit.py --subreddit wallstreetbets --target-subreddit rstat --flair-id <ID>
```

Need a flair ID? Use the helper:

```bash
rstat-flairs wallstreetbets
```

## Cleanup utilities (rstat-cleanup)

Remove blacklisted “ticker” rows and/or purge data for subreddits no longer in your config.

```bash
# Show help
rstat-cleanup --help

# Remove tickers that are in the internal COMMON_WORDS_BLACKLIST
rstat-cleanup --tickers

# Remove any subreddit data not in subreddits.json
rstat-cleanup --subreddits

# Use a custom config file
rstat-cleanup --subreddits my_subs.json

# Run both tasks
rstat-cleanup --all
```

## Automation (cron)

An example `run_daily_job.sh` is provided. Update `BASE_DIR` and make it executable:

```bash
chmod +x run_daily_job.sh
```

Add a cron entry (example 22:00 daily):

```
0 22 * * * /absolute/path/to/reddit_stock_analyzer/run_daily_job.sh >> /absolute/path/to/reddit_stock_analyzer/cron.log 2>&1
```

## Docker

Builds a Tailwind CSS layer, then a Python runtime with gunicorn. The compose files include optional nginx and varnish.

Quick start for the dashboard only (uses your host `reddit_stocks.db`):

```bash
docker compose up -d rstat-dashboard
```

Notes:

- The `rstat-dashboard` container mounts `./reddit_stocks.db` read-only. Populate it by running `rstat` on the host (or add a separate CLI container).
- Prod compose includes nginx (and optional certbot/varnish) configs under `config/`.

## Data model (SQLite)

- `tickers(id, symbol UNIQUE, market_cap, closing_price, last_updated)`
- `subreddits(id, name UNIQUE)`
- `mentions(id, ticker_id, subreddit_id, post_id, comment_id NULLABLE, mention_type, mention_sentiment, mention_timestamp, UNIQUE(ticker_id, post_id, comment_id))`
- `posts(id, post_id UNIQUE, title, post_url, subreddit_id, post_timestamp, comment_count, avg_comment_sentiment)`

Uniqueness prevents duplicates across post/comment granularity. Cleanup helpers remove blacklisted “tickers” and stale subreddits.

## UI and Tailwind

The CSS (`static/css/style.css`) is generated from `static/css/input.css` using Tailwind 4 during Docker build. If you want to tweak UI locally:

```bash
npm install
npx tailwindcss -i ./static/css/input.css -o ./static/css/style.css --minify
```

## Troubleshooting

- Missing VADER: Run `python rstat_tool/setup_nltk.py` once (in your venv).
- Playwright errors: Run `playwright install` once; ensure lib dependencies are present on your OS.
- yfinance returns None: Retry later; some tickers or regions can be spotty. The app tolerates missing financials.
- Flair required: If posting fails with flair errors, fetch a valid flair ID and pass `--flair-id`.
- Empty dashboards: Make sure `rstat` ran recently and `.env` is set; check `rstat.log`.
- DB locked: If you edit while the dashboard is reading, wait or stop the server; SQLite locks are short-lived.

## Safety and notes

- Do not commit `.env` or your database if it contains sensitive data.
- This project is for research/entertainment. Not investment advice.

---

Made with Python, Flask, NLTK, Playwright, and Tailwind.
````
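The mention-detection rules described in the new README ("golden-only" $-prefixed matches with an ALL-CAPS fallback and a blacklist) can be illustrated with a small sketch. This is not the repo's `ticker_extractor.py`; it is a minimal re-statement of the documented behavior, and the tiny blacklist here is a placeholder for the project's much larger one:

```python
import re

# Placeholder for the project's large COMMON_WORDS_BLACKLIST.
BLACKLIST = {"CEO", "USA", "YOLO", "DD", "IMO"}

GOLDEN = re.compile(r"\$([A-Z]{1,5})\b")    # $TSLA-style "golden" mentions
FALLBACK = re.compile(r"\b([A-Z]{2,5})\b")  # bare ALL-CAPS words, 2-5 letters

def extract_tickers(text: str) -> set[str]:
    golden = set(GOLDEN.findall(text))
    if golden:
        # Golden-only mode: if any $-prefixed ticker appears, trust only those.
        return golden
    # Fallback: filtered ALL-CAPS words, minus the blacklist.
    return {w for w in FALLBACK.findall(text) if w not in BLACKLIST}

print(extract_tickers("I think $TSLA beats F and GM"))  # {'TSLA'}
print(extract_tickers("GME to the moon, not YOLO"))     # {'GME'}
```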
config/nginx/dev/localhost.conf (new file, 17 lines)

```nginx
server {
    listen 80 default_server;
    listen [::]:80 default_server;

    server_name _;
    server_tokens off;

    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;

        proxy_pass http://varnish:80;
        proxy_redirect off;
    }
}
```
config/nginx/rstat.net-NOSSL.conf (new file, 15 lines)

```nginx
server {
    listen 80 default_server;
    listen [::]:80 default_server;

    server_name www.rstat.net rstat.net;
    server_tokens off;

    location /.well-known/acme-challenge/ {
        root /usr/share/nginx/certbot;
    }

    location / {
        return 301 https://rstat.net$request_uri;
    }
}
```
config/nginx/rstat.net-SSL.conf (new file, 67 lines)

```nginx
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name www.rstat.net;
    server_tokens off;

    http2 on;

    ssl_certificate /etc/nginx/ssl/live/www.rstat.net/fullchain.pem;
    ssl_certificate_key /etc/nginx/ssl/live/www.rstat.net/privkey.pem;

    # HSTS (ngx_http_headers_module is required) (63072000 seconds)
    add_header Strict-Transport-Security "max-age=63072000" always;

    return 301 https://rstat.net$request_uri;
}

server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name rstat.net;
    server_tokens off;

    http2 on;

    ssl_certificate /etc/nginx/ssl/live/www.rstat.net/fullchain.pem;
    ssl_certificate_key /etc/nginx/ssl/live/www.rstat.net/privkey.pem;

    # HSTS (ngx_http_headers_module is required) (63072000 seconds)
    add_header Strict-Transport-Security "max-age=63072000" always;

    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;

        proxy_pass http://varnish:80;
        proxy_redirect off;
    }
}

# intermediate configuration
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ecdh_curve X25519:prime256v1:secp384r1;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
ssl_prefer_server_ciphers off;

# see also ssl_session_ticket_key alternative to stateful session cache
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m;  # about 40000 sessions

# curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam
ssl_dhparam /etc/nginx/ssl/dhparam.pem;

# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;

# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/nginx/ssl/live/www.rstat.net/chain.pem;

# replace with the IP address of your resolver;
# async 'resolver' is important for proper operation of OCSP stapling
resolver 67.207.67.3;
```
config/varnish/default.vcl (new file, 105 lines)

```vcl
vcl 4.1;

# https://github.com/varnish/toolbox/tree/master/vcls/hit-miss
include "hit-miss.vcl";
import std;

backend default {
    .host = "rstat-dashboard";
    .port = "5000";
}

sub vcl_recv {
    if (req.method != "GET" &&
        req.method != "HEAD" &&
        req.method != "PUT" &&
        req.method != "POST" &&
        req.method != "TRACE" &&
        req.method != "OPTIONS" &&
        req.method != "DELETE") {
        /* Non-RFC2616 or CONNECT which is weird. */
        return (pipe);
    }

    # We only deal with GET and HEAD by default
    if (req.method != "GET" && req.method != "HEAD") {
        return (pass);
    }

    set req.url = regsub(req.url, "^http[s]?://", "");

    # static files are always cacheable. remove SSL flag and cookie
    if (req.url ~ "^/(pub/)?(media|static)/.*\.(ico|jpg|jpeg|png|gif|tiff|bmp|mp3|ogg|svg|swf|woff|woff2|eot|ttf|otf)$") {
        unset req.http.Https;
        unset req.http.X-Forwarded-Proto;
        unset req.http.Cookie;
        unset req.http.css;
        unset req.http.js;
    }

    return (hash);
}

sub vcl_hash {
    if (req.http.host) {
        hash_data(req.http.host);
    } else {
        hash_data(server.ip);
    }

    # To make sure http users don't see ssl warning
    if (req.http.X-Forwarded-Proto) {
        hash_data(req.http.X-Forwarded-Proto);
    }
}

sub vcl_backend_response {
    set beresp.http.X-Host = bereq.http.host;

    set beresp.ttl = 1m;
    # Enable stale content serving
    set beresp.grace = 24h;
    # Preserve the origin's Cache-Control header for client-side caching
    if (beresp.http.Cache-Control) {
        set beresp.http.X-Orig-Cache-Control = beresp.http.Cache-Control;
    }

    # validate if we need to cache it and prevent from setting cookie
    # images, css and js are cacheable by default so we have to remove cookie also
    if (beresp.ttl > 0s && (bereq.method == "GET" || bereq.method == "HEAD")) {
        unset beresp.http.set-cookie;
        unset beresp.http.set-css;
        unset beresp.http.set-js;
        if (bereq.url !~ "\.(ico|jpg|jpeg|png|gif|tiff|bmp|gz|tgz|bz2|tbz|mp3|ogg|svg|swf|woff|woff2|eot|ttf|otf)(\?|$)") {
            set beresp.http.Pragma = "no-cache";
            set beresp.http.Expires = "-1";
            set beresp.http.Cache-Control = "no-store, no-cache, must-revalidate, max-age=0";
            set beresp.grace = 1m;
        }
    }

    # If page is not cacheable then bypass varnish for 2 minutes as Hit-For-Pass
    if (beresp.ttl <= 0s ||
        beresp.http.Surrogate-control ~ "no-store" ||
        (!beresp.http.Surrogate-Control && beresp.http.Vary == "*")) {
        # Mark as Hit-For-Pass for the next 2 minutes
        set beresp.ttl = 120s;
        set beresp.uncacheable = true;
    }
    return (deliver);
}

sub vcl_deliver {
    # Restore the origin's Cache-Control header for the browser
    if (resp.http.X-Orig-Cache-Control) {
        set resp.http.Cache-Control = resp.http.X-Orig-Cache-Control;
        unset resp.http.X-Orig-Cache-Control;
    } else {
        # If no Cache-Control was set by the origin, we'll set a default
        set resp.http.Cache-Control = "no-cache, must-revalidate";
    }

    unset resp.http.Server;
    unset resp.http.Via;
    unset resp.http.Link;
}
```
config/varnish/dev.vcl (new file, 105 lines)

Identical, line for line, to `config/varnish/default.vcl` above; the compose files simply mount one or the other (`dev.vcl` in docker-compose-dev.yml, `default.vcl` in docker-compose.yml).
config/varnish/hit-miss.vcl (new file, 39 lines)

```vcl
sub vcl_recv {
    unset req.http.x-cache;
}

sub vcl_hit {
    set req.http.x-cache = "hit";
    if (obj.ttl <= 0s && obj.grace > 0s) {
        set req.http.x-cache = "hit graced";
    }
}

sub vcl_miss {
    set req.http.x-cache = "miss";
}

sub vcl_pass {
    set req.http.x-cache = "pass";
}

sub vcl_pipe {
    set req.http.x-cache = "pipe uncacheable";
}

sub vcl_synth {
    set req.http.x-cache = "synth synth";
    # comment the following line to omit the x-cache header in the response
    set resp.http.x-cache = req.http.x-cache;
}

sub vcl_deliver {
    if (obj.uncacheable) {
        set req.http.x-cache = req.http.x-cache + " uncacheable";
    } else {
        set req.http.x-cache = req.http.x-cache + " cached";
    }

    # comment the following line to omit the x-cache header in the response
    set resp.http.x-cache = req.http.x-cache;
}
```
docker-compose-dev.yml (new file, 31 lines; stray trailing quotes on the two varnish volume lines have been removed, as they would make the YAML invalid)

```yaml
name: rstat

services:

  rstat-dashboard:
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    volumes:
      - ./reddit_stocks.db:/usr/src/app/reddit_stocks.db:ro
    ports:
      - "5000:5000"

  nginx:
    image: nginx:1.29.0
    restart: always
    volumes:
      - ./config/nginx/dev:/etc/nginx/conf.d:ro
      - ./public:/usr/share/nginx:ro
    ports:
      - "80:80"

  varnish:
    image: varnish:7.7.1
    restart: always
    volumes:
      - ./config/varnish/dev.vcl:/etc/varnish/default.vcl:ro
      - ./config/varnish/hit-miss.vcl:/etc/varnish/hit-miss.vcl:ro
    tmpfs:
      - /var/lib/varnish/varnishd:exec
```
docker-compose.yml (new file, 39 lines; the same stray trailing quotes on the varnish volume lines have been removed)

```yaml
name: rstat

services:

  rstat-dashboard:
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    volumes:
      - ./reddit_stocks.db:/usr/src/app/reddit_stocks.db:ro
    ports:
      - "5000:5000"

  nginx:
    image: nginx:1.29.0
    restart: always
    volumes:
      - ./config/nginx:/etc/nginx/conf.d:ro
      - ./config/certbot:/etc/nginx/ssl:ro
      - ./public:/usr/share/nginx:ro
    ports:
      - "80:80"
      - "443:443"

  varnish:
    image: varnish:7.7.1
    restart: always
    volumes:
      - ./config/varnish/default.vcl:/etc/varnish/default.vcl:ro
      - ./config/varnish/hit-miss.vcl:/etc/varnish/hit-miss.vcl:ro
    tmpfs:
      - /var/lib/varnish/varnishd:exec

  certbot:
    image: certbot/certbot:v4.1.1
    volumes:
      - ./config/certbot:/etc/letsencrypt:rw
      - ./public/certbot:/usr/share/nginx/certbot:rw
```
export_image.py (new file, 86 lines)

```python
# export_image.py

import argparse
import os
import time
from playwright.sync_api import sync_playwright
from pathlib import Path

# Define the output directory as a constant
OUTPUT_DIR = "images"


def export_image(url_path, filename_prefix):
    """
    Launches a headless browser, navigates to a URL path, and screenshots
    the main content element, saving it to the OUTPUT_DIR.
    """
    print(f"-> Preparing to export image for: {filename_prefix}")

    os.makedirs(OUTPUT_DIR, exist_ok=True)

    base_url = "https://rstat.net"
    # Ensure the URL path starts correctly
    url_path = url_path.lstrip("/")
    url = f"{base_url}/{url_path}"

    output_file = os.path.join(OUTPUT_DIR, f"{filename_prefix}_{int(time.time())}.png")

    with sync_playwright() as p:
        try:
            browser = p.chromium.launch()
            page = browser.new_page()

            page.set_viewport_size({"width": 1920, "height": 1080})

            print(f"  Navigating to {url}...")
            # Use 'domcontentloaded' for faster navigation when possible
            page.goto(url, wait_until="domcontentloaded")

            # Give fonts and styles a moment to render after the DOM is ready
            page.wait_for_timeout(500)

            # Target the class of the main content card
            element = page.locator(".max-w-3xl")

            print(f"  Saving screenshot to {output_file}...")
            element.screenshot(path=output_file)

            browser.close()
            print(f"-> Export complete! Image saved to {output_file}")

        except Exception as e:
            print(f"\nAn error occurred during export: {e}")
            print(
                "Please ensure the 'rstat-dashboard' server is running in another terminal."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Export subreddit sentiment images.")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-s", "--subreddit", help="The name of the subreddit to export.")
    group.add_argument(
        "-o", "--overall", action="store_true", help="Export the overall summary image."
    )

    parser.add_argument(
        "-w",
        "--weekly",
        action="store_true",
        help="Export the weekly view instead of the daily view.",
    )
    args = parser.parse_args()

    view_type = "weekly" if args.weekly else "daily"

    if args.subreddit:
        url_path_to_render = f"subreddit/{args.subreddit}?view={view_type}&image=true"
        filename_prefix_to_save = f"{args.subreddit}_{view_type}"
        export_image(url_path_to_render, filename_prefix_to_save)

    elif args.overall:
        url_path_to_render = f"/?view={view_type}&image=true"
        filename_prefix_to_save = f"overall_summary_{view_type}"
        export_image(url_path_to_render, filename_prefix_to_save)
```
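`export_image()` is normally driven by the CLI flags above, but it can also be imported and called directly; a small usage sketch mirroring the argparse branches (the URL path format is taken from the code above):

```python
from export_image import export_image

# Equivalent to: python export_image.py --subreddit wallstreetbets --weekly
export_image("subreddit/wallstreetbets?view=weekly&image=true", "wallstreetbets_weekly")

# Equivalent to: python export_image.py --overall
export_image("/?view=daily&image=true", "overall_summary_daily")
```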
extract-words-from-log.sh (new executable file, 3 lines)

```sh
#!/bin/sh

grep possibly rstat.log | grep -v 'YFPricesMissingError' | awk '{print $6 }' | tr -d : | tr -d \$ | sort -u | awk '{printf "%s\"%s\"", sep, $0; sep=", "} END {print ""}'
```
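The one-liner pulls the sixth whitespace-separated field out of every log line containing "possibly" (skipping `YFPricesMissingError` lines), strips colons and dollar signs, de-duplicates, and prints a quoted, comma-separated list, which is handy for pasting into the blacklist. A rough Python equivalent of the same pipeline, assuming the same `rstat.log` line format:

```python
# Rough Python equivalent of extract-words-from-log.sh (assumes the same log format).
words = set()
with open("rstat.log") as f:
    for line in f:
        if "possibly" in line and "YFPricesMissingError" not in line:
            fields = line.split()
            if len(fields) >= 6:
                # awk's $6, with ':' and '$' characters removed
                words.add(fields[5].replace(":", "").replace("$", ""))

print(", ".join(f'"{w}"' for w in sorted(words)))
```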
fetch_close_price.py (new file, 37 lines)

```python
# fetch_close_price.py
# This script does ONLY ONE THING: gets the closing price using the stable Ticker.history() method.
import sys
import json
import yfinance as yf
import pandas as pd
import logging

# Suppress verbose yfinance logging in this isolated process
logging.getLogger("yfinance").setLevel(logging.CRITICAL)

if __name__ == "__main__":
    if len(sys.argv) < 2:
        # Exit with an error code if no ticker is provided
        sys.exit(1)

    ticker_symbol = sys.argv[1]

    try:
        # Instead of the global yf.download(), we use the Ticker object's .history() method.
        # This uses a different internal code path that we have proven is stable.
        ticker = yf.Ticker(ticker_symbol)
        data = ticker.history(period="2d", auto_adjust=False)

        closing_price = None
        if not data.empty:
            last_close_raw = data["Close"].iloc[-1]
            if pd.notna(last_close_raw):
                closing_price = float(last_close_raw)

        # On success, print JSON to stdout and exit cleanly
        print(json.dumps({"closing_price": closing_price}))
        sys.exit(0)
    except Exception:
        # If any error occurs, print an empty JSON and exit with an error code
        print(json.dumps({"closing_price": None}))
        sys.exit(1)
```
fetch_market_cap.py (new file, 28 lines)

```python
# fetch_market_cap.py
# This script does ONLY ONE THING: gets the market cap.
import sys
import json
import yfinance as yf
import logging

# Suppress verbose yfinance logging in this isolated process
logging.getLogger("yfinance").setLevel(logging.CRITICAL)

if __name__ == "__main__":
    if len(sys.argv) < 2:
        # Exit with an error code if no ticker is provided
        sys.exit(1)

    ticker_symbol = sys.argv[1]

    try:
        # Directly get the market cap
        market_cap = yf.Ticker(ticker_symbol).info.get("marketCap")

        # On success, print JSON to stdout and exit cleanly
        print(json.dumps({"market_cap": market_cap}))
        sys.exit(0)
    except Exception:
        # If any error occurs, print an empty JSON and exit with an error code
        print(json.dumps({"market_cap": None}))
        sys.exit(1)
```
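Both fetch scripts share the same contract: ticker symbol as the first argument, a one-line JSON object on stdout, exit code 0 on success and 1 on failure. That makes them easy to drive from a parent process, so a yfinance hang or crash cannot take down the main scraper. A minimal sketch of such a caller (this helper and its timeout value are assumptions for illustration, not code from the repo):

```python
import json
import subprocess
import sys

def fetch_isolated(script: str, ticker: str, timeout: int = 30) -> dict:
    """Run one of the fetch_* helper scripts in a child process and parse its JSON."""
    try:
        result = subprocess.run(
            [sys.executable, script, ticker],
            capture_output=True, text=True, timeout=timeout,
        )
        if result.returncode == 0 and result.stdout.strip():
            return json.loads(result.stdout)
    except (subprocess.TimeoutExpired, json.JSONDecodeError):
        pass  # treat hangs and malformed output the same as a failed fetch
    return {}

print(fetch_isolated("fetch_market_cap.py", "TSLA"))   # e.g. {'market_cap': ...}
print(fetch_isolated("fetch_close_price.py", "TSLA"))  # e.g. {'closing_price': ...}
```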
get_refresh_token.py (new file, 82 lines)

```python
# get_refresh_token.py
# A temporary, one-time-use script to get your OAuth2 refresh token.

import praw
from dotenv import load_dotenv
import os
import random
import socket

# --- IMPORTANT: Ensure this matches the "redirect uri" in your Reddit App settings ---
REDIRECT_URI = "http://localhost:5000"


def main():
    print("--- RSTAT Refresh Token Generator ---")
    load_dotenv()
    client_id = os.getenv("REDDIT_CLIENT_ID")
    client_secret = os.getenv("REDDIT_CLIENT_SECRET")

    if not all([client_id, client_secret]):
        print(
            "Error: REDDIT_CLIENT_ID and REDDIT_CLIENT_SECRET must be set in your .env file."
        )
        return

    # 1. Initialize PRAW
    reddit = praw.Reddit(
        client_id=client_id,
        client_secret=client_secret,
        redirect_uri=REDIRECT_URI,
        user_agent="rstat_token_fetcher (by u/YourUsername)",  # Can be anything
    )

    # 2. Generate the authorization URL
    # Scopes define what our script is allowed to do. 'identity' and 'submit' are needed.
    scopes = ["identity", "submit", "read"]
    state = str(random.randint(0, 65536))
    auth_url = reddit.auth.url(scopes, state, "permanent")

    print("\nStep 1: Open this URL in your browser:\n")
    print(auth_url)

    print(
        "\nStep 2: Log in to Reddit, click 'Allow', and you'll be redirected to a 'page not found'."
    )
    print(
        "Step 3: Copy the ENTIRE URL from your browser's address bar after the redirect."
    )

    # 3. Get the redirected URL from the user
    redirected_url = input(
        "\nStep 4: Paste the full redirected URL here and press Enter:\n> "
    )

    # 4. Exchange the authorization code for a refresh token
    try:
        # The state is used to prevent CSRF attacks, we're just checking it matches
        assert state == redirected_url.split("state=")[1].split("&")[0]
        code = redirected_url.split("code=")[1].split("#_")[0]

        print("\nAuthorization code received. Fetching refresh token...")

        # This is the line that gets the key!
        refresh_token = reddit.auth.authorize(code)

        print("\n--- SUCCESS! ---")
        print("Your Refresh Token is:\n")
        print(refresh_token)
        print(
            "\nStep 5: Copy this token and add it to your .env file as REDDIT_REFRESH_TOKEN."
        )
        print(
            "Step 6: You can now delete your REDDIT_USERNAME and REDDIT_PASSWORD from the .env file."
        )

    except Exception as e:
        print(f"\nAn error occurred: {e}")
        print("Please make sure you copied the full URL.")


if __name__ == "__main__":
    main()
```
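The string splitting above works for well-formed redirect URLs but would break if the query parameters ever arrived in a different order. A more robust way to pull `state` and `code` out of the pasted URL, sketched with only the standard library (this helper is an illustration, not part of the repo):

```python
from urllib.parse import urlparse, parse_qs

def extract_oauth_params(redirected_url: str) -> tuple[str, str]:
    """Return (state, code) from the OAuth redirect URL, regardless of parameter order."""
    query = parse_qs(urlparse(redirected_url).query)
    return query["state"][0], query["code"][0]

state, code = extract_oauth_params("http://localhost:5000/?state=12345&code=abcdef#_")
print(state, code)  # 12345 abcdef
```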
package-lock.json (generated, new file, 1,294 lines)
package.json (new file, 25 lines)

```json
{
  "name": "reddit_stock_analyzer",
  "version": "1.0.0",
  "description": "A powerful, installable command-line tool and web dashboard to scan Reddit for stock ticker mentions, perform sentiment analysis, generate insightful reports, and create shareable summary images.",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "repository": {
    "type": "git",
    "url": "ssh://git@git.pkhamre.com:43721/pkhamre/reddit_stock_analyzer.git"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "type": "commonjs",
  "devDependencies": {
    "@tailwindcss/cli": "^4.1.11",
    "@tailwindcss/typography": "^0.5.16",
    "tailwindcss": "^4.1.11"
  },
  "dependencies": {
    "@tailwindplus/elements": "^1.0.3"
  }
}
```
post_to_reddit.py (new file, 145 lines)

```python
# post_to_reddit.py

import argparse
import os
import glob
from datetime import datetime, timezone
import praw
from dotenv import load_dotenv
from pathlib import Path

IMAGE_DIR = "images"


def get_reddit_instance():
    """Initializes and returns a PRAW Reddit instance using OAuth2 refresh token."""

    env_path = Path(__file__).parent / ".env"
    load_dotenv(dotenv_path=env_path)

    client_id = os.getenv("REDDIT_CLIENT_ID")
    client_secret = os.getenv("REDDIT_CLIENT_SECRET")
    user_agent = os.getenv("REDDIT_USER_AGENT")
    refresh_token = os.getenv("REDDIT_REFRESH_TOKEN")

    if not all([client_id, client_secret, user_agent, refresh_token]):
        print(
            "Error: Reddit API credentials (including REDDIT_REFRESH_TOKEN) must be set in .env file."
        )
        return None

    return praw.Reddit(
        client_id=client_id,
        client_secret=client_secret,
        user_agent=user_agent,
        refresh_token=refresh_token,
    )


def find_latest_image(pattern):
    """Finds the most recent file in the IMAGE_DIR that matches a given pattern."""
    try:
        search_path = os.path.join(IMAGE_DIR, pattern)
        list_of_files = glob.glob(search_path)
        if not list_of_files:
            return None
        # The latest file will be the one with the highest modification time
        latest_file = max(list_of_files, key=os.path.getmtime)
        return latest_file
    except Exception as e:
        print(f"Error finding image file: {e}")
        return None


def main():
    """Main function to find an image and post it to Reddit."""
    parser = argparse.ArgumentParser(
        description="Find the latest sentiment image and post it to a subreddit."
    )
    parser.add_argument(
        "-s",
        "--subreddit",
        help="The source subreddit of the image to post. (Defaults to overall summary)",
    )
    parser.add_argument(
        "-w",
        "--weekly",
        action="store_true",
        help="Post the weekly summary instead of the daily one.",
    )
    parser.add_argument(
        "-t",
        "--target-subreddit",
        default="rstat",
        help="The subreddit to post the image to. (Default: rstat)",
    )
    parser.add_argument(
        "--flair-id",
        help="The specific Flair ID to use for the post (required for some subreddits).",
    )

    args = parser.parse_args()

    # --- 1. Determine filename pattern and post title ---
    current_date_str = datetime.now(timezone.utc).strftime("%Y-%m-%d")

    if args.subreddit:
        view_type = "weekly" if args.weekly else "daily"
        filename_pattern = f"{args.subreddit.lower()}_{view_type}_*.png"
        post_title = f"{view_type.capitalize()} Ticker Sentiment for r/{args.subreddit} ({current_date_str})"
    else:
        # Default to the overall summary
        if args.weekly:
            print(
                "Warning: --weekly flag has no effect for overall summary. Posting overall daily image."
            )
        filename_pattern = "overall_summary_*.png"
        post_title = (
            f"Overall Top 10 Ticker Mentions Across Reddit ({current_date_str})"
        )

    print(f"Searching for image pattern: {filename_pattern}")

    # --- 2. Find the latest image file ---
    image_to_post = find_latest_image(filename_pattern)

    if not image_to_post:
        print(
            f"Error: No image found matching the pattern '{filename_pattern}'. Please run the scraper and exporter first."
        )
        return

    print(f"Found image: {image_to_post}")

    # --- 3. Connect to Reddit and submit ---
    reddit = get_reddit_instance()
    if not reddit:
        return

    try:
        target_sub = reddit.subreddit(args.target_subreddit)
        print(f"Submitting '{post_title}' to r/{target_sub.display_name}...")

        submission = target_sub.submit_image(
            title=post_title,
            image_path=image_to_post,
            flair_id=args.flair_id,  # None if --flair-id was not provided
        )

        print("\n--- Post Successful! ---")
        print(f"Post URL: {submission.shortlink}")

    except Exception as e:
        print(f"\nAn error occurred while posting to Reddit: {e}")
        if "FLAIR_REQUIRED" in str(e).upper():
            print(
                "\nHINT: This subreddit requires a flair. You MUST provide a valid Flair ID using the --flair-id argument."
            )
            print(
                "Please follow the manual steps to find the Flair ID using your browser's developer tools."
            )


if __name__ == "__main__":
    main()
```
requirements.txt

```diff
@@ -1,5 +1,8 @@
-yfinance
-praw
-python-dotenv
-nltk
-Flask
+Flask==3.1.1
+gunicorn==23.0.0
+nltk==3.9.1
+playwright==1.54.0
+praw==7.8.1
+python-dotenv==1.1.1
+uvicorn==0.35.0
+yfinance==0.2.65
```
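The pinned `nltk` dependency supplies the VADER analyzer that the README describes for Bullish/Bearish/Neutral labels. A minimal sketch of that classification; the ±0.05 compound thresholds are VADER's commonly cited defaults and an assumption here, not necessarily what the repo's `sentiment_analyzer.py` uses:

```python
from nltk.sentiment.vader import SentimentIntensityAnalyzer

analyzer = SentimentIntensityAnalyzer()  # requires the vader_lexicon download

def label(text: str) -> str:
    # 'compound' is VADER's normalized overall score in [-1, 1]
    compound = analyzer.polarity_scores(text)["compound"]
    if compound >= 0.05:
        return "Bullish"
    if compound <= -0.05:
        return "Bearish"
    return "Neutral"

print(label("TSLA is going to crush earnings!"))  # likely "Bullish"
```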
rstat_tool/cleanup.py (new file, 88 lines)
@@ -0,0 +1,88 @@
# rstat_tool/cleanup.py

import argparse
from . import database
from .logger_setup import setup_logging, logger as log

# We can't reuse load_subreddits from main anymore if it's not in the same file.
# So we will duplicate it here. It's small and keeps this script self-contained.
import json


def load_subreddits(filepath):
    """Loads a list of subreddits from a JSON file."""
    try:
        with open(filepath, "r") as f:
            data = json.load(f)
        return data.get("subreddits", [])
    except (FileNotFoundError, json.JSONDecodeError) as e:
        log.error(f"Error loading config file '{filepath}': {e}")
        return None


def run_cleanup():
    """Main function for the cleanup tool."""
    parser = argparse.ArgumentParser(
        description="A tool to clean stale data from the RSTAT database.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "--tickers",
        action="store_true",
        help="Clean tickers that are in the blacklist.",
    )

    # --- UPDATED ARGUMENT DEFINITION ---
    # nargs='?': Makes the argument optional.
    # const='subreddits.json': The value used if the flag is present with no argument.
    # default=None: The value if the flag is not present at all.
    parser.add_argument(
        "--subreddits",
        nargs="?",
        const="subreddits.json",
        default=None,
        help="Clean data from subreddits NOT in the specified config file.\n(Defaults to 'subreddits.json' if flag is used without a value).",
    )

    parser.add_argument(
        "--all", action="store_true", help="Run all available cleanup tasks."
    )
    parser.add_argument(
        "--stdout", action="store_true", help="Print all log messages to the console."
    )

    args = parser.parse_args()

    setup_logging(console_verbose=args.stdout)

    run_any_task = False

    log.critical("\n--- Starting Cleanup ---")

    # --- UPDATED LOGIC TO HANDLE THE NEW ARGUMENT ---
    if args.all or args.tickers:
        run_any_task = True
        database.clean_stale_tickers()

    # The --subreddits argument will be None if not provided, or a filename string if it is.
    if args.all or args.subreddits is not None:
        run_any_task = True
        # If --all is used, default to 'subreddits.json' if --subreddits wasn't also specified
        config_file = args.subreddits or "subreddits.json"
        log.info(f"\nCleaning subreddits based on active list in: {config_file}")
        active_subreddits = load_subreddits(config_file)
        if active_subreddits is not None:
            database.clean_stale_subreddits(active_subreddits)

    if not run_any_task:
        parser.print_help()
        log.error(
            "\nError: Please provide at least one cleanup option (e.g., --tickers, --subreddits, --all)."
        )
        return

    log.critical("\nCleanup finished.")


if __name__ == "__main__":
    run_cleanup()
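The `nargs="?"` / `const` / `default` combination gives `--subreddits` three distinct behaviors. Illustrative invocations (shown via `python -m`, since the file uses package-relative imports; an installed command name, if any, is not visible here):

    $ python -m rstat_tool.cleanup --tickers                  # args.subreddits is None: tickers only
    $ python -m rstat_tool.cleanup --subreddits               # args.subreddits == "subreddits.json" (const)
    $ python -m rstat_tool.cleanup --subreddits custom.json   # args.subreddits == "custom.json"
    $ python -m rstat_tool.cleanup --all --stdout             # every task, logging to the console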
@@ -1,19 +1,20 @@
 # rstat_tool/dashboard.py

-from flask import Flask, render_template
-from datetime import datetime, timedelta
+from flask import Flask, render_template, request
+from datetime import datetime, timedelta, timezone
+from .logger_setup import logger as log
 from .database import (
-    get_overall_summary,
-    get_subreddit_summary,
     get_all_scanned_subreddits,
     get_deep_dive_details,
-    get_image_view_summary,
-    get_weekly_summary_for_subreddit
+    get_daily_summary_for_subreddit,
+    get_weekly_summary_for_subreddit,
+    get_overall_daily_summary,
+    get_overall_weekly_summary,
 )

-app = Flask(__name__, template_folder='../templates')
+app = Flask(__name__, template_folder='../templates', static_folder='../static')

-@app.template_filter('format_mc')
+@app.template_filter("format_mc")
 def format_market_cap(mc):
     """Formats a large number into a readable market cap string."""
     if mc is None or mc == 0:
@@ -27,23 +28,81 @@ def format_market_cap(mc):
     else:
         return f"${mc:,}"


 @app.context_processor
 def inject_subreddits():
-    """Makes the list of all scanned subreddits available to every template."""
-    subreddits = get_all_scanned_subreddits()
-    return dict(subreddits=subreddits)
+    """Makes the list of all subreddits available to every template for the navbar."""
+    return dict(all_subreddits=get_all_scanned_subreddits())


 @app.route("/")
-def index():
-    """The handler for the main dashboard page."""
-    tickers = get_overall_summary(limit=10)
-    return render_template("index.html", tickers=tickers)
+def overall_dashboard():
+    """Handler for the main, overall dashboard."""
+    view_type = request.args.get("view", "daily")
+    is_image_mode = request.args.get("image") == "true"
+
+    try:
+        # Get the 'top' parameter, default to 10, and ensure it's an integer
+        top_n = int(request.args.get('top', 10))
+    except (ValueError, TypeError):
+        top_n = 10  # Fallback to 10 if the value is invalid
+
+    if view_type == "weekly":
+        tickers, start, end = get_overall_weekly_summary(limit=top_n)
+        date_string = f"{start.strftime('%b %d')} - {end.strftime('%b %d, %Y')}"
+        subtitle = f"All Subreddits - Top {top_n} Weekly"
+    else:  # Default to daily
+        tickers = get_overall_daily_summary(limit=top_n)
+        date_string = datetime.now(timezone.utc).strftime("%Y-%m-%d")
+        subtitle = f"All Subreddits - Top {top_n} Daily"
+
+    return render_template(
+        "dashboard_view.html",
+        title="Overall Dashboard",
+        subtitle=subtitle,
+        date_string=date_string,
+        tickers=tickers,
+        view_type=view_type,
+        subreddit_name=None,
+        is_image_mode=is_image_mode,
+        base_url="/",
+    )


 @app.route("/subreddit/<name>")
 def subreddit_dashboard(name):
-    """A dynamic route for per-subreddit dashboards."""
-    tickers = get_subreddit_summary(name, limit=10)
-    return render_template("subreddit.html", tickers=tickers, subreddit_name=name)
+    """Handler for per-subreddit dashboards."""
+    view_type = request.args.get("view", "daily")
+    is_image_mode = request.args.get("image") == "true"
+
+    try:
+        top_n = int(request.args.get('top', 10))
+    except (ValueError, TypeError):
+        top_n = 10
+
+    if view_type == "weekly":
+        today = datetime.now(timezone.utc)
+        target_date = today - timedelta(days=7)
+        tickers, start, end = get_weekly_summary_for_subreddit(name, target_date, limit=top_n)
+        date_string = f"{start.strftime('%b %d')} - {end.strftime('%b %d, %Y')}"
+        subtitle = f"r/{name} - Top {top_n} Weekly"
+    else:  # Default to daily
+        tickers = get_daily_summary_for_subreddit(name, limit=top_n)
+        date_string = datetime.now(timezone.utc).strftime("%Y-%m-%d")
+        subtitle = f"r/{name} - Top {top_n} Daily"
+
+    return render_template(
+        "dashboard_view.html",
+        title=f"r/{name} Dashboard",
+        subtitle=subtitle,
+        date_string=date_string,
+        tickers=tickers,
+        view_type=view_type,
+        subreddit_name=name,
+        is_image_mode=is_image_mode,
+        base_url=f"/subreddit/{name}",
+    )


 @app.route("/deep-dive/<symbol>")
 def deep_dive(symbol):
@@ -52,41 +111,21 @@ def deep_dive(symbol):
     posts = get_deep_dive_details(symbol)
     return render_template("deep_dive.html", posts=posts, symbol=symbol)

-@app.route("/image/<name>")
-def image_view(name):
-    """The handler for the image-style dashboard."""
-    tickers = get_image_view_summary(name)
-    current_date = datetime.utcnow().strftime("%Y-%m-%d")
-    return render_template(
-        "image_view.html",
-        tickers=tickers,
-        subreddit_name=name,
-        current_date=current_date
-    )
-
-@app.route("/image/weekly/<name>")
-def weekly_image_view(name):
-    """The handler for the WEEKLY image-style dashboard."""
-    tickers = get_weekly_summary_for_subreddit(name)
-
-    # Create the date range string for the title
-    end_date = datetime.utcnow()
-    start_date = end_date - timedelta(days=7)
-    date_range_str = f"{start_date.strftime('%b %d')} - {end_date.strftime('%b %d, %Y')}"
-
-    return render_template(
-        "weekly_image_view.html",
-        tickers=tickers,
-        subreddit_name=name,
-        date_range=date_range_str
-    )
+@app.route("/about")
+def about_page():
+    """Handler for the static About page."""
+    # We need to pass these so the navbar knows which items to highlight
+    return render_template("about.html", subreddit_name=None, view_type='daily')


 def start_dashboard():
     """The main function called by the 'rstat-dashboard' command."""
-    print("Starting Flask server...")
-    print("Open http://127.0.0.1:5000 in your browser.")
-    print("Press CTRL+C to stop the server.")
+    log.info("Starting Flask server...")
+    log.info("Open http://127.0.0.1:5000 in your browser.")
+    log.info("Press CTRL+C to stop the server.")
     app.run(debug=True)


 if __name__ == "__main__":
     start_dashboard()
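Both dashboard routes read the same three query parameters (`view`, `top`, `image`), so any rendered view can be reproduced straight from the URL once the server from `start_dashboard()` is up:

    http://127.0.0.1:5000/?view=weekly&top=15
    http://127.0.0.1:5000/subreddit/stocks?view=daily&top=5
    http://127.0.0.1:5000/?image=true

The `image=true` flag only toggles `is_image_mode` in the template context; presumably the Playwright-based exporter loads these URLs to screenshot them, though that code is not part of this excerpt.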
@@ -2,26 +2,95 @@

 import sqlite3
 import time
-from .ticker_extractor import COMMON_WORDS_BLACKLIST
-from datetime import datetime, timedelta
+from .ticker_extractor import (
+    COMMON_WORDS_BLACKLIST,
+    extract_golden_tickers,
+    extract_potential_tickers,
+)
+from .logger_setup import logger as log
+from datetime import datetime, timedelta, timezone

 DB_FILE = "reddit_stocks.db"
+MARKET_CAP_REFRESH_INTERVAL = 86400
+
+
+def clean_stale_tickers():
+    """
+    Removes tickers and their associated mentions from the database
+    if the ticker symbol exists in the COMMON_WORDS_BLACKLIST.
+    """
+    log.info("\n--- Cleaning Stale Tickers from Database ---")
+    conn = get_db_connection()
+    cursor = conn.cursor()
+
+    placeholders = ",".join("?" for _ in COMMON_WORDS_BLACKLIST)
+    query = f"SELECT id, symbol FROM tickers WHERE symbol IN ({placeholders})"
+
+    cursor.execute(query, tuple(COMMON_WORDS_BLACKLIST))
+    stale_tickers = cursor.fetchall()
+
+    if not stale_tickers:
+        log.info("No stale tickers to clean.")
+        conn.close()
+        return
+
+    for ticker in stale_tickers:
+        ticker_id = ticker["id"]
+        ticker_symbol = ticker["symbol"]
+        log.info(f"Removing stale ticker '{ticker_symbol}' (ID: {ticker_id})...")
+        cursor.execute("DELETE FROM mentions WHERE ticker_id = ?", (ticker_id,))
+        cursor.execute("DELETE FROM tickers WHERE id = ?", (ticker_id,))
+
+    deleted_count = conn.total_changes
+    conn.commit()
+    conn.close()
+    log.info(f"Cleanup complete. Removed {deleted_count} records.")
+
+
+def clean_stale_subreddits(active_subreddits):
+    """
+    Removes all data associated with subreddits that are NOT in the active list.
+    """
+    log.info("\n--- Cleaning Stale Subreddits from Database ---")
+    conn = get_db_connection()
+    cursor = conn.cursor()
+
+    # Convert the list of active subreddits from the config file to a lowercase set
+    # for fast, case-insensitive lookups.
+    active_subreddits_lower = {sub.lower() for sub in active_subreddits}
+
+    cursor.execute("SELECT id, name FROM subreddits")
+    db_subreddits = cursor.fetchall()
+    stale_sub_ids = []
+    for sub in db_subreddits:
+        if sub["name"] not in active_subreddits_lower:
+            log.info(f"Found stale subreddit to remove: r/{sub['name']}")
+            stale_sub_ids.append(sub["id"])
+    if not stale_sub_ids:
+        log.info("No stale subreddits to clean.")
+        conn.close()
+        return
+    for sub_id in stale_sub_ids:
+        log.info(f"  -> Deleting associated data for subreddit ID: {sub_id}")
+        cursor.execute("DELETE FROM mentions WHERE subreddit_id = ?", (sub_id,))
+        cursor.execute("DELETE FROM posts WHERE subreddit_id = ?", (sub_id,))
+        cursor.execute("DELETE FROM subreddits WHERE id = ?", (sub_id,))
+    conn.commit()
+    conn.close()
+    log.info("Stale subreddit cleanup complete.")


 def get_db_connection():
-    """Establishes a connection to the SQLite database."""
     conn = sqlite3.connect(DB_FILE)
     conn.row_factory = sqlite3.Row
     return conn


 def initialize_db():
-    """
-    Initializes the database and creates the necessary tables if they don't exist.
-    """
     conn = get_db_connection()
     cursor = conn.cursor()
-    # --- Create tickers table ---
-    cursor.execute("""
+    cursor.execute(
+        """
         CREATE TABLE IF NOT EXISTS tickers (
             id INTEGER PRIMARY KEY AUTOINCREMENT,
             symbol TEXT NOT NULL UNIQUE,
@@ -29,33 +98,36 @@ def initialize_db():
             closing_price REAL,
             last_updated INTEGER
         )
-    """)
-    # --- Create subreddits table ---
-    cursor.execute("""
+        """
+    )
+    cursor.execute(
+        """
         CREATE TABLE IF NOT EXISTS subreddits (
             id INTEGER PRIMARY KEY AUTOINCREMENT,
             name TEXT NOT NULL UNIQUE
         )
-    """)
-    # --- Create mentions table ---
-    cursor.execute("""
+        """
+    )
+    cursor.execute(
+        """
         CREATE TABLE IF NOT EXISTS mentions (
             id INTEGER PRIMARY KEY AUTOINCREMENT,
             ticker_id INTEGER,
             subreddit_id INTEGER,
             post_id TEXT NOT NULL,
-            mention_type TEXT NOT NULL, -- Can be 'post' or 'comment'
+            comment_id TEXT, -- NEW: will be NULL for post mentions
+            mention_type TEXT NOT NULL,
+            mention_sentiment REAL,
             mention_timestamp INTEGER NOT NULL,
-            sentiment_score REAL,
             FOREIGN KEY (ticker_id) REFERENCES tickers (id),
             FOREIGN KEY (subreddit_id) REFERENCES subreddits (id),
-            UNIQUE(ticker_id, post_id, mention_type, sentiment_score)
+            -- The new uniqueness rule: one row per ticker per post/comment
+            UNIQUE(ticker_id, post_id, comment_id)
        )
-    """)
-    cursor.execute("""
+        """
+    )
+    cursor.execute(
+        """
         CREATE TABLE IF NOT EXISTS posts (
             id INTEGER PRIMARY KEY AUTOINCREMENT,
             post_id TEXT NOT NULL UNIQUE,
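As an aside, a minimal standalone sketch (not part of the diff) of what the new `UNIQUE(ticker_id, post_id, comment_id)` rule buys: a second record for the same ticker in the same comment is rejected, which is exactly the `sqlite3.IntegrityError` that `add_mention()` below swallows. One caveat: SQLite treats NULLs as distinct inside UNIQUE constraints, so post-level mentions (where `comment_id` is NULL) are not collapsed by this rule.

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        """CREATE TABLE mentions (
               id INTEGER PRIMARY KEY AUTOINCREMENT,
               ticker_id INTEGER, post_id TEXT, comment_id TEXT,
               UNIQUE(ticker_id, post_id, comment_id))"""
    )
    ins = "INSERT INTO mentions (ticker_id, post_id, comment_id) VALUES (?, ?, ?)"
    conn.execute(ins, (1, "abc", "c1"))
    try:
        conn.execute(ins, (1, "abc", "c1"))  # same ticker, same comment -> duplicate
    except sqlite3.IntegrityError:
        pass                                 # what add_mention() catches and ignores
    conn.execute(ins, (1, "abc", None))      # post-level mention
    conn.execute(ins, (1, "abc", None))      # NULL != NULL in SQLite: NOT rejected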
@@ -67,155 +139,75 @@ def initialize_db():
             avg_comment_sentiment REAL,
             FOREIGN KEY (subreddit_id) REFERENCES subreddits (id)
         )
-    """)
+        """
+    )
     conn.commit()
     conn.close()
-    print("Database initialized successfully.")
+    log.info("Database initialized successfully.")

-def clean_stale_tickers():
-    """
-    Removes tickers and their associated mentions from the database
-    if the ticker symbol exists in the COMMON_WORDS_BLACKLIST.
-    """
-    print("\n--- Cleaning Stale Tickers from Database ---")
-    conn = get_db_connection()
-    cursor = conn.cursor()
-
-    placeholders = ','.join('?' for _ in COMMON_WORDS_BLACKLIST)
-    query = f"SELECT id, symbol FROM tickers WHERE symbol IN ({placeholders})"
-
-    cursor.execute(query, tuple(COMMON_WORDS_BLACKLIST))
-    stale_tickers = cursor.fetchall()
-
-    if not stale_tickers:
-        print("No stale tickers to clean.")
-        conn.close()
-        return
-
-    for ticker in stale_tickers:
-        ticker_id = ticker['id']
-        ticker_symbol = ticker['symbol']
-        print(f"Removing stale ticker '{ticker_symbol}' (ID: {ticker_id})...")
-        cursor.execute("DELETE FROM mentions WHERE ticker_id = ?", (ticker_id,))
-        cursor.execute("DELETE FROM tickers WHERE id = ?", (ticker_id,))
-
-    deleted_count = conn.total_changes
-    conn.commit()
-    conn.close()
-    print(f"Cleanup complete. Removed {deleted_count} records.")
-
-
-def add_mention(conn, ticker_id, subreddit_id, post_id, mention_type, timestamp, sentiment):
+
+def add_mention(
+    conn,
+    ticker_id,
+    subreddit_id,
+    post_id,
+    mention_type,
+    timestamp,
+    mention_sentiment,
+    comment_id=None,
+):
     cursor = conn.cursor()
     try:
         cursor.execute(
-            "INSERT INTO mentions (ticker_id, subreddit_id, post_id, mention_type, mention_timestamp, sentiment_score) VALUES (?, ?, ?, ?, ?, ?)",
-            (ticker_id, subreddit_id, post_id, mention_type, timestamp, sentiment)
+            """
+            INSERT INTO mentions (ticker_id, subreddit_id, post_id, comment_id, mention_type, mention_timestamp, mention_sentiment)
+            VALUES (?, ?, ?, ?, ?, ?, ?)
+            """,
+            (
+                ticker_id,
+                subreddit_id,
+                post_id,
+                comment_id,
+                mention_type,
+                timestamp,
+                mention_sentiment,
+            ),
         )
-        conn.commit()
     except sqlite3.IntegrityError:
+        # This will now correctly catch and ignore any true duplicates.
         pass


 def get_or_create_entity(conn, table_name, column_name, value):
     """Generic function to get or create an entity and return its ID."""
     cursor = conn.cursor()
     cursor.execute(f"SELECT id FROM {table_name} WHERE {column_name} = ?", (value,))
     result = cursor.fetchone()
     if result:
-        return result['id']
+        return result["id"]
     else:
         cursor.execute(f"INSERT INTO {table_name} ({column_name}) VALUES (?)", (value,))
         conn.commit()
         return cursor.lastrowid


 def update_ticker_financials(conn, ticker_id, market_cap, closing_price):
     """Updates the financials and timestamp for a specific ticker."""
     cursor = conn.cursor()
     current_timestamp = int(time.time())
     cursor.execute(
         "UPDATE tickers SET market_cap = ?, closing_price = ?, last_updated = ? WHERE id = ?",
-        (market_cap, closing_price, current_timestamp, ticker_id)
+        (market_cap, closing_price, current_timestamp, ticker_id),
     )
     conn.commit()


 def get_ticker_info(conn, ticker_id):
     """Retrieves all info for a specific ticker by its ID."""
     cursor = conn.cursor()
     cursor.execute("SELECT * FROM tickers WHERE id = ?", (ticker_id,))
     return cursor.fetchone()

-def generate_summary_report(limit=20):
-    """Queries the DB to generate a summary for the command-line tool."""
-    print(f"\n--- Top {limit} Tickers by Mention Count ---")
-    conn = get_db_connection()
-    cursor = conn.cursor()
-    query = """
-        SELECT
-            t.symbol, t.market_cap, COUNT(m.id) as mention_count,
-            SUM(CASE WHEN m.sentiment_score > 0.1 THEN 1 ELSE 0 END) as bullish_mentions,
-            SUM(CASE WHEN m.sentiment_score < -0.1 THEN 1 ELSE 0 END) as bearish_mentions,
-            SUM(CASE WHEN m.sentiment_score BETWEEN -0.1 AND 0.1 THEN 1 ELSE 0 END) as neutral_mentions
-        FROM mentions m JOIN tickers t ON m.ticker_id = t.id
-        GROUP BY t.symbol, t.market_cap ORDER BY mention_count DESC LIMIT ?;
-    """
-    results = cursor.execute(query, (limit,)).fetchall()
-    header = f"{'Ticker':<8} | {'Mentions':<8} | {'Bullish':<8} | {'Bearish':<8} | {'Neutral':<8} | {'Market Cap':<15}"
-    print(header)
-    print("-" * len(header))
-    for row in results:
-        market_cap_str = "N/A"
-        if row['market_cap'] and row['market_cap'] > 0:
-            mc = row['market_cap']
-            if mc >= 1e12: market_cap_str = f"${mc/1e12:.2f}T"
-            elif mc >= 1e9: market_cap_str = f"${mc/1e9:.2f}B"
-            else: market_cap_str = f"${mc/1e6:.2f}M"
-        print(f"{row['symbol']:<8} | {row['mention_count']:<8} | {row['bullish_mentions']:<8} | {row['bearish_mentions']:<8} | {row['neutral_mentions']:<8} | {market_cap_str:<15}")
-    conn.close()
-
-def get_overall_summary(limit=50):
-    conn = get_db_connection()
-    query = """
-        SELECT
-            t.symbol, t.market_cap, t.closing_price, -- Added closing_price
-            COUNT(m.id) as mention_count,
-            SUM(CASE WHEN m.sentiment_score > 0.1 THEN 1 ELSE 0 END) as bullish_mentions,
-            SUM(CASE WHEN m.sentiment_score < -0.1 THEN 1 ELSE 0 END) as bearish_mentions,
-            SUM(CASE WHEN m.sentiment_score BETWEEN -0.1 AND 0.1 THEN 1 ELSE 0 END) as neutral_mentions
-        FROM mentions m JOIN tickers t ON m.ticker_id = t.id
-        GROUP BY t.symbol, t.market_cap, t.closing_price -- Added closing_price
-        ORDER BY mention_count DESC LIMIT ?;
-    """
-    results = conn.execute(query, (limit,)).fetchall()
-    conn.close()
-    return results
-
-def get_subreddit_summary(subreddit_name, limit=50):
-    conn = get_db_connection()
-    query = """
-        SELECT
-            t.symbol, t.market_cap, t.closing_price, -- Added closing_price
-            COUNT(m.id) as mention_count,
-            SUM(CASE WHEN m.sentiment_score > 0.1 THEN 1 ELSE 0 END) as bullish_mentions,
-            SUM(CASE WHEN m.sentiment_score < -0.1 THEN 1 ELSE 0 END) as bearish_mentions,
-            SUM(CASE WHEN m.sentiment_score BETWEEN -0.1 AND 0.1 THEN 1 ELSE 0 END) as neutral_mentions
-        FROM mentions m
-        JOIN tickers t ON m.ticker_id = t.id
-        JOIN subreddits s ON m.subreddit_id = s.id
-        WHERE s.name = ?
-        GROUP BY t.symbol, t.market_cap, t.closing_price -- Added closing_price
-        ORDER BY mention_count DESC LIMIT ?;
-    """
-    results = conn.execute(query, (subreddit_name, limit)).fetchall()
-    conn.close()
-    return results
-
-def get_all_scanned_subreddits():
-    """Gets a unique list of all subreddits we have data for."""
-    conn = get_db_connection()
-    results = conn.execute("SELECT DISTINCT name FROM subreddits ORDER BY name ASC;").fetchall()
-    conn.close()
-    return [row['name'] for row in results]

 def add_or_update_post_analysis(conn, post_data):
     """
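Hypothetical call sites for the reworked `add_mention()` above (the scraper that actually calls it is outside this excerpt; the IDs and scores are invented): a post-level mention leaves `comment_id` at its `None` default, while a comment-level mention supplies one.

    add_mention(conn, ticker_id=1, subreddit_id=2, post_id="1abcde",
                mention_type="post", timestamp=1700000000, mention_sentiment=0.4)
    add_mention(conn, ticker_id=1, subreddit_id=2, post_id="1abcde",
                mention_type="comment", timestamp=1700000100, mention_sentiment=-0.2,
                comment_id="k9xyz")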
@@ -232,79 +224,218 @@ def add_or_update_post_analysis(conn, post_data):
             comment_count = excluded.comment_count,
             avg_comment_sentiment = excluded.avg_comment_sentiment;
         """,
-        post_data
+        post_data,
     )
     conn.commit()


+def get_week_start_end(for_date):
+    """Calculates the start (Monday) and end (Sunday) of the week."""
+    start_of_week = for_date - timedelta(days=for_date.weekday())
+    end_of_week = start_of_week + timedelta(days=6)
+    start_of_week = start_of_week.replace(hour=0, minute=0, second=0, microsecond=0)
+    end_of_week = end_of_week.replace(hour=23, minute=59, second=59, microsecond=999999)
+    return start_of_week, end_of_week
+
+
+def get_overall_daily_summary(limit=10):
+    """Gets the top tickers across all subreddits from the LAST 24 HOURS."""
+    conn = get_db_connection()
+    one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
+    one_day_ago_timestamp = int(one_day_ago.timestamp())
+    query = """
+        SELECT t.symbol, t.market_cap, t.closing_price, COUNT(m.id) as total_mentions,
+            SUM(CASE WHEN m.mention_sentiment > 0.1 THEN 1 ELSE 0 END) as bullish_mentions,
+            SUM(CASE WHEN m.mention_sentiment < -0.1 THEN 1 ELSE 0 END) as bearish_mentions
+        FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        WHERE m.mention_timestamp >= ?
+        GROUP BY t.symbol, t.market_cap, t.closing_price
+        ORDER BY total_mentions DESC LIMIT ?;
+    """
+    results = conn.execute(query, (one_day_ago_timestamp, limit)).fetchall()
+    conn.close()
+    return results
+
+
+def get_overall_weekly_summary(limit=10):
+    """Gets the top tickers across all subreddits for LAST WEEK (Mon-Sun)."""
+    conn = get_db_connection()
+    today = datetime.now(timezone.utc)
+    target_date_for_last_week = today - timedelta(days=7)
+    start_of_week, end_of_week = get_week_start_end(target_date_for_last_week)
+    start_timestamp = int(start_of_week.timestamp())
+    end_timestamp = int(end_of_week.timestamp())
+    query = """
+        SELECT t.symbol, t.market_cap, t.closing_price, COUNT(m.id) as total_mentions,
+            SUM(CASE WHEN m.mention_sentiment > 0.1 THEN 1 ELSE 0 END) as bullish_mentions,
+            SUM(CASE WHEN m.mention_sentiment < -0.1 THEN 1 ELSE 0 END) as bearish_mentions
+        FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        WHERE m.mention_timestamp BETWEEN ? AND ?
+        GROUP BY t.symbol, t.market_cap, t.closing_price
+        ORDER BY total_mentions DESC LIMIT ?;
+    """
+    results = conn.execute(query, (start_timestamp, end_timestamp, limit)).fetchall()
+    conn.close()
+    return results, start_of_week, end_of_week
+
+
+def get_daily_summary_for_subreddit(subreddit_name, limit=10):
+    """Gets a summary for a subreddit's DAILY view (last 24 hours)."""
+    conn = get_db_connection()
+    one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
+    one_day_ago_timestamp = int(one_day_ago.timestamp())
+    query = """
+        SELECT t.symbol, t.market_cap, t.closing_price, COUNT(m.id) as total_mentions,
+            SUM(CASE WHEN m.mention_sentiment > 0.1 THEN 1 ELSE 0 END) as bullish_mentions,
+            SUM(CASE WHEN m.mention_sentiment < -0.1 THEN 1 ELSE 0 END) as bearish_mentions
+        FROM mentions m JOIN tickers t ON m.ticker_id = t.id JOIN subreddits s ON m.subreddit_id = s.id
+        WHERE LOWER(s.name) = LOWER(?) AND m.mention_timestamp >= ?
+        GROUP BY t.symbol, t.market_cap, t.closing_price
+        ORDER BY total_mentions DESC LIMIT ?;
+    """
+    results = conn.execute(query, (subreddit_name, one_day_ago_timestamp, limit)).fetchall()
+    conn.close()
+    return results
+
+
+def get_weekly_summary_for_subreddit(subreddit_name, for_date, limit=10):
+    """Gets a summary for a subreddit's WEEKLY view (for a specific week)."""
+    conn = get_db_connection()
+    start_of_week, end_of_week = get_week_start_end(for_date)
+    start_timestamp = int(start_of_week.timestamp())
+    end_timestamp = int(end_of_week.timestamp())
+    query = """
+        SELECT t.symbol, t.market_cap, t.closing_price, COUNT(m.id) as total_mentions,
+            SUM(CASE WHEN m.mention_sentiment > 0.1 THEN 1 ELSE 0 END) as bullish_mentions,
+            SUM(CASE WHEN m.mention_sentiment < -0.1 THEN 1 ELSE 0 END) as bearish_mentions
+        FROM mentions m JOIN tickers t ON m.ticker_id = t.id JOIN subreddits s ON m.subreddit_id = s.id
+        WHERE LOWER(s.name) = LOWER(?) AND m.mention_timestamp BETWEEN ? AND ?
+        GROUP BY t.symbol, t.market_cap, t.closing_price
+        ORDER BY total_mentions DESC LIMIT ?;
+    """
+    results = conn.execute(
+        query, (subreddit_name, start_timestamp, end_timestamp, limit)
+    ).fetchall()
+    conn.close()
+    return results, start_of_week, end_of_week
+
+
 def get_deep_dive_details(ticker_symbol):
-    """
-    Gets all analyzed posts that mention a specific ticker.
-    """
+    """Gets all analyzed posts that mention a specific ticker."""
     conn = get_db_connection()
     query = """
         SELECT DISTINCT p.*, s.name as subreddit_name FROM posts p
-        JOIN mentions m ON p.post_id = m.post_id
-        JOIN tickers t ON m.ticker_id = t.id
+        JOIN mentions m ON p.post_id = m.post_id JOIN tickers t ON m.ticker_id = t.id
         JOIN subreddits s ON p.subreddit_id = s.id
-        WHERE t.symbol = ?
-        ORDER BY p.post_timestamp DESC;
+        WHERE LOWER(t.symbol) = LOWER(?) ORDER BY p.post_timestamp DESC;
     """
     results = conn.execute(query, (ticker_symbol,)).fetchall()
     conn.close()
     return results

-def get_image_view_summary(subreddit_name):
-    """
-    Gets a summary of tickers for the image view, including post, comment,
-    and sentiment counts.
-    """
-    conn = get_db_connection()
-    # This query now also counts sentiment types
-    query = """
-        SELECT
-            t.symbol,
-            COUNT(CASE WHEN m.mention_type = 'post' THEN 1 END) as post_mentions,
-            COUNT(CASE WHEN m.mention_type = 'comment' THEN 1 END) as comment_mentions,
-            COUNT(CASE WHEN m.sentiment_score > 0.1 THEN 1 END) as bullish_mentions,
-            COUNT(CASE WHEN m.sentiment_score < -0.1 THEN 1 END) as bearish_mentions
-        FROM mentions m
-        JOIN tickers t ON m.ticker_id = t.id
-        JOIN subreddits s ON m.subreddit_id = s.id
-        WHERE s.name = ?
-        GROUP BY t.symbol
-        ORDER BY (post_mentions + comment_mentions) DESC
-        LIMIT 10;
-    """
-    results = conn.execute(query, (subreddit_name,)).fetchall()
-    conn.close()
-    return results
-
-def get_weekly_summary_for_subreddit(subreddit_name):
-    """
-    Gets a weekly summary for a specific subreddit for the image view.
-    """
-    conn = get_db_connection()
-
-    # Calculate the timestamp for 7 days ago
-    seven_days_ago = datetime.utcnow() - timedelta(days=7)
-    seven_days_ago_timestamp = int(seven_days_ago.timestamp())
-
-    # The query is the same as before, but with an added WHERE clause for the timestamp
-    query = """
-        SELECT
-            t.symbol,
-            COUNT(CASE WHEN m.mention_type = 'post' THEN 1 END) as post_mentions,
-            COUNT(CASE WHEN m.mention_type = 'comment' THEN 1 END) as comment_mentions,
-            COUNT(CASE WHEN m.sentiment_score > 0.1 THEN 1 END) as bullish_mentions,
-            COUNT(CASE WHEN m.sentiment_score < -0.1 THEN 1 END) as bearish_mentions
-        FROM mentions m
-        JOIN tickers t ON m.ticker_id = t.id
-        JOIN subreddits s ON m.subreddit_id = s.id
-        WHERE s.name = ? AND m.mention_timestamp >= ?
-        GROUP BY t.symbol
-        ORDER BY (post_mentions + comment_mentions) DESC
-        LIMIT 10;
-    """
-    results = conn.execute(query, (subreddit_name, seven_days_ago_timestamp)).fetchall()
-    conn.close()
-    return results
+
+def get_all_scanned_subreddits():
+    """Gets a unique list of all subreddits we have data for."""
+    conn = get_db_connection()
+    results = conn.execute(
+        "SELECT DISTINCT name FROM subreddits ORDER BY name ASC;"
+    ).fetchall()
+    conn.close()
+    return [row["name"] for row in results]
+
+
+def get_all_tickers():
+    """Retrieves the ID and symbol of every ticker in the database."""
+    conn = get_db_connection()
+    results = conn.execute("SELECT id, symbol FROM tickers;").fetchall()
+    conn.close()
+    return results
+
+
+def get_ticker_by_symbol(symbol):
+    """
+    Retrieves a single ticker's ID and symbol from the database.
+    The search is case-insensitive. Returns a Row object or None if not found.
+    """
+    conn = get_db_connection()
+    cursor = conn.cursor()
+    cursor.execute(
+        "SELECT id, symbol FROM tickers WHERE LOWER(symbol) = LOWER(?)", (symbol,)
+    )
+    result = cursor.fetchone()
+    conn.close()
+    return result
+
+
+def get_top_daily_ticker_symbols():
+    """Gets a simple list of the Top 10 ticker symbols from the last 24 hours."""
+    conn = get_db_connection()
+    one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
+    one_day_ago_timestamp = int(one_day_ago.timestamp())
+    query = """
+        SELECT t.symbol FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        WHERE m.mention_timestamp >= ?
+        GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
+    """
+    results = conn.execute(query, (one_day_ago_timestamp,)).fetchall()
+    conn.close()
+    return [row["symbol"] for row in results]  # Return a simple list of strings
+
+
+def get_top_weekly_ticker_symbols():
+    """Gets a simple list of the Top 10 ticker symbols from the last 7 days."""
+    conn = get_db_connection()
+    seven_days_ago = datetime.now(timezone.utc) - timedelta(days=7)
+    seven_days_ago_timestamp = int(seven_days_ago.timestamp())
+    query = """
+        SELECT t.symbol FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        WHERE m.mention_timestamp >= ?
+        GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
+    """
+    results = conn.execute(query, (seven_days_ago_timestamp,)).fetchall()
+    conn.close()
+    return [row["symbol"] for row in results]  # Return a simple list of strings
+
+
+def get_top_daily_ticker_symbols_for_subreddit(subreddit_name):
+    """Gets a list of the Top 10 daily ticker symbols for a specific subreddit."""
+    conn = get_db_connection()
+    one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
+    one_day_ago_timestamp = int(one_day_ago.timestamp())
+    query = """
+        SELECT t.symbol FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        JOIN subreddits s ON m.subreddit_id = s.id
+        WHERE LOWER(s.name) = LOWER(?) AND m.mention_timestamp >= ?
+        GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
+    """
+    results = conn.execute(
+        query,
+        (
+            subreddit_name,
+            one_day_ago_timestamp,
+        ),
+    ).fetchall()
+    conn.close()
+    return [row["symbol"] for row in results]
+
+
+def get_top_weekly_ticker_symbols_for_subreddit(subreddit_name):
+    """Gets a list of the Top 10 weekly ticker symbols for a specific subreddit."""
+    conn = get_db_connection()
+    seven_days_ago = datetime.now(timezone.utc) - timedelta(days=7)
+    seven_days_ago_timestamp = int(seven_days_ago.timestamp())
+    query = """
+        SELECT t.symbol FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        JOIN subreddits s ON m.subreddit_id = s.id
+        WHERE LOWER(s.name) = LOWER(?) AND m.mention_timestamp >= ?
+        GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
+    """
+    results = conn.execute(
+        query,
+        (
+            subreddit_name,
+            seven_days_ago_timestamp,
+        ),
+    ).fetchall()
+    conn.close()
+    return [row["symbol"] for row in results]
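A quick worked example of the `get_week_start_end()` helper added above (illustrative only): any date inside a week maps to that week's Monday 00:00:00 through Sunday 23:59:59.999999.

    from datetime import datetime, timezone

    start, end = get_week_start_end(datetime(2024, 1, 10, tzinfo=timezone.utc))  # a Wednesday
    # start -> 2024-01-08 00:00:00+00:00 (Monday)
    # end   -> 2024-01-14 23:59:59.999999+00:00 (Sunday)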
rstat_tool/flair_finder.py (new file, 76 lines)
@@ -0,0 +1,76 @@
# rstat_tool/flair_finder.py
# A dedicated tool to find available link flairs for a subreddit.

import argparse
import sys
import os
import praw
from dotenv import load_dotenv
from pathlib import Path

def get_reddit_instance_for_flairs():
    """
    Initializes and returns an authenticated PRAW instance using the refresh token.
    This is a copy of the robust authentication from the posting script.
    """
    # Find the .env file relative to the project root
    env_path = Path(__file__).parent.parent / '.env'
    load_dotenv(dotenv_path=env_path)

    client_id = os.getenv("REDDIT_CLIENT_ID")
    client_secret = os.getenv("REDDIT_CLIENT_SECRET")
    user_agent = os.getenv("REDDIT_USER_AGENT")
    refresh_token = os.getenv("REDDIT_REFRESH_TOKEN")

    if not all([client_id, client_secret, user_agent, refresh_token]):
        print("Error: Reddit API credentials (including REDDIT_REFRESH_TOKEN) must be set in .env file.", file=sys.stderr)
        return None

    return praw.Reddit(
        client_id=client_id,
        client_secret=client_secret,
        user_agent=user_agent,
        refresh_token=refresh_token
    )

def main():
    """Main function to fetch and display flairs."""
    parser = argparse.ArgumentParser(description="Fetch and display available post flairs for a subreddit.")
    parser.add_argument("subreddit", help="The name of the subreddit to check.")
    args = parser.parse_args()

    reddit = get_reddit_instance_for_flairs()
    if not reddit:
        sys.exit(1)

    print(f"\n--- Attempting to Fetch Post Flairs for r/{args.subreddit} ---")

    try:
        # This uses PRAW's generic GET request method to hit the specific API endpoint.
        api_path = f"/r/{args.subreddit}/api/link_flair_v2.json"
        flairs = reddit.get(api_path, params={"raw_json": 1})

        if not flairs:
            print("No flairs found or flair list is empty for this subreddit.")
            return

        print("\n--- Available Post Flairs ---")
        found_count = 0
        for flair in flairs:
            flair_text = flair.get('text')
            flair_id = flair.get('id')
            if flair_text and flair_id:
                print(f"  Flair Text: '{flair_text}'")
                print(f"  Flair ID:   {flair_id}\n")
                found_count += 1

        if found_count == 0:
            print("No flairs with both text and ID were found.")

    except Exception as e:
        print(f"\nAn error occurred: {e}", file=sys.stderr)
        print("Hint: Please ensure the subreddit exists and that your authenticated user has permission to view it.", file=sys.stderr)
        sys.exit(1)

if __name__ == "__main__":
    main()
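Because the file has no package-relative imports, it can be run directly as a module; the subreddit name below is just an example:

    $ python -m rstat_tool.flair_finder wallstreetbets

Each flair is printed as a `Flair Text:` / `Flair ID:` pair, and the ID is what the `--flair-id` argument of the posting script expects.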
@@ -1,52 +1,120 @@
|
|||||||
# The initial unsorted set of words.
|
# The initial unsorted set of words.
|
||||||
# Note: In Python, a 'set' is inherently unordered, but we define it here for clarity.
|
# Note: In Python, a 'set' is inherently unordered, but we define it here for clarity.
|
||||||
COMMON_WORDS_BLACKLIST = {
|
COMMON_WORDS_BLACKLIST = {
|
||||||
"401K", "403B", "457B", "ABOUT", "ABOVE", "ADAM", "ADX", "AEDT", "AEST", "AH",
|
"401K", "403B", "457B", "AAVE", "ABC", "ABOUT", "ABOVE", "ACAT", "ADAM", "ADHD",
|
||||||
"AI", "ALL", "ALPHA", "ALSO", "AM", "AMA", "AMEX", "AND", "ANY", "AR",
|
"ADR", "ADS", "ADX", "AEDT", "AEST", "AF", "AFAIK", "AFTER", "AGENT", "AH",
|
||||||
"ARE", "ARK", "AROUND", "ASAP", "ASS", "ASSET", "AT", "ATH", "ATL", "ATM",
|
"AI", "AINT", "AK", "AKSJE", "ALD", "ALGOS", "ALIVE", "ALL", "ALPHA", "ALSO",
|
||||||
"AUD", "AWS", "BABY", "BAG", "BAGS", "BE", "BEAR", "BELOW", "BETA", "BIG",
|
"AM", "AMA", "AMEX", "AMK", "AMY", "AND", "ANSS", "ANY", "APES", "APL",
|
||||||
"BIS", "BLEND", "BOE", "BOJ", "BOLL", "BOMB", "BOND", "BOTH", "BOTS", "BRB",
|
"APPL", "APPLE", "APR", "APUS", "APY", "AR", "ARBK", "ARE", "AREA", "ARH",
|
||||||
"BRL", "BS", "BST", "BSU", "BTC", "BTW", "BULL", "BUST", "BUT", "BUY",
|
"ARK", "AROUND", "ART", "AS", "ASAP", "ASEAN", "ASK", "ASS", "ASSET", "AST",
|
||||||
"BUZZ", "CAD", "CALL", "CAN", "CAP", "CBS", "CCI", "CEO", "CEST", "CET",
|
"AT", "ATH", "ATL", "ATM", "AUD", "AUG", "AUM", "AV", "AVG", "AWS",
|
||||||
"CEX", "CFD", "CFO", "CHF", "CHIPS", "CIA", "CLOSE", "CNBC", "CNY", "COKE",
|
"BABY", "BAD", "BAG", "BAGS", "BALLS", "BAN", "BANG", "BASIC", "BBB", "BBBY",
|
||||||
"COME", "COST", "COULD", "CPAP", "CPI", "CSE", "CST", "CTB", "CTO", "CYCLE",
|
"BE", "BEAR", "BEARS", "BECN", "BEER", "BELL", "BELOW", "BETA", "BETS", "BF",
|
||||||
"CZK", "DAO", "DATE", "DAX", "DAY", "DCA", "DD", "DEBT", "DEX", "DIA",
|
"BID", "BIG", "BIS", "BITCH", "BKEY", "BLEND", "BLOW", "BMW", "BNP", "BNPL",
|
||||||
"DIV", "DJIA", "DKK", "DM", "DO", "DOE", "DOGE", "DOJ", "DONT", "DR",
|
"BOARD", "BOE", "BOJ", "BOLL", "BOMB", "BOND", "BONED", "BORN", "BOTH", "BOTS",
|
||||||
"EACH", "EARLY", "EARN", "ECB", "EDGAR", "EDIT", "EDT", "EMA", "END", "EOD",
|
"BOY", "BOYS", "BRB", "BRICS", "BRK", "BRKA", "BRKB", "BRL", "BROKE", "BRRRR",
|
||||||
"EOW", "EOY", "EPA", "EPS", "ER", "ESG", "EST", "ETF", "ETFS", "ETH",
|
"BS", "BSE", "BST", "BSU", "BT", "BTC", "BTS", "BTW", "BUDDY", "BULL",
|
||||||
"EU", "EUR", "EV", "EVEN", "EVERY", "FAQ", "FAR", "FAST", "FBI", "FD",
|
"BULLS", "BUST", "BUT", "BUY", "BUZZ", "CAD", "CAFE", "CAGR", "CALL", "CALLS",
|
||||||
"FDA", "FIHTX", "FINRA", "FINT", "FINTX", "FINTY", "FIRST", "FOMC", "FOMO", "FOR",
|
"CAN", "CAP", "CARB", "CARES", "CASE", "CATL", "CBD", "CBGM", "CBS", "CCI",
|
||||||
"FOREX", "FRAUD", "FRG", "FROM", "FSPSX", "FTSE", "FUCK", "FUD", "FULL", "FUND",
|
"CCP", "CD", "CDN", "CEO", "CEST", "CET", "CEX", "CFD", "CFO", "CFPB",
|
||||||
"FXAIX", "FXIAX", "FY", "FYI", "FZROX", "GAAP", "GAIN", "GBP", "GDP", "GET",
|
"CHART", "CHASE", "CHATS", "CHECK", "CHF", "CHICK", "CHIP", "CHIPS", "CIA", "CIC",
|
||||||
"GL", "GLHF", "GMT", "GO", "GOAL", "GOAT", "GOING", "GPT", "GPU", "GRAB",
|
"CLAIM", "CLEAN", "CLICK", "CLOSE", "CMON", "CN", "CNBC", "CNN", "CNY", "COBRA",
|
||||||
"GTG", "HALF", "HAS", "HATE", "HAVE", "HEAR", "HEDGE", "HELP", "HIGH", "HINT",
|
"COCK", "COGS", "COIL", "COKE", "COME", "COST", "COULD", "COVID", "CPAP", "CPI",
|
||||||
"HKD", "HODL", "HOLD", "HOUR", "HSA", "HUF", "IF", "II", "IKZ", "IMHO",
|
"CRA", "CRE", "CRO", "CRV", "CSE", "CSP", "CSS", "CST", "CTB", "CTEP",
|
||||||
"IMO", "IN", "INR", "IP", "IPO", "IRA", "IRS", "IS", "ISA", "ISM",
|
"CTO", "CUCKS", "CULT", "CUM", "CUSMA", "CUTS", "CUV", "CYCLE", "CZK", "DA",
|
||||||
"IST", "IT", "ITM", "IV", "IVV", "IWM", "JD", "JPOW", "JPY", "JST",
|
"DAILY", "DAO", "DART", "DATA", "DATE", "DAX", "DAY", "DAYS", "DCA", "DCF",
|
||||||
"JUST", "KARMA", "KEEP", "KNOW", "KO", "KRW", "LANGT", "LARGE", "LAST", "LATE",
|
"DD", "DEAL", "DEBT", "DEEZ", "DEMO", "DET", "DEX", "DGAF", "DIA", "DID",
|
||||||
"LATER", "LBO", "LEAP", "LEAPS", "LETS", "LFG", "LIKE", "LIMIT", "LLC", "LLM",
|
"DIDNT", "DIP", "DITM", "DIV", "DIY", "DJI", "DJIA", "DJTJ", "DKK", "DL",
|
||||||
"LMAO", "LOKO", "LOL", "LONG", "LOOK", "LOSS", "LOVE", "LOW", "M&A", "MA",
|
"DM", "DMV", "DNI", "DNUTZ", "DO", "DOD", "DOE", "DOES", "DOGE", "DOING",
|
||||||
"MACD", "MAKE", "MAX", "MC", "ME", "MEME", "MERK", "MEXC", "MID", "MIGHT",
|
"DOJ", "DOM", "DONNY", "DONT", "DONUT", "DOOR", "DOWN", "DOZEN", "DPI", "DR",
|
||||||
"MIN", "MIND", "ML", "MOASS", "MONTH", "MORE", "MSK", "MUSIC", "MUST", "MXN",
|
"DUDE", "DUMP", "DUNT", "DUT", "DUTY", "DXY", "DXYXBT", "DYI", "DYNK", "DYODD",
|
||||||
"MY", "NATO", "NEAR", "NEED", "NEVER", "NEW", "NEXT", "NFA", "NFC", "NFT",
|
"DYOR", "EACH", "EARLY", "EARN", "EAST", "EASY", "EBIT", "ECB", "EDGAR", "EDIT",
|
||||||
"NGMI", "NIGHT", "NO", "NOK", "NONE", "NOT", "NOW", "NSA", "NULL", "NUT",
|
"EDT", "EJ", "EMA", "EMJ", "EMT", "END", "ENRON", "ENSI", "ENV", "EO",
|
||||||
"NYSE", "NZD", "OBV", "OEM", "OF", "OG", "OK", "OLD", "ON", "ONE",
|
"EOD", "EOM", "EOW", "EOY", "EPA", "EPK", "EPS", "ER", "ESG", "ESPP",
|
||||||
"ONLY", "OP", "OPEX", "OR", "OS", "OSCE", "OTC", "OTM", "OUGHT", "OUT",
|
"EST", "ETA", "ETF", "ETFS", "ETH", "ETHT", "ETL", "EU", "EUR", "EV",
|
||||||
"OVER", "OWN", "PANIC", "PC", "PDT", "PE", "PEAK", "PEG", "PEW", "PLAN",
|
"EVEN", "EVERY", "EVTOL", "EXTRA", "EYES", "EZ", "FAANG", "FAFO", "FAQ", "FAR",
|
||||||
"PLN", "PM", "PMI", "POC", "POS", "PPI", "PR", "PRICE", "PROFIT", "PSA",
|
"FAST", "FBI", "FCC", "FCFF", "FD", "FDA", "FED", "FEE", "FFH", "FFS",
|
||||||
"PST", "PT", "PUT", "Q1", "Q2", "Q3", "Q4", "QQQ", "QR", "RBA",
|
"FGMA", "FIG", "FIGMA", "FIHTX", "FILES", "FINAL", "FIND", "FING", "FINRA", "FINT",
|
||||||
"RBNZ", "RE", "REAL", "REIT", "REKT", "RH", "RIGHT", "RIP", "RISK", "ROCK",
|
"FINTX", "FINTY", "FIRE", "FIRST", "FKIN", "FLOAT", "FLRAA", "FLT", "FLY", "FML",
|
||||||
"ROE", "ROFL", "ROI", "ROTH", "RSD", "RSI", "RUB", "RULE", "SAME", "SAVE",
|
"FOLO", "FOMC", "FOMO", "FOR", "FOREX", "FRAUD", "FREAK", "FRED", "FRG", "FROM",
|
||||||
"SCALP", "SCAM", "SCHB", "SEC", "SEE", "SEK", "SELL", "SEP", "SGD", "SHALL",
|
"FRP", "FRS", "FSBO", "FSD", "FSE", "FSELK", "FSPSX", "FTD", "FTSE", "FUCK",
|
||||||
"SHARE", "SHORT", "SL", "SMA", "SMALL", "SO", "SOLIS", "SOME", "SOON", "SP",
|
"FUCKS", "FUD", "FULL", "FUND", "FUNNY", "FVG", "FWIW", "FX", "FXAIX", "FXIAX",
|
||||||
"SPAC", "SPEND", "SPLG", "SPX", "SPY", "START", "STILL", "STOCK", "STOP", "STOR",
|
"FXROX", "FY", "FYI", "FZROX", "GAAP", "GAIN", "GAV", "GAVE", "GBP", "GC",
|
||||||
"SWING", "TA", "TAG", "TAKE", "TERM", "THANK", "THAT", "THE", "THINK", "THIS",
|
"GDP", "GET", "GFC", "GG", "GGTM", "GIVES", "GJ", "GL", "GLHF", "GMAT",
|
||||||
"TIME", "TITS", "TL", "TL;DR", "TLDR", "TO", "TODAY", "TOTAL", "TRADE", "TREND",
|
"GMI", "GMT", "GO", "GOAL", "GOAT", "GOD", "GOING", "GOLD", "GONE", "GONNA",
|
||||||
"TRUE", "TRY", "TTYL", "TWO", "UI", "UK", "UNDER", "UP", "US", "USA",
|
"GOODS", "GOPRO", "GPT", "GPU", "GRAB", "GREAT", "GREEN", "GSOV", "GST", "GTA",
|
||||||
"USD", "UTC", "VALUE", "VOO", "VP", "VR", "VTI", "WAGMI", "WANT", "WATCH",
|
"GTC", "GTFO", "GTG", "GUH", "GUNS", "GUY", "GUYS", "HAD", "HAHA", "HALF",
|
||||||
"WAY", "WE", "WEB3", "WEEK", "WHALE", "WHO", "WHY", "WIDE", "WILL", "WORDS",
|
"HAM", "HANDS", "HAS", "HATE", "HAVE", "HBAR", "HCOL", "HEAR", "HEDGE", "HEGE",
|
||||||
"WORTH", "WOULD", "WSB", "WTF", "XRP", "YES", "YET", "YIELD", "YOLO", "YOU",
|
"HELD", "HELE", "HELL", "HELP", "HERE", "HEY", "HFCS", "HFT", "HGTV", "HIGH",
|
||||||
"YOUR", "YOY", "YT", "YTD", "ZAR", "ZEN", "ZERO"
|
"HIGHS", "HINT", "HIS", "HITID", "HK", "HKD", "HKEX", "HODL", "HODOR", "HOF",
|
||||||
|
"HOLD", "HOLY", "HOME", "HOT", "HOUR", "HOURS", "HOW", "HS", "HSA", "HSI",
|
||||||
|
"HT", "HTCI", "HTF", "HTML", "HUF", "HUGE", "HV", "HYPE", "IANAL", "IATF",
|
||||||
|
"IB", "IBS", "ICSID", "ICT", "ID", "IDF", "IDK", "IF", "II", "IIRC",
|
||||||
|
"IKKE", "IKZ", "IM", "IMHO", "IMI", "IMO", "IN", "INC", "INR", "INTEL",
|
||||||
|
"INTO", "IP", "IPO", "IQVIA", "IRA", "IRAS", "IRC", "IRISH", "IRL", "IRMAA",
|
||||||
|
"IRS", "IS", "ISA", "ISIN", "ISM", "ISN", "IST", "IT", "ITC", "ITM",
|
||||||
|
"ITS", "ITWN", "IUIT", "IV", "IVV", "IWM", "IXL", "IXLH", "IYKYK", "JAVA",
|
||||||
|
"JD", "JDG", "JDM", "JE", "JFC", "JK", "JLR", "JMO", "JOBS", "JOIN",
|
||||||
|
"JOKE", "JP", "JPOW", "JPY", "JS", "JST", "JULY", "JUN", "JUST", "KARMA",
|
||||||
|
"KEEP", "KILL", "KING", "KK", "KLA", "KLP", "KNEW", "KNOW", "KO", "KOHLS",
|
||||||
|
"KPMG", "KRW", "LA", "LANGT", "LARGE", "LAST", "LATE", "LATER", "LBO", "LBTC",
|
||||||
|
"LCS", "LDL", "LEADS", "LEAP", "LEAPS", "LEARN", "LEGS", "LEI", "LET", "LETF",
|
||||||
|
"LETS", "LFA", "LFG", "LFP", "LG", "LGEN", "LID", "LIFE", "LIG", "LIGMA",
|
||||||
|
"LIKE", "LIMIT", "LIST", "LLC", "LLM", "LM", "LMAO", "LMAOO", "LMM", "LMN",
|
||||||
|
"LOANS", "LOKO", "LOL", "LOLOL", "LONG", "LONGS", "LOOK", "LOSE", "LOSS", "LOST",
|
||||||
|
"LOVE", "LOVES", "LOW", "LOWER", "LOWS", "LP", "LSS", "LTCG", "LUCID", "LUPD",
|
||||||
|
"LYC", "LYING", "M&A", "MA", "MACD", "MAIL", "MAKE", "MAKES", "MANGE", "MANY",
|
||||||
|
"MASON", "MAX", "MAY", "MAYBE", "MBA", "MC", "MCAP", "MCNA", "MCP", "ME",
|
||||||
|
"MEAN", "MEME", "MER", "MERGE", "MERK", "MES", "MEXC", "MF", "MFER", "MID",
|
||||||
|
"MIGHT", "MIN", "MIND", "MINS", "ML", "MLB", "MLS", "MM", "MMF", "MNQ",
|
||||||
|
"MOASS", "MODEL", "MODTX", "MOM", "MONEY", "MONGO", "MONTH", "MONY", "MOON", "MORE",
|
||||||
|
"MOST", "MOU", "MSK", "MTVGA", "MUCH", "MUSIC", "MUST", "MVA", "MXN", "MY",
|
||||||
|
"MYMD", "NASA", "NASDA", "NATO", "NAV", "NBA", "NBC", "NCAN", "NCR", "NEAR",
|
||||||
|
"NEAT", "NEED", "NEVER", "NEW", "NEWS", "NEXT", "NFA", "NFC", "NFL", "NFT",
|
||||||
|
"NGAD", "NGMI", "NIGHT", "NIQ", "NK", "NO", "NOK", "NON", "NONE", "NOOO",
|
||||||
|
"NOPE", "NORTH", "NOT", "NOVA", "NOW", "NQ", "NRI", "NSA", "NSCLC", "NSLC",
|
||||||
|
"NTG", "NTVS", "NULL", "NUT", "NUTS", "NUTZ", "NVIDIA", "NVM", "NW", "NY",
|
||||||
|
"NYSE", "NZ", "NZD", "OBBB", "OBI", "OBS", "OBV", "OCD", "OCF", "OCO",
|
||||||
|
"ODAT", "ODTE", "OEM", "OF", "OFA", "OFF", "OG", "OH", "OK", "OKAY",
|
||||||
|
"OL", "OLD", "OMFG", "OMG", "ON", "ONDAS", "ONE", "ONLY", "OP", "OPEC",
|
||||||
|
"OPENQ", "OPEX", "OPRN", "OR", "ORB", "ORDER", "ORTEX", "OS", "OSCE", "OSE",
|
||||||
|
"OSEBX", "OT", "OTC", "OTM", "OTOH", "OUCH", "OUGHT", "OUR", "OUT", "OVER",
|
||||||
|
"OWN", "OZZY", "PA", "PAID", "PANIC", "PC", "PDT", "PE", "PEAK", "PEG",
|
||||||
|
"PETA", "PEW", "PFC", "PGHL", "PIMCO", "PITA", "PLAN", "PLAYS", "PLC", "PLN",
|
||||||
|
"PM", "PMCC", "PMI", "PNL", "POC", "POMO", "POP", "POS", "POSCO", "POTUS",
|
||||||
|
"POV", "POW", "PPI", "PR", "PRICE", "PRIME", "PROFIT", "PROXY", "PS", "PSA",
|
||||||
|
"PST", "PT", "PTD", "PUSSY", "PUT", "PUTS", "PWC", "Q1", "Q2", "Q3",
|
||||||
|
"Q4", "QE", "QED", "QIMC", "QQQ", "QR", "RAM", "RATM", "RBA", "RBNZ",
|
||||||
|
"RE", "REACH", "READY", "REAL", "RED", "REIT", "REITS", "REKT", "REPE", "RFK",
|
||||||
|
"RH", "RICO", "RIDE", "RIGHT", "RIP", "RISK", "RISKY", "RNDC", "ROCE", "ROCK",
|
||||||
|
"ROE", "ROFL", "ROI", "ROIC", "ROTH", "RPO", "RRSP", "RSD", "RSI", "RT",
|
||||||
|
"RTD", "RUB", "RUG", "RULE", "RUST", "RVOL", "SAGA", "SALES", "SAME", "SAVE",
|
||||||
|
"SAYS", "SBF", "SBLOC", "SC", "SCALP", "SCAM", "SCHB", "SCIF", "SEC", "SEE",
|
||||||
|
"SEK", "SELL", "SELLL", "SEP", "SESG", "SET", "SFOR", "SGD", "SHALL", "SHARE",
|
||||||
|
"SHEIN", "SHELL", "SHIT", "SHORT", "SHOW", "SHS", "SHTF", "SI", "SICK", "SIGN",
|
||||||
|
"SL", "SLIM", "SLOW", "SMA", "SMALL", "SMFH", "SNZ", "SO", "SOLD", "SOLIS",
|
||||||
|
"SOME", "SOON", "SOOO", "SOUTH", "SP", "SPAC", "SPDR", "SPEND", "SPLG", "SPX",
|
||||||
|
"SPY", "SQUAD", "SS", "SSA", "SSDI", "START", "STAY", "STEEL", "STFU", "STILL",
|
||||||
|
"STO", "STOCK", "STOOQ", "STOP", "STOR", "STQQQ", "STUCK", "STUDY", "SUS", "SUSHI",
|
||||||
|
"SUV", "SWIFT", "SWING", "TA", "TAG", "TAKE", "TAM", "TBTH", "TEAMS", "TED",
|
||||||
|
"TEMU", "TERM", "TESLA", "TEXT", "TF", "TFNA", "TFSA", "THAN", "THANK", "THAT",
|
||||||
|
"THATS", "THE", "THEIR", "THEM", "THEN", "THERE", "THESE", "THEY", "THING", "THINK",
|
||||||
|
"THIS", "THROW", "TI", "TIA", "TIKR", "TIME", "TIMES", "TINA", "TITS", "TJR",
|
||||||
|
"TL", "TL;DR", "TLDR", "TNT", "TO", "TODAY", "TOLD", "TONS", "TOO", "TOS",
|
||||||
|
"TOT", "TOTAL", "TP", "TPU", "TRADE", "TREND", "TRUE", "TRUMP", "TRUST", "TRY",
|
||||||
|
"TSA", "TSMC", "TSP", "TSX", "TSXV", "TTIP", "TTM", "TTYL", "TURNS", "TWO",
|
||||||
|
"UAW", "UCITS", "UGH", "UI", "UK", "UNDER", "UNITS", "UNO", "UNTIL", "UP",
|
||||||
|
"US", "USA", "USD", "USMCA", "USSA", "USSR", "UTC", "VALID", "VALUE", "VAMOS",
|
||||||
|
"VAT", "VEIEN", "VEO", "VERY", "VFMXX", "VFV", "VI", "VISA", "VIX", "VLI",
|
||||||
|
"VOO", "VP", "VPAY", "VR", "VRVP", "VSUS", "VTI", "VUAG", "VW", "VWAP",
|
||||||
|
"VWCE", "VXN", "VXUX", "WAGER", "WAGMI", "WAIT", "WALL", "WANT", "WAS", "WATCH",
|
||||||
|
"WAY", "WBTC", "WE", "WEB", "WEB3", "WEEK", "WENT", "WERO", "WEST", "WHALE",
|
||||||
|
"WHAT", "WHEN", "WHERE", "WHICH", "WHILE", "WHO", "WHOS", "WHY", "WIDE", "WILL",
|
||||||
|
"WIRE", "WIRED", "WITH", "WL", "WON", "WOOPS", "WORDS", "WORTH", "WOULD", "WP",
|
||||||
|
"WRONG", "WSB", "WSJ", "WTF", "WV", "WWII", "WWIII", "X", "XAU", "XCUSE",
|
||||||
|
"XD", "XEQT", "XI", "XIV", "XMR", "XO", "XRP", "XX", "YEAH", "YEET",
|
||||||
|
"YES", "YET", "YIELD", "YM", "YMMV", "YOIR", "YOLO", "YOU", "YOUR", "YOY",
|
||||||
|
"YT", "YTD", "YUGE", "YUP", "YUPPP", "ZAR", "ZEN", "ZERO", "ZEV"
|
||||||
}
|
}
|
||||||
|
|
||||||
def format_and_print_list(word_set, words_per_line=10):
|
def format_and_print_list(word_set, words_per_line=10):
|
||||||
@@ -67,7 +135,7 @@ def format_and_print_list(word_set, words_per_line=10):
     # 3. Iterate through the sorted list and print words, respecting the line limit
     for i in range(0, len(sorted_words), words_per_line):
         # Get a chunk of words for the current line
-        line_chunk = sorted_words[i:i + words_per_line]
+        line_chunk = sorted_words[i : i + words_per_line]

         # Format each word with double quotes
        formatted_words = [f'"{word}"' for word in line_chunk]
@@ -86,6 +154,7 @@ def format_and_print_list(word_set, words_per_line=10):
     # 4. Print the closing brace
     print("}")


 # --- Main execution ---
 if __name__ == "__main__":
     format_and_print_list(COMMON_WORDS_BLACKLIST)
9
rstat_tool/gunicorn-cfg.py
Normal file
@@ -0,0 +1,9 @@
# -*- encoding: utf-8 -*-

bind = '0.0.0.0:5000'
workers = 4
worker_class = 'uvicorn.workers.UvicornWorker'
accesslog = '-'
loglevel = 'debug'
capture_output = True
enable_stdio_inheritance = True
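
A note on usage (the serving command and app module are assumptions, not part of this diff): a gunicorn config file like this is normally passed with `-c`, e.g. `gunicorn -c rstat_tool/gunicorn-cfg.py <app-module>`; the `uvicorn.workers.UvicornWorker` class implies the dashboard is served as an ASGI application on port 5000.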
51
rstat_tool/logger_setup.py
Normal file
@@ -0,0 +1,51 @@
# rstat_tool/logger_setup.py

import logging
import sys

logger = logging.getLogger("rstat_app")


def setup_logging(console_verbose=False, debug_mode=False):
    """
    Configures the application's logger with a new DEBUG level.
    """
    # The logger itself must be set to the lowest possible level (DEBUG).
    log_level = logging.DEBUG if debug_mode else logging.INFO
    logger.setLevel(log_level)

    logger.propagate = False
    if logger.hasHandlers():
        logger.handlers.clear()

    # File Handler (Always verbose at INFO level or higher)
    file_handler = logging.FileHandler("rstat.log", mode="a")
    file_handler.setLevel(logging.INFO)  # We don't need debug spam in the file usually
    file_formatter = logging.Formatter(
        "%(asctime)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
    )
    file_handler.setFormatter(file_formatter)
    logger.addHandler(file_handler)

    # Console Handler (Verbosity is controlled)
    console_handler = logging.StreamHandler(sys.stdout)
    console_formatter = logging.Formatter("%(message)s")
    console_handler.setFormatter(console_formatter)

    if debug_mode:
        console_handler.setLevel(logging.DEBUG)
    elif console_verbose:
        console_handler.setLevel(logging.INFO)
    else:
        console_handler.setLevel(logging.CRITICAL)

    logger.addHandler(console_handler)

    # YFINANCE LOGGER CAPTURE
    yfinance_logger = logging.getLogger("yfinance")
    yfinance_logger.propagate = False
    if yfinance_logger.hasHandlers():
        yfinance_logger.handlers.clear()
    yfinance_logger.setLevel(logging.WARNING)
    yfinance_logger.addHandler(console_handler)
    yfinance_logger.addHandler(file_handler)
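
A minimal usage sketch of this module (the call pattern follows from the code above; the message strings are made up):

# Hypothetical usage of rstat_tool/logger_setup.py.
from rstat_tool.logger_setup import setup_logging, logger as log

setup_logging(console_verbose=True, debug_mode=False)
log.info("goes to stdout and to rstat.log")
log.debug("suppressed unless debug_mode=True")
log.critical("always shown on the console, even in the default quiet mode")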
@@ -4,148 +4,487 @@ import argparse
 import json
 import os
 import time
+import sys
+from dotenv import load_dotenv
+from pathlib import Path
+from concurrent.futures import ThreadPoolExecutor

 import praw
 import yfinance as yf
-from dotenv import load_dotenv
+import pandas as pd

 from . import database
-from .ticker_extractor import extract_tickers
+from .ticker_extractor import extract_golden_tickers, extract_potential_tickers
 from .sentiment_analyzer import get_sentiment_score
+from .logger_setup import setup_logging, logger as log

-load_dotenv()
-MARKET_CAP_REFRESH_INTERVAL = 86400
-POST_AGE_LIMIT = 86400


 def load_subreddits(filepath):
+    """Loads a list of subreddits from a JSON file."""
     try:
-        with open(filepath, 'r') as f:
+        with open(filepath, "r") as f:
             return json.load(f).get("subreddits", [])
     except (FileNotFoundError, json.JSONDecodeError) as e:
-        print(f"Error loading config file '{filepath}': {e}")
+        log.error(f"Error loading config file '{filepath}': {e}")
         return None


-def get_financial_data(ticker_symbol):
-    try:
-        ticker = yf.Ticker(ticker_symbol)
-        data = { "market_cap": ticker.fast_info.get('marketCap'), "closing_price": ticker.fast_info.get('previousClose') }
-        return data
-    except Exception:
-        return {"market_cap": None, "closing_price": None}


 def get_reddit_instance():
+    """Initializes and returns a PRAW Reddit instance."""
+    env_path = Path(__file__).parent.parent / ".env"
+    load_dotenv(dotenv_path=env_path)
     client_id = os.getenv("REDDIT_CLIENT_ID")
     client_secret = os.getenv("REDDIT_CLIENT_SECRET")
     user_agent = os.getenv("REDDIT_USER_AGENT")
     if not all([client_id, client_secret, user_agent]):
-        print("Error: Reddit API credentials not found in .env file.")
+        log.error("Error: Reddit API credentials not found in .env file.")
         return None
-    return praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent=user_agent)
+    return praw.Reddit(
+        client_id=client_id, client_secret=client_secret, user_agent=user_agent
+    )


-def scan_subreddits(reddit, subreddits_list, post_limit=100, comment_limit=100, days_to_scan=1):
-    conn = database.get_db_connection()
-    post_age_limit = days_to_scan * 86400
-    current_time = time.time()
-
-    print(f"\nScanning {len(subreddits_list)} subreddit(s) for NEW posts in the last {days_to_scan} day(s)...")
-    for subreddit_name in subreddits_list:
-        try:
-            subreddit_id = database.get_or_create_entity(conn, 'subreddits', 'name', subreddit_name)
-            subreddit = reddit.subreddit(subreddit_name)
-            print(f"Scanning r/{subreddit_name}...")
-
-            for submission in subreddit.new(limit=post_limit):
-                if (current_time - submission.created_utc) > post_age_limit:
-                    print(f"  -> Reached posts older than the {days_to_scan}-day limit. Moving to next subreddit.")
-                    break
-
-                post_text = submission.title + " " + submission.selftext
-                tickers_in_post = extract_tickers(post_text)
-                if tickers_in_post:
-                    post_sentiment = get_sentiment_score(submission.title)
-                    for ticker_symbol in set(tickers_in_post):
-                        ticker_id = database.get_or_create_entity(conn, 'tickers', 'symbol', ticker_symbol)
-                        database.add_mention(conn, ticker_id, subreddit_id, submission.id, 'post', int(submission.created_utc), post_sentiment)
-
-                        ticker_info = database.get_ticker_info(conn, ticker_id)
-                        if not ticker_info['last_updated'] or (current_time - ticker_info['last_updated'] > MARKET_CAP_REFRESH_INTERVAL):
-                            print(f"  -> Fetching financial data for {ticker_symbol}...")
-                            financials = get_financial_data(ticker_symbol)
-                            database.update_ticker_financials(
-                                conn, ticker_id,
-                                financials['market_cap'] or ticker_info['market_cap'],
-                                financials['closing_price'] or ticker_info['closing_price']
-                            )
-
-                submission.comments.replace_more(limit=0)
-                all_comment_sentiments = []
-                for comment in submission.comments.list()[:comment_limit]:
-                    all_comment_sentiments.append(get_sentiment_score(comment.body))
-                    tickers_in_comment = extract_tickers(comment.body)
-                    if tickers_in_comment:
-                        comment_sentiment = get_sentiment_score(comment.body)
-                        for ticker_symbol in set(tickers_in_comment):
-                            ticker_id = database.get_or_create_entity(conn, 'tickers', 'symbol', ticker_symbol)
-                            database.add_mention(conn, ticker_id, subreddit_id, submission.id, 'comment', int(comment.created_utc), comment_sentiment)
-
-                avg_sentiment = sum(all_comment_sentiments) / len(all_comment_sentiments) if all_comment_sentiments else 0
+def fetch_financial_data(ticker_symbol):
+    """
+    Fetches market cap and the most recent closing price for a single ticker.
+    This function is designed to be thread-safe and robust.
+    """
+    try:
+        ticker = yf.Ticker(ticker_symbol)
+        market_cap = ticker.info.get("marketCap")
+        data = ticker.history(period="2d", auto_adjust=False)
+        closing_price = None
+        if not data.empty:
+            last_close_raw = data["Close"].iloc[-1]
+            if pd.notna(last_close_raw):
+                closing_price = float(last_close_raw)
+        return ticker_symbol, {"market_cap": market_cap, "closing_price": closing_price}
+    except Exception:
+        return ticker_symbol, None
+
+
+def _process_submission(submission, subreddit_id, conn, comment_limit):
+    """
+    Processes a single Reddit submission with a more precise "Golden Ticker" logic.
+    - If a '$' ticker exists anywhere, the entire submission is in "Golden Only" mode.
+    - Falls back to potential tickers only if no '$' tickers are found anywhere.
+    """
+    # 1. --- Establish Mode: Golden or Potential ---
+    # Scan the entire submission (title + selftext) to determine the mode.
+    post_text_for_discovery = submission.title + " " + submission.selftext
+    golden_tickers_in_post = extract_golden_tickers(post_text_for_discovery)
+
+    is_golden_mode = bool(golden_tickers_in_post)
+
+    if is_golden_mode:
+        log.info(
+            f"  -> Golden Ticker(s) Found: {', '.join(golden_tickers_in_post)}. Engaging Golden-Only Mode."
+        )
+        # In Golden Mode, we ONLY care about tickers with a '$'.
+        tickers_in_title = extract_golden_tickers(submission.title)
+    else:
+        log.info("  -> No Golden Tickers. Falling back to potential ticker search.")
+        # In Potential Mode, we look for any valid-looking capitalized word.
+        tickers_in_title = extract_potential_tickers(submission.title)
+
+    all_tickers_found_in_post = set(tickers_in_title)
+    ticker_id_cache = {}
+
+    # 2. --- Process Title Mentions ---
+    if tickers_in_title:
+        log.info(
+            f"  -> Title Mention(s): {', '.join(tickers_in_title)}. Attributing all comments."
+        )
+        post_sentiment = get_sentiment_score(submission.title)
+        for ticker_symbol in tickers_in_title:
+            # All title tickers are saved as 'post' type mentions
+            ticker_id = database.get_or_create_entity(
+                conn, "tickers", "symbol", ticker_symbol
+            )
+            ticker_id_cache[ticker_symbol] = ticker_id
+            database.add_mention(
+                conn,
+                ticker_id,
+                subreddit_id,
+                submission.id,
+                "post",
+                int(submission.created_utc),
+                post_sentiment,
+                comment_id=None,
+            )
+
+    # 3. --- Process Comments (Single, Efficient Loop) ---
+    submission.comments.replace_more(limit=0)
+    all_comments = submission.comments.list()[:comment_limit]
+
+    for comment in all_comments:
+        comment_sentiment = get_sentiment_score(comment.body)
+
+        if tickers_in_title:
+            # If the title had tickers, every comment is a mention for them.
+            # We don't need to scan the comment text for tickers here.
+            for ticker_symbol in tickers_in_title:
+                ticker_id = ticker_id_cache[ticker_symbol]  # Guaranteed to be in cache
+                database.add_mention(
+                    conn,
+                    ticker_id,
+                    subreddit_id,
+                    submission.id,
+                    "comment",
+                    int(comment.created_utc),
+                    comment_sentiment,
+                    comment_id=comment.id,
+                )
+        else:
+            # If no title tickers, we must scan the comment for direct mentions.
+            # The type of ticker we look for depends on the mode.
+            if is_golden_mode:
+                # This case is rare (no golden in title, but some in comments) but important.
+                tickers_in_comment = extract_golden_tickers(comment.body)
+            else:
+                tickers_in_comment = extract_potential_tickers(comment.body)
+
+            if tickers_in_comment:
+                all_tickers_found_in_post.update(tickers_in_comment)
+                for ticker_symbol in tickers_in_comment:
+                    ticker_id = database.get_or_create_entity(
+                        conn, "tickers", "symbol", ticker_symbol
+                    )
+                    database.add_mention(
+                        conn,
+                        ticker_id,
+                        subreddit_id,
+                        submission.id,
+                        "comment",
+                        int(comment.created_utc),
+                        comment_sentiment,
+                        comment_id=comment.id,
+                    )
+
+    # 4. --- Save Deep Dive Analysis ---
+    all_comment_sentiments = [get_sentiment_score(c.body) for c in all_comments]
+    avg_sentiment = (
+        sum(all_comment_sentiments) / len(all_comment_sentiments)
+        if all_comment_sentiments
+        else 0
+    )
     post_analysis_data = {
-        "post_id": submission.id, "title": submission.title,
+        "post_id": submission.id,
+        "title": submission.title,
         "post_url": f"https://reddit.com{submission.permalink}",
-        "subreddit_id": subreddit_id, "post_timestamp": int(submission.created_utc),
-        "comment_count": len(all_comment_sentiments), "avg_comment_sentiment": avg_sentiment
+        "subreddit_id": subreddit_id,
+        "post_timestamp": int(submission.created_utc),
+        "comment_count": len(all_comments),
+        "avg_comment_sentiment": avg_sentiment,
     }
     database.add_or_update_post_analysis(conn, post_analysis_data)
+
+    return all_tickers_found_in_post
+
+
+def scan_subreddits(
+    reddit,
+    subreddits_list,
+    post_limit=100,
+    comment_limit=100,
+    days_to_scan=1,
+    fetch_financials=True,
+):
+    """
+    Scans subreddits to discover mentions, then performs a single batch update for financials if enabled.
+    """
+    conn = database.get_db_connection()
+    post_age_limit = days_to_scan * 86400
+    current_time = time.time()
+    all_tickers_to_update = set()
+
+    log.info(f"Scanning {len(subreddits_list)} subreddit(s) for NEW posts...")
+    if not fetch_financials:
+        log.warning("NOTE: Financial data fetching is disabled for this run.")
+
+    for subreddit_name in subreddits_list:
+        try:
+            normalized_sub_name = subreddit_name.lower()
+            subreddit_id = database.get_or_create_entity(
+                conn, "subreddits", "name", normalized_sub_name
+            )
+            subreddit = reddit.subreddit(normalized_sub_name)
+            log.info(f"Scanning r/{normalized_sub_name}...")
+
+            for submission in subreddit.new(limit=post_limit):
+                if (current_time - submission.created_utc) > post_age_limit:
+                    log.info(
+                        f"  -> Reached posts older than the {days_to_scan}-day limit."
+                    )
+                    break
+
+                tickers_found = _process_submission(
+                    submission, subreddit_id, conn, comment_limit
+                )
+                if tickers_found:
+                    all_tickers_to_update.update(tickers_found)
+
         except Exception as e:
-            print(f"Could not scan r/{subreddit_name}. Error: {e}")
+            log.error(
+                f"Could not scan r/{normalized_sub_name}. Error: {e}", exc_info=True
+            )

     conn.close()
-    print("\n--- Scan Complete ---")
+    log.critical("\n--- Reddit Scan Complete ---")
+
+    if fetch_financials and all_tickers_to_update:
+        log.critical(
+            f"\n--- Starting Batch Financial Update for {len(all_tickers_to_update)} Discovered Tickers ---"
+        )
+
+        tickers_from_db = {t["symbol"]: t["id"] for t in database.get_all_tickers()}
+        tickers_needing_update_symbols = [
+            symbol for symbol in all_tickers_to_update if symbol in tickers_from_db
+        ]
+
+        financial_data_batch = {}
+        with ThreadPoolExecutor(max_workers=10) as executor:
+            results = executor.map(fetch_financial_data, tickers_needing_update_symbols)
+            for symbol, data in results:
+                if data:
+                    financial_data_batch[symbol] = data
+
+        if financial_data_batch:
+            conn = database.get_db_connection()
+            for symbol, financials in financial_data_batch.items():
+                database.update_ticker_financials(
+                    conn,
+                    tickers_from_db[symbol],
+                    financials.get("market_cap"),
+                    financials.get("closing_price"),
+                )
+            conn.close()
+        log.critical("--- Batch Financial Update Complete ---")


 def main():
     """Main function to run the Reddit stock analysis tool."""
-    parser = argparse.ArgumentParser(description="Analyze stock ticker mentions on Reddit.", formatter_class=argparse.RawTextHelpFormatter)
-    parser.add_argument("--config", default="subreddits.json", help="Path to the JSON file containing subreddits.\n(Default: subreddits.json)")
-    parser.add_argument("--subreddit", help="Scan a single subreddit, ignoring the config file.")
-    parser.add_argument("--days", type=int, default=1, help="Number of past days to scan for new posts.\n(Default: 1 for last 24 hours)")
-    parser.add_argument("-p", "--posts", type=int, default=200, help="Max posts to check per subreddit.\n(Default: 200)")
-    parser.add_argument("-c", "--comments", type=int, default=100, help="Number of comments to scan per post.\n(Default: 100)")
-    parser.add_argument("-l", "--limit", type=int, default=20, help="Number of tickers to show in the CLI report.\n(Default: 20)")
+    parser = argparse.ArgumentParser(
+        description="Analyze stock ticker mentions on Reddit.",
+        formatter_class=argparse.RawTextHelpFormatter,
+    )
+
+    parser.add_argument(
+        "-f",
+        "--config",
+        default="subreddits.json",
+        help="Path to the JSON file for scanning. (Default: subreddits.json)",
+    )
+    parser.add_argument(
+        "-s", "--subreddit", help="Scan a single subreddit, ignoring the config file."
+    )
+    parser.add_argument(
+        "-d",
+        "--days",
+        type=int,
+        default=1,
+        help="Number of past days to scan for new posts. (Default: 1)",
+    )
+    parser.add_argument(
+        "-p",
+        "--posts",
+        type=int,
+        default=200,
+        help="Max posts to check per subreddit. (Default: 200)",
+    )
+    parser.add_argument(
+        "-c",
+        "--comments",
+        type=int,
+        default=100,
+        help="Number of comments to scan per post. (Default: 100)",
+    )
+    parser.add_argument(
+        "-n",
+        "--no-financials",
+        action="store_true",
+        help="Disable fetching of financial data during the Reddit scan.",
+    )
+    parser.add_argument(
+        "--update-top-tickers",
+        action="store_true",
+        help="Update financial data only for tickers currently in the Top 10 daily/weekly dashboards.",
+    )
+    parser.add_argument(
+        "-u",
+        "--update-financials-only",
+        nargs="?",
+        const="ALL_TICKERS",  # A special value to signify "update all"
+        default=None,
+        metavar="TICKER",
+        help="Update financials. Provide a ticker symbol to update just one,\nor use the flag alone to update all tickers in the database.",
+    )
+    parser.add_argument(
+        "--debug",
+        action="store_true",
+        help="Enable detailed debug logging to the console.",
+    )
+    parser.add_argument(
+        "--stdout", action="store_true", help="Print all log messages to the console."
+    )

     args = parser.parse_args()
+    setup_logging(console_verbose=args.stdout, debug_mode=args.debug)

-    # --- THIS IS THE CORRECTED LOGIC BLOCK ---
-    if args.subreddit:
-        # If --subreddit is used, create a list with just that one.
-        subreddits_to_scan = [args.subreddit]
-        print(f"Targeted Scan Mode: Focusing on r/{args.subreddit}")
-    else:
-        # Otherwise, load from the config file.
-        print(f"Config Scan Mode: Loading subreddits from {args.config}")
-        # Use the correct argument name: args.config
-        subreddits_to_scan = load_subreddits(args.config)
-
-    if not subreddits_to_scan:
-        print("Error: No subreddits to scan. Please check your config file or --subreddit argument.")
-        return
-
-    # --- Initialize and Run ---
-    database.initialize_db()
-    database.clean_stale_tickers()
-
-    reddit = get_reddit_instance()
-    if not reddit: return
-
-    scan_subreddits(
-        reddit,
-        subreddits_to_scan,
-        post_limit=args.posts,
-        comment_limit=args.comments,
-        days_to_scan=args.days
-    )
-    database.generate_summary_report(limit=args.limit)
+    database.initialize_db()
+
+    if args.update_top_tickers:
+        # --- Mode 1: Update Top Tickers ---
+        log.critical("--- Starting Financial Data Update for Top Tickers ---")
+        top_daily = database.get_top_daily_ticker_symbols()
+        top_weekly = database.get_top_weekly_ticker_symbols()
+        all_sub_names = database.get_all_scanned_subreddits()
+        for sub_name in all_sub_names:
+            top_daily.extend(
+                database.get_top_daily_ticker_symbols_for_subreddit(sub_name)
+            )
+            top_weekly.extend(
+                database.get_top_weekly_ticker_symbols_for_subreddit(sub_name)
+            )
+        tickers_to_update = sorted(list(set(top_daily + top_weekly)))
+
+        if not tickers_to_update:
+            log.info("No top tickers found in the last week. Nothing to update.")
+        else:
+            log.info(
+                f"Found {len(tickers_to_update)} unique top tickers to update. Fetching in parallel..."
+            )
+
+            financial_data_batch = {}
+            successful_updates = 0
+            failed_updates = 0
+
+            with ThreadPoolExecutor(max_workers=10) as executor:
+                results = executor.map(fetch_financial_data, tickers_to_update)
+                for symbol, data in results:
+                    # A successful fetch is one where data is returned and has a closing price
+                    if data and data.get("closing_price") is not None:
+                        log.info(f"  -> SUCCESS: Fetched data for {symbol}")
+                        financial_data_batch[symbol] = data
+                        successful_updates += 1
+                    else:
+                        log.warning(
+                            f"  -> FAILED: Could not fetch valid financial data for {symbol}"
+                        )
+                        failed_updates += 1
+
+            if not financial_data_batch:
+                log.error("Failed to fetch any batch financial data. Aborting update.")
+            else:
+                conn = database.get_db_connection()
+                all_tickers_from_db = database.get_all_tickers()
+                ticker_map = {t["symbol"]: t["id"] for t in all_tickers_from_db}
+
+                for symbol, financials in financial_data_batch.items():
+                    if symbol in ticker_map:
+                        database.update_ticker_financials(
+                            conn,
+                            ticker_map[symbol],
+                            financials.get("market_cap"),
+                            financials.get("closing_price"),
+                        )
+                conn.close()
+
+            log.critical("--- Top Ticker Financial Data Update Complete ---")
+            log.critical(f"  Successful updates: {successful_updates}")
+            log.critical(f"  Failed updates: {failed_updates}")
+
+    elif args.update_financials_only:
+        # --- Mode 2: Update All or a Single Ticker ---
+        update_mode = args.update_financials_only
+        tickers_to_update = []
+        if update_mode == "ALL_TICKERS":
+            log.critical("--- Starting Financial Data Update for ALL tickers ---")
+            all_tickers_from_db = database.get_all_tickers()
+            tickers_to_update = [t["symbol"] for t in all_tickers_from_db]
+        else:
+            ticker_symbol_to_update = update_mode
+            log.critical(
+                f"--- Starting Financial Data Update for single ticker: {ticker_symbol_to_update} ---"
+            )
+            if database.get_ticker_by_symbol(ticker_symbol_to_update):
+                tickers_to_update = [ticker_symbol_to_update]
+            else:
+                log.error(
+                    f"Ticker '{ticker_symbol_to_update}' not found in the database."
+                )
+
+        if tickers_to_update:
+            log.info(
+                f"Found {len(tickers_to_update)} unique tickers to update. Fetching in parallel..."
+            )
+
+            financial_data_batch = {}
+            successful_updates = 0
+            failed_updates = 0
+
+            with ThreadPoolExecutor(max_workers=10) as executor:
+                results = executor.map(fetch_financial_data, tickers_to_update)
+                for symbol, data in results:
+                    # A successful fetch is one where data is returned and has a closing price
+                    if data and data.get("closing_price") is not None:
+                        log.info(f"  -> SUCCESS: Fetched data for {symbol}")
+                        financial_data_batch[symbol] = data
+                        successful_updates += 1
+                    else:
+                        log.warning(
+                            f"  -> FAILED: Could not fetch valid financial data for {symbol}"
+                        )
+                        failed_updates += 1
+
+            if not financial_data_batch:
+                log.error("Failed to fetch any batch financial data. Aborting update.")
+            else:
+                conn = database.get_db_connection()
+                all_tickers_from_db = database.get_all_tickers()
+                ticker_map = {t["symbol"]: t["id"] for t in all_tickers_from_db}
+
+                for symbol, financials in financial_data_batch.items():
+                    if symbol in ticker_map:
+                        database.update_ticker_financials(
+                            conn,
+                            ticker_map[symbol],
+                            financials.get("market_cap"),
+                            financials.get("closing_price"),
+                        )
+                conn.close()
+
+            log.critical("--- Financial Data Update Complete ---")
+            log.critical(f"  Successful updates: {successful_updates}")
+            log.critical(f"  Failed updates: {failed_updates}")
+
+    else:
+        # --- Mode 3: Default Reddit Scan ---
+        log.critical("--- Starting Reddit Scan Mode ---")
+        if args.subreddit:
+            subreddits_to_scan = [args.subreddit]
+            log.info(f"Targeted Scan Mode: Focusing on r/{args.subreddit}")
+        else:
+            log.info(f"Config Scan Mode: Loading subreddits from {args.config}")
+            subreddits_to_scan = load_subreddits(args.config)
+
+        if not subreddits_to_scan:
+            log.error("Error: No subreddits to scan.")
+            return
+
+        reddit = get_reddit_instance()
+        if not reddit:
+            return
+
+        scan_subreddits(
+            reddit,
+            subreddits_to_scan,
+            post_limit=args.posts,
+            comment_limit=args.comments,
+            days_to_scan=args.days,
+            fetch_financials=(not args.no_financials),
+        )


 if __name__ == "__main__":
     main()
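
As a usage sketch of the batch pattern above (the ticker symbols are hypothetical; fetch_financial_data returns a (symbol, data) tuple where data may be None):

# Sketch: parallel financial fetch, mirroring the batch update in scan_subreddits().
from concurrent.futures import ThreadPoolExecutor
from rstat_tool.main import fetch_financial_data

symbols = ["AAPL", "GME"]  # hypothetical tickers
with ThreadPoolExecutor(max_workers=10) as executor:
    for symbol, data in executor.map(fetch_financial_data, symbols):
        if data and data.get("closing_price") is not None:
            print(symbol, data["market_cap"], data["closing_price"])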
@@ -16,4 +16,4 @@ def get_sentiment_score(text):
     # The polarity_scores() method returns a dictionary with 'neg', 'neu', 'pos', and 'compound' scores.
     # We are most interested in the 'compound' score.
     scores = _analyzer.polarity_scores(text)
-    return scores['compound']
+    return scores["compound"]
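
For illustration, VADER's compound score is a single float in [-1.0, 1.0]; a rough sketch (the example strings and approximate values are made up):

# get_sentiment_score() returns only the 'compound' value.
from rstat_tool.sentiment_analyzer import get_sentiment_score

get_sentiment_score("I love this stock")   # positive, roughly +0.6
get_sentiment_score("this is a disaster")  # negative, roughly -0.6
get_sentiment_score("the market opened")   # neutral, close to 0.0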
@@ -3,9 +3,9 @@ import nltk
 # This will download the 'vader_lexicon' dataset
 # It only needs to be run once
 try:
-    nltk.data.find('sentiment/vader_lexicon.zip')
+    nltk.data.find("sentiment/vader_lexicon.zip")
     print("VADER lexicon is already downloaded.")
 except LookupError:
     print("Downloading VADER lexicon...")
-    nltk.download('vader_lexicon')
+    nltk.download("vader_lexicon")
     print("Download complete.")
@@ -5,71 +5,142 @@ import re
 # A set of common English words and acronyms that look like stock tickers.
 # This helps reduce false positives.
 COMMON_WORDS_BLACKLIST = {
-    "401K", "403B", "457B", "ABOUT", "ABOVE", "ADAM", "ADX", "AEDT", "AEST", "AH",
-    "AI", "ALL", "ALPHA", "ALSO", "AM", "AMA", "AMEX", "AND", "ANY", "AR",
-    "ARE", "ARK", "AROUND", "ASAP", "ASS", "ASSET", "AT", "ATH", "ATL", "ATM",
-    "AUD", "AWS", "BABY", "BAG", "BAGS", "BE", "BEAR", "BELOW", "BETA", "BIG",
-    "BIS", "BLEND", "BOE", "BOJ", "BOLL", "BOMB", "BOND", "BOTH", "BOTS", "BRB",
-    "BRL", "BS", "BST", "BSU", "BTC", "BTW", "BULL", "BUST", "BUT", "BUY",
-    "BUZZ", "CAD", "CALL", "CAN", "CAP", "CBS", "CCI", "CEO", "CEST", "CET",
-    "CEX", "CFD", "CFO", "CHF", "CHIPS", "CIA", "CLOSE", "CNBC", "CNY", "COKE",
-    "COME", "COST", "COULD", "CPAP", "CPI", "CSE", "CST", "CTB", "CTO", "CYCLE",
-    "CZK", "DAO", "DATE", "DAX", "DAY", "DCA", "DD", "DEBT", "DEX", "DIA",
-    "DIV", "DJIA", "DKK", "DM", "DO", "DOE", "DOGE", "DOJ", "DONT", "DR",
-    "EACH", "EARLY", "EARN", "ECB", "EDGAR", "EDIT", "EDT", "EMA", "END", "EOD",
-    "EOW", "EOY", "EPA", "EPS", "ER", "ESG", "EST", "ETF", "ETFS", "ETH",
-    "EU", "EUR", "EV", "EVEN", "EVERY", "FAQ", "FAR", "FAST", "FBI", "FD",
-    "FDA", "FIHTX", "FINRA", "FINT", "FINTX", "FINTY", "FIRST", "FOMC", "FOMO", "FOR",
-    "FOREX", "FRAUD", "FRG", "FROM", "FSPSX", "FTSE", "FUCK", "FUD", "FULL", "FUND",
-    "FXAIX", "FXIAX", "FY", "FYI", "FZROX", "GAAP", "GAIN", "GBP", "GDP", "GET",
-    "GL", "GLHF", "GMT", "GO", "GOAL", "GOAT", "GOING", "GPT", "GPU", "GRAB",
-    "GTG", "HALF", "HAS", "HATE", "HAVE", "HEAR", "HEDGE", "HELP", "HIGH", "HINT",
-    "HKD", "HODL", "HOLD", "HOUR", "HSA", "HUF", "IF", "II", "IKZ", "IMHO",
-    "IMO", "IN", "INR", "IP", "IPO", "IRA", "IRS", "IS", "ISA", "ISM",
-    "IST", "IT", "ITM", "IV", "IVV", "IWM", "JD", "JPOW", "JPY", "JST",
-    "JUST", "KARMA", "KEEP", "KNOW", "KO", "KRW", "LANGT", "LARGE", "LAST", "LATE",
-    "LATER", "LBO", "LEAP", "LEAPS", "LETS", "LFG", "LIKE", "LIMIT", "LLC", "LLM",
-    "LMAO", "LOKO", "LOL", "LONG", "LOOK", "LOSS", "LOVE", "LOW", "M&A", "MA",
-    "MACD", "MAKE", "MAX", "MC", "ME", "MEME", "MERK", "MEXC", "MID", "MIGHT",
-    "MIN", "MIND", "ML", "MOASS", "MONTH", "MORE", "MSK", "MUSIC", "MUST", "MXN",
-    "MY", "NATO", "NEAR", "NEED", "NEVER", "NEW", "NEXT", "NFA", "NFC", "NFT",
-    "NGMI", "NIGHT", "NO", "NOK", "NONE", "NOT", "NOW", "NSA", "NULL", "NUT",
-    "NYSE", "NZD", "OBV", "OEM", "OF", "OG", "OK", "OLD", "ON", "ONE",
-    "ONLY", "OP", "OPEX", "OR", "OS", "OSCE", "OTC", "OTM", "OUGHT", "OUT",
-    "OVER", "OWN", "PANIC", "PC", "PDT", "PE", "PEAK", "PEG", "PEW", "PLAN",
-    "PLN", "PM", "PMI", "POC", "POS", "PPI", "PR", "PRICE", "PROFIT", "PSA",
-    "PST", "PT", "PUT", "Q1", "Q2", "Q3", "Q4", "QQQ", "QR", "RBA",
-    "RBNZ", "RE", "REAL", "REIT", "REKT", "RH", "RIGHT", "RIP", "RISK", "ROCK",
-    "ROE", "ROFL", "ROI", "ROTH", "RSD", "RSI", "RUB", "RULE", "SAME", "SAVE",
-    "SCALP", "SCAM", "SCHB", "SEC", "SEE", "SEK", "SELL", "SEP", "SGD", "SHALL",
-    "SHARE", "SHORT", "SL", "SMA", "SMALL", "SO", "SOLIS", "SOME", "SOON", "SP",
-    "SPAC", "SPEND", "SPLG", "SPX", "SPY", "START", "STILL", "STOCK", "STOP", "STOR",
-    "SWING", "TA", "TAG", "TAKE", "TERM", "THANK", "THAT", "THE", "THINK", "THIS",
-    "TIME", "TITS", "TL", "TL;DR", "TLDR", "TO", "TODAY", "TOTAL", "TRADE", "TREND",
-    "TRUE", "TRY", "TTYL", "TWO", "UI", "UK", "UNDER", "UP", "US", "USA",
-    "USD", "UTC", "VALUE", "VOO", "VP", "VR", "VTI", "WAGMI", "WANT", "WATCH",
-    "WAY", "WE", "WEB3", "WEEK", "WHALE", "WHO", "WHY", "WIDE", "WILL", "WORDS",
-    "WORTH", "WOULD", "WSB", "WTF", "XRP", "YES", "YET", "YIELD", "YOLO", "YOU",
-    "YOUR", "YOY", "YT", "YTD", "ZAR", "ZEN", "ZERO"
+    "401K", "403B", "457B", "AAVE", "ABC", "ABOUT", "ABOVE", "ACAT", "ADAM", "ADHD",
+    "ADR", "ADS", "ADX", "AEDT", "AEST", "AF", "AFAIK", "AFTER", "AGENT", "AH",
+    "AI", "AINT", "AK", "AKSJE", "ALD", "ALGOS", "ALIVE", "ALL", "ALPHA", "ALSO",
+    "AM", "AMA", "AMEX", "AMK", "AMY", "AND", "ANSS", "ANY", "APES", "APL",
+    "APPL", "APPLE", "APR", "APUS", "APY", "AR", "ARBK", "ARE", "AREA", "ARH",
+    "ARK", "AROUND", "ART", "AS", "ASAP", "ASEAN", "ASK", "ASS", "ASSET", "AST",
+    "AT", "ATH", "ATL", "ATM", "AUD", "AUG", "AUM", "AV", "AVG", "AWS",
+    "BABY", "BAD", "BAG", "BAGS", "BALLS", "BAN", "BANG", "BASIC", "BBB", "BBBY",
+    "BE", "BEAR", "BEARS", "BECN", "BEER", "BELL", "BELOW", "BETA", "BETS", "BF",
+    "BID", "BIG", "BIS", "BITCH", "BKEY", "BLEND", "BLOW", "BMW", "BNP", "BNPL",
+    "BOARD", "BOE", "BOJ", "BOLL", "BOMB", "BOND", "BONED", "BORN", "BOTH", "BOTS",
+    "BOY", "BOYS", "BRB", "BRICS", "BRK", "BRKA", "BRKB", "BRL", "BROKE", "BRRRR",
+    "BS", "BSE", "BST", "BSU", "BT", "BTC", "BTS", "BTW", "BUDDY", "BULL",
+    "BULLS", "BUST", "BUT", "BUY", "BUZZ", "CAD", "CAFE", "CAGR", "CALL", "CALLS",
+    "CAN", "CAP", "CARB", "CARES", "CASE", "CATL", "CBD", "CBGM", "CBS", "CCI",
+    "CCP", "CD", "CDN", "CEO", "CEST", "CET", "CEX", "CFD", "CFO", "CFPB",
+    "CHART", "CHASE", "CHATS", "CHECK", "CHF", "CHICK", "CHIP", "CHIPS", "CIA", "CIC",
+    "CLAIM", "CLEAN", "CLICK", "CLOSE", "CMON", "CN", "CNBC", "CNN", "CNY", "COBRA",
+    "COCK", "COGS", "COIL", "COKE", "COME", "COST", "COULD", "COVID", "CPAP", "CPI",
+    "CRA", "CRE", "CRO", "CRV", "CSE", "CSP", "CSS", "CST", "CTB", "CTEP",
+    "CTO", "CUCKS", "CULT", "CUM", "CUSMA", "CUTS", "CUV", "CYCLE", "CZK", "DA",
+    "DAILY", "DAO", "DART", "DATA", "DATE", "DAX", "DAY", "DAYS", "DCA", "DCF",
+    "DD", "DEAL", "DEBT", "DEEZ", "DEMO", "DET", "DEX", "DGAF", "DIA", "DID",
+    "DIDNT", "DIP", "DITM", "DIV", "DIY", "DJI", "DJIA", "DJTJ", "DKK", "DL",
+    "DM", "DMV", "DNI", "DNUTZ", "DO", "DOD", "DOE", "DOES", "DOGE", "DOING",
+    "DOJ", "DOM", "DONNY", "DONT", "DONUT", "DOOR", "DOWN", "DOZEN", "DPI", "DR",
+    "DUDE", "DUMP", "DUNT", "DUT", "DUTY", "DXY", "DXYXBT", "DYI", "DYNK", "DYODD",
+    "DYOR", "EACH", "EARLY", "EARN", "EAST", "EASY", "EBIT", "ECB", "EDGAR", "EDIT",
+    "EDT", "EJ", "EMA", "EMJ", "EMT", "END", "ENRON", "ENSI", "ENV", "EO",
+    "EOD", "EOM", "EOW", "EOY", "EPA", "EPK", "EPS", "ER", "ESG", "ESPP",
+    "EST", "ETA", "ETF", "ETFS", "ETH", "ETHT", "ETL", "EU", "EUR", "EV",
+    "EVEN", "EVERY", "EVTOL", "EXTRA", "EYES", "EZ", "FAANG", "FAFO", "FAQ", "FAR",
+    "FAST", "FBI", "FCC", "FCFF", "FD", "FDA", "FED", "FEE", "FFH", "FFS",
+    "FGMA", "FIG", "FIGMA", "FIHTX", "FILES", "FINAL", "FIND", "FING", "FINRA", "FINT",
+    "FINTX", "FINTY", "FIRE", "FIRST", "FKIN", "FLOAT", "FLRAA", "FLT", "FLY", "FML",
+    "FOLO", "FOMC", "FOMO", "FOR", "FOREX", "FRAUD", "FREAK", "FRED", "FRG", "FROM",
+    "FRP", "FRS", "FSBO", "FSD", "FSE", "FSELK", "FSPSX", "FTD", "FTSE", "FUCK",
+    "FUCKS", "FUD", "FULL", "FUND", "FUNNY", "FVG", "FWIW", "FX", "FXAIX", "FXIAX",
+    "FXROX", "FY", "FYI", "FZROX", "GAAP", "GAIN", "GAV", "GAVE", "GBP", "GC",
+    "GDP", "GET", "GFC", "GG", "GGTM", "GIVES", "GJ", "GL", "GLHF", "GMAT",
+    "GMI", "GMT", "GO", "GOAL", "GOAT", "GOD", "GOING", "GOLD", "GONE", "GONNA",
+    "GOODS", "GOPRO", "GPT", "GPU", "GRAB", "GREAT", "GREEN", "GSOV", "GST", "GTA",
+    "GTC", "GTFO", "GTG", "GUH", "GUNS", "GUY", "GUYS", "HAD", "HAHA", "HALF",
+    "HAM", "HANDS", "HAS", "HATE", "HAVE", "HBAR", "HCOL", "HEAR", "HEDGE", "HEGE",
+    "HELD", "HELE", "HELL", "HELP", "HERE", "HEY", "HFCS", "HFT", "HGTV", "HIGH",
+    "HIGHS", "HINT", "HIS", "HITID", "HK", "HKD", "HKEX", "HODL", "HODOR", "HOF",
+    "HOLD", "HOLY", "HOME", "HOT", "HOUR", "HOURS", "HOW", "HS", "HSA", "HSI",
+    "HT", "HTCI", "HTF", "HTML", "HUF", "HUGE", "HV", "HYPE", "IANAL", "IATF",
+    "IB", "IBS", "ICSID", "ICT", "ID", "IDF", "IDK", "IF", "II", "IIRC",
+    "IKKE", "IKZ", "IM", "IMHO", "IMI", "IMO", "IN", "INC", "INR", "INTEL",
+    "INTO", "IP", "IPO", "IQVIA", "IRA", "IRAS", "IRC", "IRISH", "IRL", "IRMAA",
+    "IRS", "IS", "ISA", "ISIN", "ISM", "ISN", "IST", "IT", "ITC", "ITM",
+    "ITS", "ITWN", "IUIT", "IV", "IVV", "IWM", "IXL", "IXLH", "IYKYK", "JAVA",
+    "JD", "JDG", "JDM", "JE", "JFC", "JK", "JLR", "JMO", "JOBS", "JOIN",
+    "JOKE", "JP", "JPOW", "JPY", "JS", "JST", "JULY", "JUN", "JUST", "KARMA",
+    "KEEP", "KILL", "KING", "KK", "KLA", "KLP", "KNEW", "KNOW", "KO", "KOHLS",
+    "KPMG", "KRW", "LA", "LANGT", "LARGE", "LAST", "LATE", "LATER", "LBO", "LBTC",
+    "LCS", "LDL", "LEADS", "LEAP", "LEAPS", "LEARN", "LEGS", "LEI", "LET", "LETF",
+    "LETS", "LFA", "LFG", "LFP", "LG", "LGEN", "LID", "LIFE", "LIG", "LIGMA",
+    "LIKE", "LIMIT", "LIST", "LLC", "LLM", "LM", "LMAO", "LMAOO", "LMM", "LMN",
+    "LOANS", "LOKO", "LOL", "LOLOL", "LONG", "LONGS", "LOOK", "LOSE", "LOSS", "LOST",
+    "LOVE", "LOVES", "LOW", "LOWER", "LOWS", "LP", "LSS", "LTCG", "LUCID", "LUPD",
+    "LYC", "LYING", "M&A", "MA", "MACD", "MAIL", "MAKE", "MAKES", "MANGE", "MANY",
+    "MASON", "MAX", "MAY", "MAYBE", "MBA", "MC", "MCAP", "MCNA", "MCP", "ME",
+    "MEAN", "MEME", "MER", "MERGE", "MERK", "MES", "MEXC", "MF", "MFER", "MID",
+    "MIGHT", "MIN", "MIND", "MINS", "ML", "MLB", "MLS", "MM", "MMF", "MNQ",
+    "MOASS", "MODEL", "MODTX", "MOM", "MONEY", "MONGO", "MONTH", "MONY", "MOON", "MORE",
+    "MOST", "MOU", "MSK", "MTVGA", "MUCH", "MUSIC", "MUST", "MVA", "MXN", "MY",
+    "MYMD", "NASA", "NASDA", "NATO", "NAV", "NBA", "NBC", "NCAN", "NCR", "NEAR",
+    "NEAT", "NEED", "NEVER", "NEW", "NEWS", "NEXT", "NFA", "NFC", "NFL", "NFT",
+    "NGAD", "NGMI", "NIGHT", "NIQ", "NK", "NO", "NOK", "NON", "NONE", "NOOO",
+    "NOPE", "NORTH", "NOT", "NOVA", "NOW", "NQ", "NRI", "NSA", "NSCLC", "NSLC",
+    "NTG", "NTVS", "NULL", "NUT", "NUTS", "NUTZ", "NVIDIA", "NVM", "NW", "NY",
+    "NYSE", "NZ", "NZD", "OBBB", "OBI", "OBS", "OBV", "OCD", "OCF", "OCO",
+    "ODAT", "ODTE", "OEM", "OF", "OFA", "OFF", "OG", "OH", "OK", "OKAY",
+    "OL", "OLD", "OMFG", "OMG", "ON", "ONDAS", "ONE", "ONLY", "OP", "OPEC",
+    "OPENQ", "OPEX", "OPRN", "OR", "ORB", "ORDER", "ORTEX", "OS", "OSCE", "OSE",
+    "OSEBX", "OT", "OTC", "OTM", "OTOH", "OUCH", "OUGHT", "OUR", "OUT", "OVER",
+    "OWN", "OZZY", "PA", "PAID", "PANIC", "PC", "PDT", "PE", "PEAK", "PEG",
+    "PETA", "PEW", "PFC", "PGHL", "PIMCO", "PITA", "PLAN", "PLAYS", "PLC", "PLN",
+    "PM", "PMCC", "PMI", "PNL", "POC", "POMO", "POP", "POS", "POSCO", "POTUS",
+    "POV", "POW", "PPI", "PR", "PRICE", "PRIME", "PROFIT", "PROXY", "PS", "PSA",
+    "PST", "PT", "PTD", "PUSSY", "PUT", "PUTS", "PWC", "Q1", "Q2", "Q3",
+    "Q4", "QE", "QED", "QIMC", "QQQ", "QR", "RAM", "RATM", "RBA", "RBNZ",
+    "RE", "REACH", "READY", "REAL", "RED", "REIT", "REITS", "REKT", "REPE", "RFK",
+    "RH", "RICO", "RIDE", "RIGHT", "RIP", "RISK", "RISKY", "RNDC", "ROCE", "ROCK",
+    "ROE", "ROFL", "ROI", "ROIC", "ROTH", "RPO", "RRSP", "RSD", "RSI", "RT",
+    "RTD", "RUB", "RUG", "RULE", "RUST", "RVOL", "SAGA", "SALES", "SAME", "SAVE",
+    "SAYS", "SBF", "SBLOC", "SC", "SCALP", "SCAM", "SCHB", "SCIF", "SEC", "SEE",
+    "SEK", "SELL", "SELLL", "SEP", "SESG", "SET", "SFOR", "SGD", "SHALL", "SHARE",
+    "SHEIN", "SHELL", "SHIT", "SHORT", "SHOW", "SHS", "SHTF", "SI", "SICK", "SIGN",
+    "SL", "SLIM", "SLOW", "SMA", "SMALL", "SMFH", "SNZ", "SO", "SOLD", "SOLIS",
+    "SOME", "SOON", "SOOO", "SOUTH", "SP", "SPAC", "SPDR", "SPEND", "SPLG", "SPX",
+    "SPY", "SQUAD", "SS", "SSA", "SSDI", "START", "STAY", "STEEL", "STFU", "STILL",
+    "STO", "STOCK", "STOOQ", "STOP", "STOR", "STQQQ", "STUCK", "STUDY", "SUS", "SUSHI",
+    "SUV", "SWIFT", "SWING", "TA", "TAG", "TAKE", "TAM", "TBTH", "TEAMS", "TED",
+    "TEMU", "TERM", "TESLA", "TEXT", "TF", "TFNA", "TFSA", "THAN", "THANK", "THAT",
+    "THATS", "THE", "THEIR", "THEM", "THEN", "THERE", "THESE", "THEY", "THING", "THINK",
+    "THIS", "THROW", "TI", "TIA", "TIKR", "TIME", "TIMES", "TINA", "TITS", "TJR",
+    "TL", "TL;DR", "TLDR", "TNT", "TO", "TODAY", "TOLD", "TONS", "TOO", "TOS",
+    "TOT", "TOTAL", "TP", "TPU", "TRADE", "TREND", "TRUE", "TRUMP", "TRUST", "TRY",
+    "TSA", "TSMC", "TSP", "TSX", "TSXV", "TTIP", "TTM", "TTYL", "TURNS", "TWO",
+    "UAW", "UCITS", "UGH", "UI", "UK", "UNDER", "UNITS", "UNO", "UNTIL", "UP",
+    "US", "USA", "USD", "USMCA", "USSA", "USSR", "UTC", "VALID", "VALUE", "VAMOS",
+    "VAT", "VEIEN", "VEO", "VERY", "VFMXX", "VFV", "VI", "VISA", "VIX", "VLI",
+    "VOO", "VP", "VPAY", "VR", "VRVP", "VSUS", "VTI", "VUAG", "VW", "VWAP",
+    "VWCE", "VXN", "VXUX", "WAGER", "WAGMI", "WAIT", "WALL", "WANT", "WAS", "WATCH",
+    "WAY", "WBTC", "WE", "WEB", "WEB3", "WEEK", "WENT", "WERO", "WEST", "WHALE",
+    "WHAT", "WHEN", "WHERE", "WHICH", "WHILE", "WHO", "WHOS", "WHY", "WIDE", "WILL",
+    "WIRE", "WIRED", "WITH", "WL", "WON", "WOOPS", "WORDS", "WORTH", "WOULD", "WP",
+    "WRONG", "WSB", "WSJ", "WTF", "WV", "WWII", "WWIII", "X", "XAU", "XCUSE",
+    "XD", "XEQT", "XI", "XIV", "XMR", "XO", "XRP", "XX", "YEAH", "YEET",
+    "YES", "YET", "YIELD", "YM", "YMMV", "YOIR", "YOLO", "YOU", "YOUR", "YOY",
+    "YT", "YTD", "YUGE", "YUP", "YUPPP", "ZAR", "ZEN", "ZERO", "ZEV"
 }

-def extract_tickers(text):
-    """
-    Extracts potential stock tickers from a given piece of text.
-    A ticker is identified as a 1-5 character uppercase word, or a word prefixed with $.
-    """
-    # Regex to find potential tickers:
-    # 1. Words prefixed with $: $AAPL, $TSLA
-    # 2. All-caps words between 1 and 5 characters: GME, AMC
-    ticker_regex = r"\$[A-Z]{1,5}\b|\b[A-Z]{2,5}\b"
-    potential_tickers = re.findall(ticker_regex, text)
-
-    # Filter out common words and remove the '$' prefix
-    tickers = []
-    for ticker in potential_tickers:
-        cleaned_ticker = ticker.replace("$", "").upper()
-        if cleaned_ticker not in COMMON_WORDS_BLACKLIST:
-            tickers.append(cleaned_ticker)
-
-    return tickers
+def extract_golden_tickers(text):
+    """
+    Extracts ONLY tickers with a '$' prefix. This is the highest-confidence signal.
+    Returns a set of cleaned ticker symbols (e.g., {'TSLA', 'GME'}).
+    """
+    # Regex to find words prefixed with $: $AAPL, $TSLA
+    ticker_regex = r"\$[A-Z]{1,5}\b"
+    tickers = re.findall(ticker_regex, text)
+    # Clean the tickers by removing the '$' and return as a set
+    return {ticker.replace("$", "").upper() for ticker in tickers}
+
+
+def extract_potential_tickers(text):
+    """
+    Extracts potential tickers (all-caps words). This is a lower-confidence signal
+    used as a fallback when no golden tickers are present.
+    Returns a set of cleaned ticker symbols.
+    """
+    # Regex to find all-caps words between 2 and 5 characters: GME, AMC
+    ticker_regex = r"\b[A-Z]{2,5}\b"
+    potential_tickers = re.findall(ticker_regex, text)
+
+    # Filter out common blacklisted words
+    return {ticker for ticker in potential_tickers if ticker not in COMMON_WORDS_BLACKLIST}
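
A quick sketch of the two-tier extraction (the sample text is made up):

# '$'-prefixed symbols are the high-confidence signal; all-caps words are the
# blacklist-filtered fallback.
from rstat_tool.ticker_extractor import extract_golden_tickers, extract_potential_tickers

text = "YOLO $GME and maybe AMC, but NOT financial advice"
extract_golden_tickers(text)     # {'GME'}
extract_potential_tickers(text)  # {'GME', 'AMC'} after blacklist filtering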
36
run_daily_job.sh
Executable file
@@ -0,0 +1,36 @@
#!/bin/bash

BASE_DIR="/home/rstat/reddit_stock_analyzer"

# CRITICAL: Navigate to the project directory using an absolute path.
cd ${BASE_DIR}

# CRITICAL: Activate the virtual environment using an absolute path.
source ${BASE_DIR}/.venv/bin/activate

echo "--- Starting RSTAT Daily Job on $(date +%F) ---"

# 1. Scrape data from the last 24 hours and update prices for top tickers.
echo "Step 1: Scraping new data..."
rstat --no-financials --comments 256
rstat --update-top-tickers

# 2. Start the dashboard in the background.
echo "Step 2: Starting dashboard in background..."
rstat-dashboard &
DASHBOARD_PID=$!
sleep 10

# 3. Export the overall summary image.
echo "Step 3: Exporting overall summary image..."
python export_image.py --overall

# 4. Post the image to r/rstat.
echo "Step 4: Posting image to Reddit..."
python post_to_reddit.py --target-subreddit rstat

# 5. Clean up by stopping the dashboard server.
echo "Step 5: Stopping dashboard server..."
kill ${DASHBOARD_PID}

echo "--- RSTAT Daily Job Complete ---"
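
A script like this is normally driven by cron; a hypothetical entry (the schedule and log path are assumptions): `0 6 * * * /home/rstat/reddit_stock_analyzer/run_daily_job.sh >> /home/rstat/daily_job.log 2>&1`.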
20
setup.py
@@ -2,23 +2,25 @@

 from setuptools import setup, find_packages

-with open('requirements.txt') as f:
+with open("requirements.txt") as f:
     requirements = f.read().splitlines()

 setup(
-    name='reddit-stock-analyzer',
-    version='0.0.1',
-    author='Pål-Kristian Hamre',
-    author_email='its@pkhamre.com',
-    description='A command-line tool to analyze stock ticker mentions on Reddit.',
+    name="reddit-stock-analyzer",
+    version="0.0.2",
+    author="Pål-Kristian Hamre",
+    author_email="its@pkhamre.com",
+    description="A command-line tool to analyze stock ticker mentions on Reddit.",
     # This now correctly finds your 'rstat_tool' package
     packages=find_packages(),
     install_requires=requirements,
     entry_points={
-        'console_scripts': [
+        "console_scripts": [
             # The path is now 'package_name.module_name:function_name'
-            'rstat=rstat_tool.main:main',
-            'rstat-dashboard=rstat_tool.dashboard:start_dashboard',
+            "rstat=rstat_tool.main:main",
+            "rstat-dashboard=rstat_tool.dashboard:start_dashboard",
+            "rstat-cleanup=rstat_tool.cleanup:run_cleanup",
+            "rstat-flairs=rstat_tool.flair_finder:main",
         ],
     },
 )
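
With these entry points, a standard development install, e.g. `pip install -e .`, puts the `rstat`, `rstat-dashboard`, `rstat-cleanup` and `rstat-flairs` commands used by run_daily_job.sh on the PATH.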
BIN
static/apple-touch-icon.png
Normal file
Size: 3.1 KiB

2
static/css/input.css
Normal file
@@ -0,0 +1,2 @@
@import "tailwindcss";
@plugin "@tailwindcss/typography";

1441
static/css/style.css
Normal file

BIN
static/dogecoin_logo.png
Normal file
Size: 36 KiB

BIN
static/favicon-96x96.png
Normal file
Size: 1.6 KiB

BIN
static/favicon.ico
Normal file
Size: 15 KiB

3
static/favicon.svg
Normal file
@@ -0,0 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" width="200" height="200"><svg xmlns="http://www.w3.org/2000/svg" version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 200 200"><rect width="200" height="200" fill="url('#gradient')"></rect><defs><linearGradient id="SvgjsLinearGradient1001" gradientTransform="rotate(45 0.5 0.5)"><stop offset="0%" stop-color="#697f83"></stop><stop offset="100%" stop-color="#161f2f"></stop></linearGradient></defs><g><g fill="#b1d6bb" transform="matrix(12.518681318681319,0,0,12.518681318681319,14.808730859284879,189.00720071373405)" stroke="#498990" stroke-width="0.7"><path d="M8.87 0L6.36-5.02L4.50-5.02L4.50 0L1.07 0L1.07-14.22L6.67-14.22Q9.20-14.22 10.63-13.10Q12.05-11.97 12.05-9.92L12.05-9.92Q12.05-8.44 11.45-7.46Q10.85-6.48 9.57-5.88L9.57-5.88L12.54-0.15L12.54 0L8.87 0ZM4.50-11.57L4.50-7.67L6.67-7.67Q7.65-7.67 8.14-8.18Q8.63-8.69 8.63-9.61Q8.63-10.53 8.13-11.05Q7.64-11.57 6.67-11.57L6.67-11.57L4.50-11.57Z"></path></g></g></svg><style>@media (prefers-color-scheme: light) { :root { filter: none; } }
@media (prefers-color-scheme: dark) { :root { filter: none; } }
</style></svg>
Size: 1.2 KiB
21
static/site.webmanifest
Normal file
@@ -0,0 +1,21 @@
{
  "name": "MyWebSite",
  "short_name": "MySite",
  "icons": [
    {
      "src": "/web-app-manifest-192x192.png",
      "sizes": "192x192",
      "type": "image/png",
      "purpose": "maskable"
    },
    {
      "src": "/web-app-manifest-512x512.png",
      "sizes": "512x512",
      "type": "image/png",
      "purpose": "maskable"
    }
  ],
  "theme_color": "#ffffff",
  "background_color": "#ffffff",
  "display": "standalone"
}

BIN
static/web-app-manifest-192x192.png
Normal file
Size: 3.4 KiB

BIN
static/web-app-manifest-512x512.png
Normal file
Size: 16 KiB
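
For the manifest and icons to take effect, the base template presumably links them from its `<head>`, along the lines of `<link rel="manifest" href="/site.webmanifest">` and `<link rel="icon" href="/favicon.svg" type="image/svg+xml">` (the exact markup is not shown in this diff).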
@@ -1,18 +1,18 @@
 {
     "subreddits": [
+        "dividends",
+        "investing",
+        "options",
         "pennystocks",
+        "SecurityAnalysis",
         "Shortsqueeze",
         "smallstreetbets",
-        "wallstreetbets",
-        "Wallstreetbetsnew",
-        "wallstreetbets2",
+        "stockmarket",
         "stocks",
-        "RobinHoodPennyStocks",
-        "StocksAndTrading",
-        "investing",
-        "WallStreetBetsELITE",
+        "thetagang",
+        "Tollbugatabets",
         "ValueInvesting",
-        "Daytrading",
-        "Tollbugatabets"
+        "wallstreetbets",
+        "WallStreetBetsELITE"
     ]
 }
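
This file is what load_subreddits() in main.py reads: it returns the list under the "subreddits" key, and the `-f/--config` flag can point the scanner at an alternative file, while `-s/--subreddit` bypasses it entirely.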
27
tailwind.config.js
Normal file
@@ -0,0 +1,27 @@
/** @type {import('tailwindcss').Config} */
module.exports = {
  content: [
    './templates/**/*.html',
  ],
  safelist: [
    'text-violet-400',
    'text-lime-400',
    'text-cyan-400',
    'text-yellow-400',
    'text-red-400',
    'text-orange-400',
    'text-emerald-400',
    'text-blue-400',
    'text-gray-300',
    'text-pink-400'
  ],
  theme: {
    extend: {
      fontFamily: {
        sans: ['Inter', 'sans-serif'],
      },
    },
  },
  plugins: [
  ],
}
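
The committed `static/css/style.css` is presumably generated from `static/css/input.css` by the Tailwind CLI, something like `npx tailwindcss -i static/css/input.css -o static/css/style.css --minify`; the exact build command is an assumption and not part of this diff.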
51
templates/about.html
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
{% extends "dashboard_base.html" %}
|
||||||
|
|
||||||
|
{% block title %}About RSTAT{% endblock %}
|
||||||
|
|
||||||
|
{% block content %}
|
||||||
|
<!-- This outer div now handles the centering -->
|
||||||
|
<div class="flex flex-col items-center">
|
||||||
|
<div class="w-full max-w-3xl bg-slate-800/50 ring-1 ring-slate-700 rounded-2xl p-6 sm:p-10 shadow-2xl">
|
||||||
|
<div class="text-center mb-10">
|
||||||
|
<h1 class="text-3xl sm:text-4xl font-extrabold tracking-tight text-white">About RSTAT (beta)</h1>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- The 'prose' class will now work correctly inside this standard block flow -->
|
||||||
|
<article class="prose prose-slate prose-invert max-w-none">
|
||||||
|
<h2>What is this?</h2>
|
||||||
|
<p>RSTAT (Reddit Stock Analysis Tool) is an automated data pipeline that scans popular financial communities on
|
||||||
|
Reddit to identify and analyze trending stock tickers. It provides a daily and weekly snapshot of the most
|
||||||
|
discussed stocks, their social sentiment, and key financial data.</p>
|
||||||
|
|
||||||
|
<h2>How does it work?</h2>
|
||||||
|
<ul>
|
||||||
|
<li>A <strong>scraper</strong> runs on a schedule to read new posts and comments from a predefined list of
|
||||||
|
subreddits.</li>
|
||||||
|
<li>A <strong>sentiment analyzer</strong> scores each mention as Bullish, Bearish, or Neutral using a natural
|
||||||
|
language processing model.</li>
|
||||||
|
<li>A <strong>data fetcher</strong> enriches the ticker data with the latest closing price and market
|
||||||
|
capitalization from Yahoo Finance.</li>
|
||||||
|
<li>All data is stored in a local <strong>SQLite database</strong>.</li>
|
||||||
|
<li>This <strong>web dashboard</strong> reads from the database to provide a clean, interactive visualization of
|
||||||
|
the results.</li>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
<h2>Supporting the Project</h2>
|
||||||
|
<p>RSTAT is a <b>soon-to-be</b>free and open-source project. To ensure the dashboard remains fast and reliable, it is hosted on a
|
||||||
|
small virtual server with running costs of approximately $6 per month. And about $30 per year for he domain.
|
||||||
|
If you find this tool useful, donations are gratefully accepted via Dogecoin (DOGE).</p>
|
||||||
|
<div class="not-prose bg-slate-900/50 ring-1 ring-slate-700 rounded-lg p-3 text-center">
|
||||||
|
<code class="text-sm text-slate-200 break-all">DRTLo2BsBijY4MrLmNNHzmjZ5tVvpTebFE</code>
|
||||||
|
</div>
|
||||||
|
</article>
|
||||||
|
|
||||||
|
<footer class="mt-12 text-center">
|
||||||
|
<div class="text-xl font-extrabold tracking-tight text-white">r/rstat</div>
|
||||||
|
<div class="text-sm text-slate-400">
|
||||||
|
<a href="https://www.reddit.com/r/rstat/" target="_blank" class="hover:text-white transition-colors">visit us
|
||||||
|
for more.</a>
|
||||||
|
</div>
|
||||||
|
</footer>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{% endblock %}
|
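The "How does it work?" list above is the whole architecture in five bullets. As a rough orientation, the scrape-and-score step might look like the sketch below; the praw-style post objects, NLTK's VADER analyzer, the naive ticker regex, and the `mentions` table are all assumptions for illustration, since none of that code appears in this diff:

```python
# Illustrative sketch of the scrape-and-score step described on the about
# page. Library choices (praw-style post objects, NLTK's VADER) and the
# `mentions` table are assumptions, not the project's actual code.
import re
import sqlite3

from nltk.sentiment.vader import SentimentIntensityAnalyzer  # needs the vader_lexicon download

TICKER_RE = re.compile(r"\$?\b[A-Z]{2,5}\b")  # naive "$TSLA"-style matcher

def score_posts(posts, db: sqlite3.Connection) -> None:
    """Record one sentiment-scored row per ticker mention in a batch of posts."""
    analyzer = SentimentIntensityAnalyzer()
    for post in posts:  # e.g. praw submissions with .title and .subreddit
        score = analyzer.polarity_scores(post.title)["compound"]  # in [-1, 1]
        for symbol in TICKER_RE.findall(post.title):
            db.execute(
                "INSERT INTO mentions (symbol, subreddit, sentiment) VALUES (?, ?, ?)",
                (symbol.lstrip("$"), str(post.subreddit), score),
            )
    db.commit()
```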
@@ -1,109 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>{% block title %}Reddit Stock Dashboard{% endblock %}</title>
  <style>
    body {
      font-family: 'Inter', -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
      background-color: #f4f7f6;
      color: #333;
      margin: 0;
      line-height: 1.6;
    }
    .navbar {
      background-color: #ffffff;
      padding: 1rem 2rem;
      border-bottom: 1px solid #e0e0e0;
      display: flex;
      flex-wrap: wrap;
      gap: 1rem;
    }
    .navbar a {
      color: #555;
      text-decoration: none;
      font-weight: 600;
      padding: 0.5rem 1rem;
      border-radius: 6px;
      transition: background-color 0.2s ease-in-out, color 0.2s ease-in-out;
    }
    .navbar a:hover {
      background-color: #e9ecef;
      color: #000;
    }
    .container {
      max-width: 1000px;
      margin: 2rem auto;
      padding: 2rem;
      background-color: #ffffff;
      border-radius: 8px;
      box-shadow: 0 4px 12px rgba(0,0,0,0.05);
    }
    h1 {
      font-size: 1.75rem;
      font-weight: 700;
      margin-top: 0;
      border-bottom: 1px solid #eee;
      padding-bottom: 0.5rem;
    }
    table {
      width: 100%;
      border-collapse: collapse;
      margin-top: 2rem;
      font-size: 0.95rem;
    }
    th, td {
      padding: 1rem;
      text-align: left;
      border-bottom: 1px solid #e0e0e0;
    }
    th {
      font-weight: 600;
      text-transform: uppercase;
      font-size: 0.8rem;
      letter-spacing: 0.05em;
      color: #666;
    }
    tr:last-child td {
      border-bottom: none;
    }
    .sentiment-bullish { color: #28a745; font-weight: 600; }
    .sentiment-bearish { color: #dc3545; font-weight: 600; }
    .sentiment-neutral { color: #6c757d; }

    .post-card {
      border: 1px solid #e0e0e0;
      border-radius: 8px;
      padding: 1.5rem;
      margin-bottom: 1.5rem;
    }
    .post-card h3 {
      margin-top: 0;
      font-size: 1.2rem;
    }
    .post-card h3 a {
      color: #0056b3;
      text-decoration: none;
    }
    .post-card h3 a:hover {
      text-decoration: underline;
    }
    .post-meta {
      font-size: 0.9rem;
      color: #666;
    }
  </style>
</head>
<body>
  <header class="navbar">
    <a href="/">Overall</a>
    {% for sub in subreddits %}
    <a href="/subreddit/{{ sub }}">r/{{ sub }}</a>
    {% endfor %}
  </header>
  <main class="container">
    {% block content %}{% endblock %}
  </main>
</body>
</html>
119
templates/dashboard_base.html
Normal file
@@ -0,0 +1,119 @@
<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>{% block title %}RSTAT Dashboard{% endblock %}</title>

  <link rel="icon" type="image/png" href="{{ url_for('static', filename='favicon-96x96.png') }}" sizes="96x96" />
  <link rel="icon" href="{{ url_for('static', filename='favicon.ico') }}">
  <link rel="shortcut icon" type="image/svg+xml" href="{{ url_for('static', filename='favicon.svg') }}">
  <link rel="apple-touch-icon" sizes="180x180" href="{{ url_for('static', filename='apple-touch-icon.png') }}" />
  <link rel="manifest" href="{{ url_for('static', filename='site.webmanifest') }}" />

  <link rel="preconnect" href="https://fonts.googleapis.com">
  <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
  <link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800&display=swap" rel="stylesheet">
  <link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}">
  <script defer src="https://cdn.jsdelivr.net/npm/alpinejs@3.x.x/dist/cdn.min.js"></script>
  <style>
    /* This sets the custom font as the default for the page */
    body {
      font-family: 'Inter', sans-serif;
    }

    [class*="text-"]>a {
      color: inherit;
      text-decoration: none;
      transition: color 0.2s ease-in-out;
    }

    [class*="text-"]>a:hover {
      color: #ffffff;
    }
  </style>
</head>

<body class="bg-slate-900 text-slate-200 min-h-screen">

  {% if not is_image_mode %}
  <header class="p-4 sm:p-6 w-full">
    <nav
      class="w-full max-w-7xl mx-auto bg-slate-800/50 ring-1 ring-slate-700 rounded-xl p-4 flex flex-col sm:flex-row items-center gap-4">
      <div class="flex items-center gap-4">
        <!-- Home Link -->
        <a href="/"
          class="font-bold {% if not subreddit_name %}text-white{% else %}text-slate-400 hover:text-white{% endif %} transition-colors">Home</a>

        <!-- Alpine.js Dropdown Component -->
        <div x-data="{ isOpen: false }" class="relative">
          <!-- The Button that toggles the 'isOpen' state -->
          <button @click="isOpen = !isOpen"
            class="font-bold flex items-center gap-1 cursor-pointer {% if subreddit_name %}text-white{% else %}text-slate-400 hover:text-white{% endif %} transition-colors">
            <span>Subreddits</span>
            <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none"
              stroke="currentColor" stroke-width="3" stroke-linecap="round" stroke-linejoin="round"
              class="transition-transform duration-200" :class="{'rotate-180': isOpen}">
              <polyline points="6 9 12 15 18 9"></polyline>
            </svg>
          </button>

          <!-- The Dropdown Menu, controlled by Alpine.js -->
          <div x-show="isOpen" @click.outside="isOpen = false"
            x-transition:enter="transition ease-out duration-100"
            x-transition:enter-start="opacity-0 scale-95" x-transition:enter-end="opacity-100 scale-100"
            x-transition:leave="transition ease-in duration-75"
            x-transition:leave-start="opacity-100 scale-100" x-transition:leave-end="opacity-0 scale-95"
            class="absolute left-0 mt-2 bg-slate-800 ring-1 ring-slate-700 shadow-lg rounded-lg py-1 w-48 z-10"
            style="display: none;">
            {% for sub in all_subreddits %}
            <a href="/subreddit/{{ sub }}"
              class="block px-4 py-2 text-sm text-slate-300 hover:bg-slate-700 hover:text-white">{{ sub }}</a>
            {% endfor %}
          </div>
        </div>
      </div>

      <div class="flex items-center gap-2 sm:ml-auto">
        <a href="?view=daily"
          class="px-3 py-1 rounded-md text-sm font-semibold {% if view_type == 'daily' %}bg-sky-500 text-white{% else %}bg-slate-700/50 text-slate-300 hover:bg-slate-700 hover:text-white{% endif %} transition-all">Daily</a>
        <a href="?view=weekly"
          class="px-3 py-1 rounded-md text-sm font-semibold {% if view_type == 'weekly' %}bg-sky-500 text-white{% else %}bg-slate-700/50 text-slate-300 hover:bg-slate-700 hover:text-white{% endif %} transition-all">Weekly</a>
        <a href="/about" title="About this Project"
          class="p-2 rounded-md text-slate-400 hover:bg-slate-700 hover:text-white transition-colors">
          <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none"
            stroke="currentColor" stroke-width="2.5" stroke-linecap="round" stroke-linejoin="round">
            <circle cx="12" cy="12" r="10"></circle>
            <line x1="12" y1="16" x2="12" y2="12"></line>
            <line x1="12" y1="8" x2="12.01" y2="8"></line>
          </svg>
        </a>
      </div>
    </nav>
  </header>
  {% endif %}

  <main class="w-full p-4 sm:p-6">
    {% block content %}{% endblock %}
  </main>

  {% if not is_image_mode %}
  <footer class="mt-8 text-center">
    <div class="flex items-center justify-center gap-2">
      <img src="{{ url_for('static', filename='dogecoin_logo.png') }}" alt="Doge" class="w-6 h-6">

      <!-- text-base makes the text larger -->
      <span class="text-base text-slate-400">
        Support this service:
        <!-- text-sm and p-2 make the code block larger -->
        <code class="text-sm bg-slate-800 p-2 rounded-lg text-slate-300">DRTLo2BsBijY4MrLmNNHzmjZ5tVvpTebFE</code>
      </span>
    </div>
  </footer>
  {% endif %}
</body>

</html>
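Worth noting in the base template: all of the chrome (nav, footer) sits inside `{% if not is_image_mode %}`, so the same templates can render a bare card for screenshotting. A minimal Flask-side sketch of how `?image=true` could thread that flag through; the route body and placeholder context values are assumptions, with only the context keys visible in these templates taken as given:

```python
# Minimal sketch, not the project's actual route: shows how ?image=true
# could set the is_image_mode flag that dashboard_base.html checks.
from flask import Flask, render_template, request

app = Flask(__name__)

@app.route("/")
def overall_dashboard():
    view_type = request.args.get("view", "daily")        # ?view=daily|weekly
    is_image_mode = request.args.get("image") == "true"  # ?image=true strips nav/footer
    return render_template(
        "dashboard_view.html",
        view_type=view_type,
        is_image_mode=is_image_mode,
        tickers=[],            # placeholders: real values come from the SQLite DB
        title="RSTAT Dashboard",
        subtitle="All Subreddits",
        date_string="",
        base_url="/",
        all_subreddits=[],
    )
```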
126
templates/dashboard_view.html
Normal file
@@ -0,0 +1,126 @@
{% extends "dashboard_base.html" %}

{% block title %}{{ title }}{% endblock %}

{% block content %}
<div class="flex flex-col items-center">
  <div
    class="w-full max-w-3xl bg-gradient-to-br from-slate-800 to-slate-900 ring-1 ring-slate-700 rounded-2xl p-6 sm:p-8 shadow-2xl">
    <header class="flex flex-col sm:flex-row justify-between sm:items-start mb-8">
      <div class="text-left">
        <h1 class="text-2xl sm:text-4xl font-extrabold tracking-tight text-white">Reddit Ticker Mentions</h1>
        <h2 class="text-lg sm:text-xl font-semibold mt-1 text-slate-400">{{ subtitle }}</h2>
      </div>
      <div class="text-left sm:text-right mt-2 sm:mt-0 flex-shrink-0">
        <div class="text-md font-semibold text-slate-400 whitespace-nowrap">{{ date_string }}</div>
        {% if not is_image_mode %}
        <a href="{{ base_url }}?view={{ view_type }}&image=true" class="inline-block mt-2 sm:float-right"
          title="View as Shareable Image">
          <svg class="text-slate-400 hover:text-white transition-colors" xmlns="http://www.w3.org/2000/svg"
            width="22" height="22" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2.5"
            stroke-linecap="round" stroke-linejoin="round">
            <path d="M23 19a2 2 0 0 1-2 2H3a2 2 0 0 1-2-2V8a2 2 0 0 1 2-2h4l2-3h6l2 3h4a2 2 0 0 1 2 2z"></path>
            <circle cx="12" cy="13" r="4"></circle>
          </svg>
        </a>
        {% endif %}
      </div>
    </header>

    {% set ticker_colors = {
      1: 'text-violet-400', 2: 'text-lime-400', 3: 'text-cyan-400',
      4: 'text-yellow-400', 5: 'text-red-400', 6: 'text-orange-400',
      7: 'text-emerald-400', 8: 'text-blue-400', 9: 'text-gray-300',
      10: 'text-pink-400'
    } %}

    <!-- Ticker List -->
    <div class="flex flex-col">

      <!-- 1. The Desktop Header Row (hidden on mobile) -->
      <div
        class="hidden sm:flex items-center text-xs font-bold text-slate-500 uppercase tracking-wider px-4 py-3 border-b border-slate-700">
        <div class="w-1/4 flex items-center gap-4 text-left">
          <span class="w-6 text-center">#</span>
          <span>Ticker</span>
        </div>
        <div class="w-3/4 grid grid-cols-4 gap-4 text-right">
          <div class="text-center">Mentions</div>
          <div class="text-center">Sentiment</div>
          <div>Mkt Cap</div>
          <div>Last Price</div>
        </div>
      </div>

      <!-- 2. Ticker Rows -->
      <div class="divide-y divide-slate-800">
        {% for ticker in tickers %}
        <!-- THIS IS THE UPDATED LINE -->
        <div
          class="p-4 flex flex-col sm:flex-row sm:items-center sm:gap-4 hover:bg-slate-800/50 transition-colors duration-150">
          <!-- Rank & Ticker Symbol -->
          <div class="flex items-center gap-4 w-full sm:w-1/4 text-left mb-4 sm:mb-0">
            <span class="text-lg font-bold text-slate-500 w-6 text-center">{{ loop.index }}</span>
            <div class="text-xl font-bold">
              <span class="{{ ticker_colors.get(loop.index, 'text-slate-200') }}">
                {% if is_image_mode %}
                {{ ticker.symbol }}
                {% else %}
                <a href="/deep-dive/{{ ticker.symbol }}">{{ ticker.symbol }}</a>
                {% endif %}
              </span>
            </div>
          </div>
          <!-- Financial Data Points -->
          <div class="w-full grid grid-cols-2 sm:grid-cols-4 gap-4 text-right">
            <div class="text-center sm:text-center">
              <div class="sm:hidden text-xs font-bold text-slate-500 uppercase tracking-wider mb-1">Mentions</div>
              <div class="text-lg font-semibold text-white">{{ ticker.total_mentions }}</div>
            </div>
            <div class="text-center sm:text-center">
              <div class="sm:hidden text-xs font-bold text-slate-500 uppercase tracking-wider mb-1">Sentiment</div>
              <div class="text-lg font-semibold">
                {% if ticker.bullish_mentions > ticker.bearish_mentions %}<span class="text-green-400">Bullish</span>
                {% elif ticker.bearish_mentions > ticker.bullish_mentions %}<span class="text-red-400">Bearish</span>
                {% else %}<span class="text-slate-400">Neutral</span>{% endif %}
              </div>
            </div>
            <div>
              <div class="sm:hidden text-xs font-bold text-slate-500 uppercase tracking-wider mb-1">Mkt Cap</div>
              <div class="text-lg font-semibold text-white">{{ ticker.market_cap | format_mc }}</div>
            </div>
            <div>
              <div class="sm:hidden text-xs font-bold text-slate-500 uppercase tracking-wider mb-1">Last Price</div>
              <div class="text-lg font-semibold text-white">
                {% if ticker.closing_price %}<a
                  href="https://www.marketwatch.com/investing/stock/{{ ticker.symbol }}" target="_blank"
                  class="hover:text-blue-400 transition-colors">${{ "%.2f"|format(ticker.closing_price) }}</a>
                {% else %}N/A{% endif %}
              </div>
            </div>
          </div>
        </div>
        {% else %}
        <div class="text-center text-slate-500 p-8">No ticker data found for this period.</div>
        {% endfor %}
      </div>
    </div>

    <footer class="mt-8 text-center">
      <div class="text-xl font-extrabold tracking-tight text-white">r/rstat</div>
      <div class="text-sm text-slate-400">
        <a href="https://www.reddit.com/r/rstat/" target="_blank"
          class="hover:text-white transition-colors">visit us for more.</a>
      </div>
    </footer>
  </div>
</div>

{% endblock %}
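dashboard_view.html leans on a custom `format_mc` Jinja filter for the Mkt Cap column; its definition is not part of this diff. A plausible stand-in, assuming it abbreviates a raw dollar value to T/B/M units:

```python
# Hypothetical stand-in for the `format_mc` filter used in the templates;
# the real implementation lives elsewhere in the app and may differ.
def format_mc(value):
    """Format a raw market cap in dollars as e.g. $1.23T, $456.79B, $12.00M."""
    if not value:
        return "N/A"
    for threshold, suffix in ((1e12, "T"), (1e9, "B"), (1e6, "M")):
        if value >= threshold:
            return f"${value / threshold:.2f}{suffix}"
    return f"${value:,.0f}"

# Registered on the Flask app so templates can write {{ x | format_mc }}:
# app.add_template_filter(format_mc, "format_mc")
```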
@@ -1,29 +1,54 @@
-{% extends "base.html" %}
+{% extends "dashboard_base.html" %}
 
 {% block title %}Deep Dive: {{ symbol }}{% endblock %}
 
 {% block content %}
-<h1>Deep Dive Analysis for: <strong>{{ symbol }}</strong></h1>
-<p>Showing posts that mention {{ symbol }}, sorted by most recent.</p>
+<!-- This outer div handles the centering -->
+<div class="flex flex-col items-center">
+  <div class="w-full max-w-3xl bg-slate-800/50 ring-1 ring-slate-700 rounded-2xl p-6 sm:p-10 shadow-2xl">
+
+    <!-- --- THIS IS THE KEY CHANGE --- -->
+    <!-- We wrap all the content in an <article> tag with the 'prose' classes -->
+    <article class="prose prose-slate prose-invert max-w-none">
+
+      <header class="text-center mb-8">
+        <!-- The h1 and p tags will now be beautifully styled by 'prose' -->
+        <h1>Deep Dive Analysis: <span class="text-sky-400">{{ symbol }}</span></h1>
+        <p>Showing posts that mention {{ symbol }}, sorted by most recent.</p>
+      </header>
+
+      <div class="space-y-4 not-prose">
 {% for post in posts %}
-<div class="post-card">
-  <h3><a href="{{ post.post_url }}" target="_blank">{{ post.title }}</a></h3>
-  <div class="post-meta">
-    <span>r/{{ post.subreddit_name }}</span> |
-    <span>{{ post.comment_count }} comments analyzed</span> |
+      <!-- 'not-prose' is used on the container so we can control styling precisely -->
+      <div class="bg-slate-800/50 ring-1 ring-slate-700/50 rounded-lg p-4 text-left not-prose">
+        <h3 class="text-lg font-bold text-slate-200 mb-2">
+          <!-- This link WILL be styled by the parent 'prose' class -->
+          <a href="{{ post.post_url }}" target="_blank">{{ post.title }}</a>
+        </h3>
+        <div class="text-sm text-slate-400 flex flex-col sm:flex-row sm:items-center gap-x-4 gap-y-1">
+          <span class="font-semibold">r/{{ post.subreddit_name }}</span>
+          <span class="hidden sm:inline">|</span>
+          <span>{{ post.comment_count }} comments analyzed</span>
+          <span class="hidden sm:inline">|</span>
     <span>Avg. Sentiment:
       {% if post.avg_comment_sentiment > 0.1 %}
-      <span class="sentiment-bullish">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
+      <span class="font-bold text-green-400">{{ "%.2f"|format(post.avg_comment_sentiment) }} (Bullish)</span>
       {% elif post.avg_comment_sentiment < -0.1 %}
-      <span class="sentiment-bearish">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
+      <span class="font-bold text-red-400">{{ "%.2f"|format(post.avg_comment_sentiment) }} (Bearish)</span>
       {% else %}
-      <span class="sentiment-neutral">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
+      <span class="font-bold text-slate-500">{{ "%.2f"|format(post.avg_comment_sentiment) }} (Neutral)</span>
      {% endif %}
     </span>
   </div>
 </div>
 {% else %}
-<p>No analyzed posts found for this ticker. Run the 'rstat' scraper to gather data.</p>
+      <div class="text-center text-slate-500 p-8 not-prose">No analyzed posts found for this ticker.</div>
 {% endfor %}
+      </div>
+
+    </article>
+  </div>
+</div>
 {% endblock %}
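Both the old and new deep-dive markup bucket `avg_comment_sentiment` at the same ±0.1 cutoffs, consistent with compound scores in [-1, 1] from a VADER-style analyzer. The same rule, reduced to a helper for clarity (a sketch; the project keeps this logic template-side):

```python
def sentiment_label(avg_compound: float) -> str:
    """Apply the templates' ±0.1 buckets to an average compound score."""
    if avg_compound > 0.1:
        return "Bullish"
    if avg_compound < -0.1:
        return "Bearish"
    return "Neutral"

assert sentiment_label(0.35) == "Bullish"
assert sentiment_label(-0.02) == "Neutral"
assert sentiment_label(-0.50) == "Bearish"
```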
@@ -1,116 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>r/{{ subreddit_name }} Mentions</title>
  <link rel="preconnect" href="https://fonts.googleapis.com">
  <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
  <link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700;800&display=swap" rel="stylesheet">
  <style>
    body {
      margin: 0;
      padding: 2rem;
      font-family: 'Inter', sans-serif;
      background: #1a1a1a;
      display: flex;
      justify-content: center;
      align-items: center;
      min-height: 100vh;
    }
    .image-container {
      width: 650px; /* Increased width to accommodate new column */
      background: linear-gradient(145deg, #4d302d, #1f2128);
      color: #ffffff;
      border-radius: 16px;
      padding: 2.5rem;
      box-shadow: 0 10px 30px rgba(0,0,0,0.5);
      text-align: center;
    }
    header {
      display: flex;
      justify-content: space-between;
      align-items: flex-start;
      margin-bottom: 2rem;
    }
    .title-block { text-align: left; }
    .title-block h1 { font-size: 2.5rem; font-weight: 800; margin: 0; line-height: 1; }
    .title-block h2 { font-size: 1.25rem; font-weight: 600; margin: 0.5rem 0 0; color: #b0b0b0; }
    .date { font-size: 1.1rem; font-weight: 600; color: #c0c0c0; letter-spacing: 0.02em; }
    table { width: 100%; border-collapse: collapse; text-align: left; }
    th, td { padding: 1rem 0.5rem; border-bottom: 1px solid rgba(255, 255, 255, 0.1); }
    th { font-weight: 700; text-transform: uppercase; font-size: 0.8rem; color: #a0a0a0; }
    td { font-size: 1.1rem; font-weight: 600; }
    tr:last-child td { border-bottom: none; }
    td.rank { font-weight: 700; color: #d0d0d0; width: 8%; }
    td.ticker { width: 30%; }
    td.mentions { text-align: center; width: 18%; }
    td.sentiment { text-align: center; width: 26%; } /* New width */

    /* Sentiment Colors */
    .sentiment-bullish { color: #28a745; font-weight: 700; }
    .sentiment-bearish { color: #dc3545; font-weight: 700; }
    .sentiment-neutral { color: #9e9e9e; font-weight: 600; }

    /* Row colors */
    tr:nth-child(1) td.ticker { color: #d8b4fe; } tr:nth-child(6) td.ticker { color: #fca5a5; }
    tr:nth-child(2) td.ticker { color: #a3e635; } tr:nth-child(7) td.ticker { color: #fdba74; }
    tr:nth-child(3) td.ticker { color: #67e8f9; } tr:nth-child(8) td.ticker { color: #6ee7b7; }
    tr:nth-child(4) td.ticker { color: #fde047; } tr:nth-child(9) td.ticker { color: #93c5fd; }
    tr:nth-child(5) td.ticker { color: #fcd34d; } tr:nth-child(10) td.ticker { color: #d1d5db; }

    footer { margin-top: 2.5rem; }
    .brand-name { font-size: 1.75rem; font-weight: 800; letter-spacing: -1px; }
    .brand-subtitle { font-size: 1rem; color: #b0b0b0; }
  </style>
</head>
<body>
  <div class="image-container">
    <header>
      <div class="title-block">
        <h1>Reddit Mentions</h1>
        <h2>r/{{ subreddit_name }}</h2>
      </div>
      <div class="date">{{ current_date }}</div>
    </header>

    <table>
      <thead>
        <tr>
          <th class="rank">Rank</th>
          <th class="ticker">Ticker</th>
          <th class="mentions">Posts</th>
          <th class="mentions">Comments</th>
          <!-- UPDATED: Added Sentiment column header -->
          <th class="sentiment">Sentiment</th>
        </tr>
      </thead>
      <tbody>
        {% for ticker in tickers %}
        <tr>
          <td class="rank">{{ loop.index }}</td>
          <td class="ticker">{{ ticker.symbol }}</td>
          <td class="mentions">{{ ticker.post_mentions }}</td>
          <td class="mentions">{{ ticker.comment_mentions }}</td>
          <!-- UPDATED: Added Sentiment data cell -->
          <td class="sentiment">
            {% if ticker.bullish_mentions > ticker.bearish_mentions %}
            <span class="sentiment-bullish">Bullish</span>
            {% elif ticker.bearish_mentions > ticker.bullish_mentions %}
            <span class="sentiment-bearish">Bearish</span>
            {% else %}
            <span class="sentiment-neutral">Neutral</span>
            {% endif %}
          </td>
        </tr>
        {% endfor %}
      </tbody>
    </table>

    <footer>
      <div class="brand-name">RSTAT</div>
      <div class="brand-subtitle">Reddit Stock Analysis Tool</div>
    </footer>
  </div>
</body>
</html>
@@ -1,44 +0,0 @@
{% extends "base.html" %}

{% block title %}Overall Dashboard{% endblock %}

{% block content %}
<h1>Top 10 Tickers (All Subreddits)</h1>
<table>
  <thead>
    <tr>
      <th>Ticker</th>
      <th>Mentions</th>
      <th>Market Cap</th>
      <th>Closing Price</th>
      <th>Sentiment</th>
    </tr>
  </thead>
  <tbody>
    {% for ticker in tickers %}
    <tr>
      <td><strong><a href="/deep-dive/{{ ticker.symbol }}">{{ ticker.symbol }}</a></strong></td>
      <td>{{ ticker.mention_count }}</td>
      <td>{{ ticker.market_cap | format_mc }}</td>
      <!-- NEW COLUMN FOR CLOSING PRICE -->
      <td>
        {% if ticker.closing_price %}
        ${{ "%.2f"|format(ticker.closing_price) }}
        {% else %}
        N/A
        {% endif %}
      </td>
      <td>
        {% if ticker.bullish_mentions > ticker.bearish_mentions %}
        <span class="sentiment-bullish">Bullish</span>
        {% elif ticker.bearish_mentions > ticker.bullish_mentions %}
        <span class="sentiment-bearish">Bearish</span>
        {% else %}
        <span class="sentiment-neutral">Neutral</span>
        {% endif %}
      </td>
    </tr>
    {% endfor %}
  </tbody>
</table>
{% endblock %}
@@ -1,49 +0,0 @@
{% extends "base.html" %}

{% block title %}r/{{ subreddit_name }} Dashboard{% endblock %}

{% block content %}
<h1>
  Top 10 Tickers in r/{{ subreddit_name }}
  <a href="/image/{{ subreddit_name }}" target="_blank" style="font-size: 0.8rem; margin-left: 1rem; font-weight: normal;">(View Daily Image)</a>
  <!-- ADD THIS NEW LINK -->
  <a href="/image/weekly/{{ subreddit_name }}" target="_blank" style="font-size: 0.8rem; margin-left: 1rem; font-weight: normal;">(View Weekly Image)</a>
</h1>
<table>
  <thead>
    <tr>
      <th>Ticker</th>
      <th>Mentions</th>
      <th>Market Cap</th>
      <th>Closing Price</th>
      <th>Sentiment</th>
    </tr>
  </thead>
  <tbody>
    {% for ticker in tickers %}
    <tr>
      <td><strong><a href="/deep-dive/{{ ticker.symbol }}">{{ ticker.symbol }}</a></strong></td>
      <td>{{ ticker.mention_count }}</td>
      <td>{{ ticker.market_cap | format_mc }}</td>
      <!-- NEW COLUMN FOR CLOSING PRICE -->
      <td>
        {% if ticker.closing_price %}
        ${{ "%.2f"|format(ticker.closing_price) }}
        {% else %}
        N/A
        {% endif %}
      </td>
      <td>
        {% if ticker.bullish_mentions > ticker.bearish_mentions %}
        <span class="sentiment-bullish">Bullish</span>
        {% elif ticker.bearish_mentions > ticker.bullish_mentions %}
        <span class="sentiment-bearish">Bearish</span>
        {% else %}
        <span class="sentiment-neutral">Neutral</span>
        {% endif %}
      </td>
    </tr>
    {% endfor %}
  </tbody>
</table>
{% endblock %}
@@ -1,93 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Weekly Sentiment: r/{{ subreddit_name }}</title>
  <!-- All the <style> and <link> tags from image_view.html go here -->
  <!-- You can just copy the entire <head> section from image_view.html -->
  <link rel="preconnect" href="https://fonts.googleapis.com">
  <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
  <link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700;800&display=swap" rel="stylesheet">
  <style>
    /* This entire style block is IDENTICAL to image_view.html */
    body { margin: 0; padding: 2rem; font-family: 'Inter', sans-serif; background: #1a1a1a; display: flex; justify-content: center; align-items: center; min-height: 100vh; }
    .image-container { width: 650px; background: linear-gradient(145deg, #4d302d, #1f2128); color: #ffffff; border-radius: 16px; padding: 2.5rem; box-shadow: 0 10px 30px rgba(0,0,0,0.5); text-align: center; }
    header { display: flex; justify-content: space-between; align-items: flex-start; margin-bottom: 2rem; }
    .title-block { text-align: left; }
    .title-block h1 { font-size: 2.5rem; font-weight: 800; margin: 0; line-height: 1; }
    .title-block h2 { font-size: 1.25rem; font-weight: 600; margin: 0.5rem 0 0; color: #b0b0b0; }
    .date { font-size: 1rem; font-weight: 600; color: #c0c0c0; letter-spacing: 0.02em; }
    table { width: 100%; border-collapse: collapse; text-align: left; }
    th, td { padding: 1rem 0.5rem; border-bottom: 1px solid rgba(255, 255, 255, 0.1); }
    th { font-weight: 700; text-transform: uppercase; font-size: 0.8rem; color: #a0a0a0; }
    td { font-size: 1.1rem; font-weight: 600; }
    tr:last-child td { border-bottom: none; }
    td.rank { font-weight: 700; color: #d0d0d0; width: 8%; }
    td.ticker { width: 30%; }
    td.mentions { text-align: center; width: 18%; }
    td.sentiment { text-align: center; width: 26%; }
    .sentiment-bullish { color: #28a745; font-weight: 700; }
    .sentiment-bearish { color: #dc3545; font-weight: 700; }
    .sentiment-neutral { color: #9e9e9e; font-weight: 600; }
    tr:nth-child(1) td.ticker { color: #d8b4fe; } tr:nth-child(6) td.ticker { color: #fca5a5; }
    tr:nth-child(2) td.ticker { color: #a3e635; } tr:nth-child(7) td.ticker { color: #fdba74; }
    tr:nth-child(3) td.ticker { color: #67e8f9; } tr:nth-child(8) td.ticker { color: #6ee7b7; }
    tr:nth-child(4) td.ticker { color: #fde047; } tr:nth-child(9) td.ticker { color: #93c5fd; }
    tr:nth-child(5) td.ticker { color: #fcd34d; } tr:nth-child(10) td.ticker { color: #d1d5db; }
    footer { margin-top: 2.5rem; }
    .brand-name { font-size: 1.75rem; font-weight: 800; letter-spacing: -1px; }
    .brand-subtitle { font-size: 1rem; color: #b0b0b0; }
  </style>
</head>
<body>
  <div class="image-container">
    <header>
      <div class="title-block">
        <!-- UPDATED: Title shows it's a weekly report -->
        <h1>Weekly Sentiment</h1>
        <h2>r/{{ subreddit_name }} - Top 10</h2>
      </div>
      <!-- UPDATED: Date now shows the range -->
      <div class="date">{{ date_range }}</div>
    </header>

    <!-- The entire table structure is IDENTICAL to image_view.html -->
    <table>
      <thead>
        <tr>
          <th class="rank">Rank</th>
          <th class="ticker">Ticker</th>
          <th class="mentions">Posts</th>
          <th class="mentions">Comments</th>
          <th class="sentiment">Sentiment</th>
        </tr>
      </thead>
      <tbody>
        {% for ticker in tickers %}
        <tr>
          <td class="rank">{{ loop.index }}</td>
          <td class="ticker">{{ ticker.symbol }}</td>
          <td class="mentions">{{ ticker.post_mentions }}</td>
          <td class="mentions">{{ ticker.comment_mentions }}</td>
          <td class="sentiment">
            {% if ticker.bullish_mentions > ticker.bearish_mentions %}
            <span class="sentiment-bullish">Bullish</span>
            {% elif ticker.bearish_mentions > ticker.bullish_mentions %}
            <span class="sentiment-bearish">Bearish</span>
            {% else %}
            <span class="sentiment-neutral">Neutral</span>
            {% endif %}
          </td>
        </tr>
        {% endfor %}
      </tbody>
    </table>

    <footer>
      <div class="brand-name">RSTAT</div>
      <div class="brand-subtitle">Reddit Stock Analysis Tool</div>
    </footer>
  </div>
</body>
</html>
59
yfinance_test.py
Normal file
@@ -0,0 +1,59 @@
# yfinance_test.py
# A standalone script to diagnose the persistent yfinance issue.

import yfinance as yf
import logging

# Set up a simple logger to see detailed error tracebacks
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)

# A list of tickers to test. One very common one, and two from your logs.
TICKERS_TO_TEST = ["MSFT", "AEBI", "AEHR"]

print("--- Starting YFINANCE Diagnostic Test ---")

for ticker_symbol in TICKERS_TO_TEST:
    print(f"\n--- Testing Ticker: {ticker_symbol} ---")

    # --- Test 1: The Ticker().info method ---
    try:
        logging.info(
            f"Attempting to create Ticker object and get .info for {ticker_symbol}..."
        )
        ticker_obj = yf.Ticker(ticker_symbol)
        market_cap = ticker_obj.info.get("marketCap")
        if market_cap is not None:
            logging.info(f"SUCCESS: Got market cap for {ticker_symbol}: {market_cap}")
        else:
            logging.warning(
                f"PARTIAL SUCCESS: .info call for {ticker_symbol} worked, but no market cap was found."
            )
    except Exception:
        logging.error(
            f"FAILURE: An error occurred during the Ticker().info call for {ticker_symbol}.",
            exc_info=True,
        )

    # --- Test 2: The yf.download() method ---
    try:
        logging.info(f"Attempting yf.download() for {ticker_symbol}...")
        data = yf.download(
            ticker_symbol, period="2d", progress=False, auto_adjust=False
        )
        if not data.empty:
            logging.info(
                f"SUCCESS: yf.download() for {ticker_symbol} returned {len(data)} rows of data."
            )
        else:
            logging.warning(
                f"PARTIAL SUCCESS: yf.download() for {ticker_symbol} worked, but returned no data (likely delisted)."
            )
    except Exception:
        logging.error(
            f"FAILURE: An error occurred during the yf.download() call for {ticker_symbol}.",
            exc_info=True,
        )

print("\n--- YFINANCE Diagnostic Test Complete ---")
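To use the diagnostic, run `python yfinance_test.py` in the project's environment. As a rough reading of the output: a FAILURE on a liquid symbol like MSFT points at the environment (network, rate limiting, an outdated yfinance release) rather than the project's code, while warnings or failures only on thin symbols such as AEBI usually mean the ticker itself is invalid or delisted.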