Compare commits
64 commits  (bd27db49e7 ... v0.0.1)
437f7b055f
262b4ee3cc
ef288d565b
aa0a383b9c
a17767f6e4
703f52c217
d85d372a65
885ddada71
8b6ad157dc
474431084e
051e24878a
0fc88c17ef
3beadf57a3
ff0f528d36
0623770d10
81338563a2
1cc7f556c4
30899d35b2
26011eb170
9329c591b5
bae0d1902b
9e5455592b
8a80df5946
712b12dc7f
84486adb83
f7faebfc0d
d05b3a0cc7
5d3e510f6b
c792ea0bf8
319ee0f402
f314d57453
5ec49a53b5
e0fe761c3d
2aa378f23b
0acb8470c5
f6536761bc
776c8ff688
ac7ae5e34a
f3d01e296f
6d610c7c31
6b2004cb27
6611999b5f
a2459745c1
e92a508be3
3c2a38d1a1
55ea5d187f
5319bc554a
9f49660970
afcba995f1
3499cecb8b
8448ff1897
56e0965a5f
f940470de3
38e42efdef
0dab12eb8c
44fc3ef533
67f627e7ea
c5a91c9d72
eb6de197f0
0d6d9516d7
841f6a5305
c9e754c9c9
07c1fd3841
de57a5b26b
.dockerignore (new file, 94 lines)
@@ -0,0 +1,94 @@
# Git
.git
.gitignore
.gitattributes


# CI
.codeclimate.yml
.travis.yml
.taskcluster.yml

# Docker
docker-compose.yml
Dockerfile
.docker
.dockerignore

# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml

# Translations
*.mo
*.pot

# Ignore Node.js dependencies (they will be installed inside the container)
node_modules/

# Ignore database and log files
*.db
*.log
*.db-journal

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Virtual environment
.env
.venv/
venv/

# PyCharm
.idea

# Python mode for VIM
.ropeproject
**/.ropeproject

# Vim swap files
**/*.swp

# VS Code
.vscode/
.gitignore (vendored, 3 changes)
@@ -7,3 +7,6 @@ __pycache__/
 *.log
 reddit_stock_analyzer.egg-info/
 images/
+public/
+config/certbot/
+node_modules/
Dockerfile (new file, 31 lines)
@@ -0,0 +1,31 @@
FROM node:24-bookworm-slim AS builder

WORKDIR /usr/src/build

COPY package.json package-lock.json ./

RUN npm install

COPY tailwind.config.js ./
COPY templates/ ./templates/
COPY static/css/input.css ./static/css/input.css

RUN npx tailwindcss -i ./static/css/input.css -o ./static/css/style.css --minify

FROM python:3.13.5-slim

EXPOSE 5000

WORKDIR /usr/src/app

COPY requirements.txt .

RUN python3 -m pip install --upgrade pip
RUN python3 -m pip install --no-cache-dir -r requirements.txt

COPY . .
COPY --from=builder /usr/src/build/static/css/style.css ./static/css/style.css

RUN python3 -m pip install -e .

CMD [ "gunicorn", "--config", "rstat_tool/gunicorn-cfg.py", "-k", "sync", "rstat_tool.dashboard:app" ]
@@ -99,7 +99,13 @@ Run the included setup script **once** to download the required `vader_lexicon`
 python rstat_tool/setup_nltk.py
 ```
 
-### 7. Build and Install the Commands
+### 7. Set Up Playwright
+
+Run the install routine for playwright. You might need to install some dependencies. Follow on-screen instruction if that's the case.
+
+```bash
+playwright install
+```
+
+### 8. Build and Install the Commands
 
 Install the tool in "editable" mode. This creates the `rstat` and `rstat-dashboard` commands in your virtual environment and links them to your source code.
 
 ```bash
config/nginx/dev/localhost.conf (new file, 17 lines)
@@ -0,0 +1,17 @@
server {
    listen 80 default_server;
    listen [::]:80 default_server;

    server_name _;
    server_tokens off;

    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;

        proxy_pass http://varnish:80;
        proxy_redirect off;
    }
}
config/nginx/rstat.net-NOSSL.conf (new file, 15 lines)
@@ -0,0 +1,15 @@
server {
    listen 80 default_server;
    listen [::]:80 default_server;

    server_name www.rstat.net rstat.net;
    server_tokens off;

    location /.well-known/acme-challenge/ {
        root /usr/share/nginx/certbot;
    }

    location / {
        return 301 https://rstat.net$request_uri;
    }
}
config/nginx/rstat.net-SSL.conf (new file, 67 lines)
@@ -0,0 +1,67 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name www.rstat.net;
    server_tokens off;

    http2 on;

    ssl_certificate /etc/nginx/ssl/live/www.rstat.net/fullchain.pem;
    ssl_certificate_key /etc/nginx/ssl/live/www.rstat.net/privkey.pem;

    # HSTS (ngx_http_headers_module is required) (63072000 seconds)
    add_header Strict-Transport-Security "max-age=63072000" always;

    return 301 https://rstat.net$request_uri;
}

server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name rstat.net;
    server_tokens off;

    http2 on;

    ssl_certificate /etc/nginx/ssl/live/www.rstat.net/fullchain.pem;
    ssl_certificate_key /etc/nginx/ssl/live/www.rstat.net/privkey.pem;

    # HSTS (ngx_http_headers_module is required) (63072000 seconds)
    add_header Strict-Transport-Security "max-age=63072000" always;

    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;

        proxy_pass http://varnish:80;
        proxy_redirect off;
    }
}

# intermediate configuration
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ecdh_curve X25519:prime256v1:secp384r1;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
ssl_prefer_server_ciphers off;

# see also ssl_session_ticket_key alternative to stateful session cache
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions

# curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam
ssl_dhparam /etc/nginx/ssl/dhparam.pem;

# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;

# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/nginx/ssl/live/www.rstat.net/chain.pem;

# replace with the IP address of your resolver;
# async 'resolver' is important for proper operation of OCSP stapling
resolver 67.207.67.3;
config/varnish/default.vcl (new file, 105 lines)
@@ -0,0 +1,105 @@
vcl 4.1;

# https://github.com/varnish/toolbox/tree/master/vcls/hit-miss
include "hit-miss.vcl";
import std;

backend default {
    .host = "rstat-dashboard";
    .port = "5000";
}

sub vcl_recv {
    if (req.method != "GET" &&
        req.method != "HEAD" &&
        req.method != "PUT" &&
        req.method != "POST" &&
        req.method != "TRACE" &&
        req.method != "OPTIONS" &&
        req.method != "DELETE") {
        /* Non-RFC2616 or CONNECT which is weird. */
        return (pipe);
    }

    # We only deal with GET and HEAD by default
    if (req.method != "GET" && req.method != "HEAD") {
        return (pass);
    }

    set req.url = regsub(req.url, "^http[s]?://", "");

    # static files are always cacheable. remove SSL flag and cookie
    if (req.url ~ "^/(pub/)?(media|static)/.*\.(ico|jpg|jpeg|png|gif|tiff|bmp|mp3|ogg|svg|swf|woff|woff2|eot|ttf|otf)$") {
        unset req.http.Https;
        unset req.http.X-Forwarded-Proto;
        unset req.http.Cookie;
        unset req.http.css;
        unset req.http.js;
    }

    return (hash);
}

sub vcl_hash {
    if (req.http.host) {
        hash_data(req.http.host);
    } else {
        hash_data(server.ip);
    }

    # To make sure http users don't see ssl warning
    if (req.http.X-Forwarded-Proto) {
        hash_data(req.http.X-Forwarded-Proto);
    }
}

sub vcl_backend_response {
    set beresp.http.X-Host = bereq.http.host;

    set beresp.ttl = 1m;
    # Enable stale content serving
    set beresp.grace = 24h;
    # Preserve the origin's Cache-Control header for client-side caching
    if (beresp.http.Cache-Control) {
        set beresp.http.X-Orig-Cache-Control = beresp.http.Cache-Control;
    }

    # validate if we need to cache it and prevent from setting cookie
    # images, css and js are cacheable by default so we have to remove cookie also
    if (beresp.ttl > 0s && (bereq.method == "GET" || bereq.method == "HEAD")) {
        unset beresp.http.set-cookie;
        unset beresp.http.set-css;
        unset beresp.http.set-js;
        if (bereq.url !~ "\.(ico|jpg|jpeg|png|gif|tiff|bmp|gz|tgz|bz2|tbz|mp3|ogg|svg|swf|woff|woff2|eot|ttf|otf)(\?|$)") {
            set beresp.http.Pragma = "no-cache";
            set beresp.http.Expires = "-1";
            set beresp.http.Cache-Control = "no-store, no-cache, must-revalidate, max-age=0";
            set beresp.grace = 1m;
        }
    }

    # If page is not cacheable then bypass varnish for 2 minutes as Hit-For-Pass
    if (beresp.ttl <= 0s ||
        beresp.http.Surrogate-control ~ "no-store" ||
        (!beresp.http.Surrogate-Control && beresp.http.Vary == "*")) {
        # Mark as Hit-For-Pass for the next 2 minutes
        set beresp.ttl = 120s;
        set beresp.uncacheable = true;
    }
    return (deliver);
}

sub vcl_deliver {
    # Restore the origin's Cache-Control header for the browser
    if (resp.http.X-Orig-Cache-Control) {
        set resp.http.Cache-Control = resp.http.X-Orig-Cache-Control;
        unset resp.http.X-Orig-Cache-Control;
    } else {
        # If no Cache-Control was set by the origin, we'll set a default
        set resp.http.Cache-Control = "no-cache, must-revalidate";
    }

    unset resp.http.Server;
    unset resp.http.Via;
    unset resp.http.Link;
}
config/varnish/dev.vcl (new file, 105 lines)
@@ -0,0 +1,105 @@
vcl 4.1;

# https://github.com/varnish/toolbox/tree/master/vcls/hit-miss
include "hit-miss.vcl";
import std;

backend default {
    .host = "rstat-dashboard";
    .port = "5000";
}

sub vcl_recv {
    if (req.method != "GET" &&
        req.method != "HEAD" &&
        req.method != "PUT" &&
        req.method != "POST" &&
        req.method != "TRACE" &&
        req.method != "OPTIONS" &&
        req.method != "DELETE") {
        /* Non-RFC2616 or CONNECT which is weird. */
        return (pipe);
    }

    # We only deal with GET and HEAD by default
    if (req.method != "GET" && req.method != "HEAD") {
        return (pass);
    }

    set req.url = regsub(req.url, "^http[s]?://", "");

    # static files are always cacheable. remove SSL flag and cookie
    if (req.url ~ "^/(pub/)?(media|static)/.*\.(ico|jpg|jpeg|png|gif|tiff|bmp|mp3|ogg|svg|swf|woff|woff2|eot|ttf|otf)$") {
        unset req.http.Https;
        unset req.http.X-Forwarded-Proto;
        unset req.http.Cookie;
        unset req.http.css;
        unset req.http.js;
    }

    return (hash);
}

sub vcl_hash {
    if (req.http.host) {
        hash_data(req.http.host);
    } else {
        hash_data(server.ip);
    }

    # To make sure http users don't see ssl warning
    if (req.http.X-Forwarded-Proto) {
        hash_data(req.http.X-Forwarded-Proto);
    }
}

sub vcl_backend_response {
    set beresp.http.X-Host = bereq.http.host;

    set beresp.ttl = 1m;
    # Enable stale content serving
    set beresp.grace = 24h;
    # Preserve the origin's Cache-Control header for client-side caching
    if (beresp.http.Cache-Control) {
        set beresp.http.X-Orig-Cache-Control = beresp.http.Cache-Control;
    }

    # validate if we need to cache it and prevent from setting cookie
    # images, css and js are cacheable by default so we have to remove cookie also
    if (beresp.ttl > 0s && (bereq.method == "GET" || bereq.method == "HEAD")) {
        unset beresp.http.set-cookie;
        unset beresp.http.set-css;
        unset beresp.http.set-js;
        if (bereq.url !~ "\.(ico|jpg|jpeg|png|gif|tiff|bmp|gz|tgz|bz2|tbz|mp3|ogg|svg|swf|woff|woff2|eot|ttf|otf)(\?|$)") {
            set beresp.http.Pragma = "no-cache";
            set beresp.http.Expires = "-1";
            set beresp.http.Cache-Control = "no-store, no-cache, must-revalidate, max-age=0";
            set beresp.grace = 1m;
        }
    }

    # If page is not cacheable then bypass varnish for 2 minutes as Hit-For-Pass
    if (beresp.ttl <= 0s ||
        beresp.http.Surrogate-control ~ "no-store" ||
        (!beresp.http.Surrogate-Control && beresp.http.Vary == "*")) {
        # Mark as Hit-For-Pass for the next 2 minutes
        set beresp.ttl = 120s;
        set beresp.uncacheable = true;
    }
    return (deliver);
}

sub vcl_deliver {
    # Restore the origin's Cache-Control header for the browser
    if (resp.http.X-Orig-Cache-Control) {
        set resp.http.Cache-Control = resp.http.X-Orig-Cache-Control;
        unset resp.http.X-Orig-Cache-Control;
    } else {
        # If no Cache-Control was set by the origin, we'll set a default
        set resp.http.Cache-Control = "no-cache, must-revalidate";
    }

    unset resp.http.Server;
    unset resp.http.Via;
    unset resp.http.Link;
}
config/varnish/hit-miss.vcl (new file, 39 lines)
@@ -0,0 +1,39 @@
sub vcl_recv {
    unset req.http.x-cache;
}

sub vcl_hit {
    set req.http.x-cache = "hit";
    if (obj.ttl <= 0s && obj.grace > 0s) {
        set req.http.x-cache = "hit graced";
    }
}

sub vcl_miss {
    set req.http.x-cache = "miss";
}

sub vcl_pass {
    set req.http.x-cache = "pass";
}

sub vcl_pipe {
    set req.http.x-cache = "pipe uncacheable";
}

sub vcl_synth {
    set req.http.x-cache = "synth synth";
    # comment the following line to omit the x-cache header in the response
    set resp.http.x-cache = req.http.x-cache;
}

sub vcl_deliver {
    if (obj.uncacheable) {
        set req.http.x-cache = req.http.x-cache + " uncacheable";
    } else {
        set req.http.x-cache = req.http.x-cache + " cached";
    }

    # comment the following line to omit the x-cache header in the response
    set resp.http.x-cache = req.http.x-cache;
}
docker-compose-dev.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
name: rstat

services:

  rstat-dashboard:
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    volumes:
      - ./reddit_stocks.db:/usr/src/app/reddit_stocks.db:ro
    ports:
      - "5000:5000"

  nginx:
    image: nginx:1.29.0
    restart: always
    volumes:
      - ./config/nginx/dev:/etc/nginx/conf.d:ro
      - ./public:/usr/share/nginx:ro
    ports:
      - "80:80"

  varnish:
    image: varnish:7.7.1
    restart: always
    volumes:
      - ./config/varnish/dev.vcl:/etc/varnish/default.vcl:ro
      - ./config/varnish/hit-miss.vcl:/etc/varnish/hit-miss.vcl:ro
    tmpfs:
      - /var/lib/varnish/varnishd:exec
docker-compose.yml (new file, 39 lines)
@@ -0,0 +1,39 @@
name: rstat

services:

  rstat-dashboard:
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    volumes:
      - ./reddit_stocks.db:/usr/src/app/reddit_stocks.db:ro
    ports:
      - "5000:5000"

  nginx:
    image: nginx:1.29.0
    restart: always
    volumes:
      - ./config/nginx:/etc/nginx/conf.d:ro
      - ./config/certbot:/etc/nginx/ssl:ro
      - ./public:/usr/share/nginx:ro
    ports:
      - "80:80"
      - "443:443"

  varnish:
    image: varnish:7.7.1
    restart: always
    volumes:
      - ./config/varnish/default.vcl:/etc/varnish/default.vcl:ro
      - ./config/varnish/hit-miss.vcl:/etc/varnish/hit-miss.vcl:ro
    tmpfs:
      - /var/lib/varnish/varnishd:exec

  certbot:
    image: certbot/certbot:v4.1.1
    volumes:
      - ./config/certbot:/etc/letsencrypt:rw
      - ./public/certbot:/usr/share/nginx/certbot:rw
@@ -8,6 +8,7 @@ from playwright.sync_api import sync_playwright
 # Define the output directory as a constant
 OUTPUT_DIR = "images"
 
+
 def export_image(url_path, filename_prefix):
     """
     Launches a headless browser, navigates to a URL path, and screenshots

@@ -18,7 +19,7 @@ def export_image(url_path, filename_prefix):
     # 1. Ensure the output directory exists
     os.makedirs(OUTPUT_DIR, exist_ok=True)
 
-    base_url = "http://127.0.0.1:5000"
+    base_url = "http://localhost:5000"
     url = f"{base_url}/{url_path}"
 
     # 2. Construct the full output path including the new directory

@@ -32,7 +33,7 @@ def export_image(url_path, filename_prefix):
     page.set_viewport_size({"width": 1920, "height": 1080})
 
     print(f"  Navigating to {url}...")
     page.goto(url, wait_until="networkidle")  # Wait for network to be idle
 
     # Target the specific element we want to screenshot
     element = page.locator(".image-container")

@@ -45,7 +46,9 @@ def export_image(url_path, filename_prefix):
 
     except Exception as e:
         print(f"\nAn error occurred during export: {e}")
-        print("Please ensure the 'rstat-dashboard' server is running in another terminal.")
+        print(
+            "Please ensure the 'rstat-dashboard' server is running in another terminal."
+        )
 
 
 if __name__ == "__main__":

@@ -53,21 +56,28 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Export subreddit sentiment images.")
     group = parser.add_mutually_exclusive_group(required=True)
     group.add_argument("-s", "--subreddit", help="The name of the subreddit to export.")
-    group.add_argument("-o", "--overall", action="store_true", help="Export the overall summary image.")
+    group.add_argument(
+        "-o", "--overall", action="store_true", help="Export the overall summary image."
+    )
 
-    parser.add_argument("-w", "--weekly", action="store_true", help="Export the weekly view instead of the daily view (only for --subreddit).")
+    parser.add_argument(
+        "-w",
+        "--weekly",
+        action="store_true",
+        help="Export the weekly view instead of the daily view (only for --subreddit).",
+    )
     args = parser.parse_args()
 
     # Determine the correct URL path and filename based on arguments
     if args.subreddit:
         view_type = "weekly" if args.weekly else "daily"
-        url_path_to_render = f"image/{view_type}/{args.subreddit}"
+        # Add ?view=... and the new &image=true parameter
+        url_path_to_render = f"subreddit/{args.subreddit}?view={view_type}&image=true"
         filename_prefix_to_save = f"{args.subreddit}_{view_type}"
         export_image(url_path_to_render, filename_prefix_to_save)
 
     elif args.overall:
-        if args.weekly:
-            print("Warning: --weekly flag has no effect with --overall. Exporting overall summary.")
-        url_path_to_render = "image/overall"
-        filename_prefix_to_save = "overall_summary"
+        # For overall, we assume daily view for the image
+        url_path_to_render = "/?view=daily&image=true"
+        filename_prefix_to_save = "overall_summary_daily"
+
         export_image(url_path_to_render, filename_prefix_to_save)
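The hunks above only show fragments of `export_image`; the browser launch and screenshot calls sit between them and are not part of this diff. For orientation, here is a minimal sketch of the Playwright pattern the function follows. The helper name, the output filename handling, and the choice of `chromium` are assumptions, not values taken from the diff:

```python
import os
from playwright.sync_api import sync_playwright

OUTPUT_DIR = "images"


def render_element_to_png(url: str, output_name: str) -> str:
    """Open a page headlessly and screenshot the .image-container element.

    Sketch only; the real export_image also builds date-stamped filenames and logs progress.
    """
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    output_path = os.path.join(OUTPUT_DIR, output_name)
    with sync_playwright() as p:
        browser = p.chromium.launch()  # headless by default
        page = browser.new_page()
        page.set_viewport_size({"width": 1920, "height": 1080})
        page.goto(url, wait_until="networkidle")
        page.locator(".image-container").screenshot(path=output_path)
        browser.close()
    return output_path
```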
fetch_close_price.py (new file, 37 lines)
@@ -0,0 +1,37 @@
# fetch_close_price.py
# This script does ONLY ONE THING: gets the closing price using the stable Ticker.history() method.
import sys
import json
import yfinance as yf
import pandas as pd
import logging

# Suppress verbose yfinance logging in this isolated process
logging.getLogger("yfinance").setLevel(logging.CRITICAL)

if __name__ == "__main__":
    if len(sys.argv) < 2:
        # Exit with an error code if no ticker is provided
        sys.exit(1)

    ticker_symbol = sys.argv[1]

    try:
        # Instead of the global yf.download(), we use the Ticker object's .history() method.
        # This uses a different internal code path that we have proven is stable.
        ticker = yf.Ticker(ticker_symbol)
        data = ticker.history(period="2d", auto_adjust=False)

        closing_price = None
        if not data.empty:
            last_close_raw = data["Close"].iloc[-1]
            if pd.notna(last_close_raw):
                closing_price = float(last_close_raw)

        # On success, print JSON to stdout and exit cleanly
        print(json.dumps({"closing_price": closing_price}))
        sys.exit(0)
    except Exception:
        # If any error occurs, print an empty JSON and exit with an error code
        print(json.dumps({"closing_price": None}))
        sys.exit(1)
fetch_market_cap.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# fetch_market_cap.py
# This script does ONLY ONE THING: gets the market cap.
import sys
import json
import yfinance as yf
import logging

# Suppress verbose yfinance logging in this isolated process
logging.getLogger("yfinance").setLevel(logging.CRITICAL)

if __name__ == "__main__":
    if len(sys.argv) < 2:
        # Exit with an error code if no ticker is provided
        sys.exit(1)

    ticker_symbol = sys.argv[1]

    try:
        # Directly get the market cap
        market_cap = yf.Ticker(ticker_symbol).info.get("marketCap")

        # On success, print JSON to stdout and exit cleanly
        print(json.dumps({"market_cap": market_cap}))
        sys.exit(0)
    except Exception:
        # If any error occurs, print an empty JSON and exit with an error code
        print(json.dumps({"market_cap": None}))
        sys.exit(1)
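Both helper scripts are written to run in their own process, so a hung or crashing yfinance call cannot take down whatever invokes them; each prints a single JSON object on stdout and signals failure through its exit code. A minimal sketch of how a caller might run one of them and parse the result. The `fetch_market_cap_isolated` helper name and the 30-second timeout are assumptions; the diff does not show the real caller inside rstat_tool:

```python
import json
import subprocess
import sys


def fetch_market_cap_isolated(ticker_symbol: str, timeout: int = 30):
    """Run fetch_market_cap.py in a child process and return the market cap, or None.

    Hypothetical caller, shown only to illustrate the stdout/exit-code contract.
    """
    try:
        result = subprocess.run(
            [sys.executable, "fetch_market_cap.py", ticker_symbol],
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        if result.returncode != 0:
            return None
        return json.loads(result.stdout).get("market_cap")
    except (subprocess.TimeoutExpired, json.JSONDecodeError):
        return None


if __name__ == "__main__":
    print(fetch_market_cap_isolated("AAPL"))
```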
@@ -10,6 +10,7 @@ import socket
 # --- IMPORTANT: Ensure this matches the "redirect uri" in your Reddit App settings ---
 REDIRECT_URI = "http://localhost:5000"
 
+
 def main():
     print("--- RSTAT Refresh Token Generator ---")
     load_dotenv()

@@ -17,7 +18,9 @@ def main():
     client_secret = os.getenv("REDDIT_CLIENT_SECRET")
 
     if not all([client_id, client_secret]):
-        print("Error: REDDIT_CLIENT_ID and REDDIT_CLIENT_SECRET must be set in your .env file.")
+        print(
+            "Error: REDDIT_CLIENT_ID and REDDIT_CLIENT_SECRET must be set in your .env file."
+        )
         return
 
     # 1. Initialize PRAW

@@ -25,7 +28,7 @@ def main():
         client_id=client_id,
         client_secret=client_secret,
         redirect_uri=REDIRECT_URI,
-        user_agent="rstat_token_fetcher (by u/YourUsername)" # Can be anything
+        user_agent="rstat_token_fetcher (by u/YourUsername)",  # Can be anything
     )
 
     # 2. Generate the authorization URL

@@ -37,11 +40,17 @@ def main():
     print("\nStep 1: Open this URL in your browser:\n")
     print(auth_url)
 
-    print("\nStep 2: Log in to Reddit, click 'Allow', and you'll be redirected to a 'page not found'.")
-    print("Step 3: Copy the ENTIRE URL from your browser's address bar after the redirect.")
+    print(
+        "\nStep 2: Log in to Reddit, click 'Allow', and you'll be redirected to a 'page not found'."
+    )
+    print(
+        "Step 3: Copy the ENTIRE URL from your browser's address bar after the redirect."
+    )
 
     # 3. Get the redirected URL from the user
-    redirected_url = input("\nStep 4: Paste the full redirected URL here and press Enter:\n> ")
+    redirected_url = input(
+        "\nStep 4: Paste the full redirected URL here and press Enter:\n> "
+    )
 
     # 4. Exchange the authorization code for a refresh token
     try:

@@ -57,12 +66,17 @@ def main():
         print("\n--- SUCCESS! ---")
         print("Your Refresh Token is:\n")
         print(refresh_token)
-        print("\nStep 5: Copy this token and add it to your .env file as REDDIT_REFRESH_TOKEN.")
-        print("Step 6: You can now delete your REDDIT_USERNAME and REDDIT_PASSWORD from the .env file.")
+        print(
+            "\nStep 5: Copy this token and add it to your .env file as REDDIT_REFRESH_TOKEN."
+        )
+        print(
+            "Step 6: You can now delete your REDDIT_USERNAME and REDDIT_PASSWORD from the .env file."
+        )
 
     except Exception as e:
         print(f"\nAn error occurred: {e}")
         print("Please make sure you copied the full URL.")
 
 
 if __name__ == "__main__":
     main()
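Steps 2 and 4 of the generator (building the authorization URL and exchanging the pasted code for a refresh token) fall outside the hunks shown above. A sketch of how those steps are typically done with PRAW's code flow; the scope list and state string below are assumptions, not values from this repository:

```python
from urllib.parse import parse_qs, urlparse

import praw

reddit = praw.Reddit(
    client_id="...",  # loaded from .env in the real script
    client_secret="...",
    redirect_uri="http://localhost:5000",
    user_agent="rstat_token_fetcher (by u/YourUsername)",
)

# Step 2: build the authorization URL the user opens in a browser.
scopes = ["identity", "read", "submit"]  # assumed scopes
auth_url = reddit.auth.url(scopes=scopes, state="rstat", duration="permanent")
print(auth_url)

# Step 4: pull the one-time code out of the redirected URL and trade it
# for a permanent refresh token.
redirected_url = input("Paste the redirected URL: ")
code = parse_qs(urlparse(redirected_url).query)["code"][0]
refresh_token = reddit.auth.authorize(code)
print(refresh_token)
```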
package-lock.json (generated, new file, 1294 lines)

package.json (new file, 25 lines)
@@ -0,0 +1,25 @@
{
  "name": "reddit_stock_analyzer",
  "version": "1.0.0",
  "description": "A powerful, installable command-line tool and web dashboard to scan Reddit for stock ticker mentions, perform sentiment analysis, generate insightful reports, and create shareable summary images.",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "repository": {
    "type": "git",
    "url": "ssh://git@git.pkhamre.com:43721/pkhamre/reddit_stock_analyzer.git"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "type": "commonjs",
  "devDependencies": {
    "@tailwindcss/cli": "^4.1.11",
    "@tailwindcss/typography": "^0.5.16",
    "tailwindcss": "^4.1.11"
  },
  "dependencies": {
    "@tailwindplus/elements": "^1.0.3"
  }
}
@@ -6,29 +6,37 @@ import glob
 from datetime import datetime, timezone
 import praw
 from dotenv import load_dotenv
+from pathlib import Path
 
 # --- CONFIGURATION ---
 IMAGE_DIR = "images"
 
 
 def get_reddit_instance():
-    """Initializes and returns a PRAW Reddit instance from .env credentials."""
-    load_dotenv()
+    """Initializes and returns a PRAW Reddit instance using OAuth2 refresh token."""
+
+    env_path = Path(__file__).parent / ".env"
+    load_dotenv(dotenv_path=env_path)
+
     client_id = os.getenv("REDDIT_CLIENT_ID")
     client_secret = os.getenv("REDDIT_CLIENT_SECRET")
     user_agent = os.getenv("REDDIT_USER_AGENT")
     refresh_token = os.getenv("REDDIT_REFRESH_TOKEN")
 
     if not all([client_id, client_secret, user_agent, refresh_token]):
-        print("Error: Reddit API credentials (including REDDIT_REFRESH_TOKEN) must be set in .env file.")
+        print(
+            "Error: Reddit API credentials (including REDDIT_REFRESH_TOKEN) must be set in .env file."
+        )
         return None
 
     return praw.Reddit(
         client_id=client_id,
         client_secret=client_secret,
         user_agent=user_agent,
-        refresh_token=refresh_token
+        refresh_token=refresh_token,
     )
 
 
 def find_latest_image(pattern):
     """Finds the most recent file in the IMAGE_DIR that matches a given pattern."""
     try:

@@ -43,12 +51,40 @@ def find_latest_image(pattern):
         print(f"Error finding image file: {e}")
         return None
 
+
+def get_flair_id(subreddit, flair_text):
+    """
+    Attempts to find the ID of a flair by its text.
+    Returns the ID string or None if not found or an error occurs.
+    """
+    if not flair_text:
+        return None
+
+    print(f"Attempting to find Flair ID for text: '{flair_text}'...")
+    try:
+        flairs = subreddit.flair.link_templates
+        for flair in flairs:
+            if flair['text'].lower() == flair_text.lower():
+                print(f"  -> Found Flair ID: {flair['id']}")
+                return flair['id']
+        print("  -> Warning: No matching flair text found.")
+        return None
+    except Exception as e:
+        print(f"  -> Warning: Could not fetch flairs for this subreddit (Error: {e}). Proceeding without flair.")
+        return None
+
+
 def main():
     """Main function to find an image and post it to Reddit."""
-    parser = argparse.ArgumentParser(description="Find the latest sentiment image and post it to a subreddit.")
+    parser = argparse.ArgumentParser(
+        description="Find the latest sentiment image and post it to a subreddit."
+    )
     parser.add_argument("-s", "--subreddit", help="The source subreddit of the image to post. (Defaults to overall summary)")
     parser.add_argument("-w", "--weekly", action="store_true", help="Post the weekly summary instead of the daily one.")
     parser.add_argument("-t", "--target-subreddit", default="rstat", help="The subreddit to post the image to. (Default: rstat)")
+    parser.add_argument("--flair-text", help="The text of the flair to search for (e.g., 'Daily Summary').")
+    parser.add_argument("--flair-id", help="Manually provide a specific Flair ID (overrides --flair-text).")
+
     args = parser.parse_args()
 
     # --- 1. Determine filename pattern and post title ---

@@ -61,9 +97,13 @@ def main():
     else:
         # Default to the overall summary
         if args.weekly:
-            print("Warning: --weekly flag has no effect for overall summary. Posting overall daily image.")
+            print(
+                "Warning: --weekly flag has no effect for overall summary. Posting overall daily image."
+            )
         filename_pattern = "overall_summary_*.png"
-        post_title = f"Overall Top 10 Ticker Mentions Across Reddit ({current_date_str})"
+        post_title = (
+            f"Overall Top 10 Ticker Mentions Across Reddit ({current_date_str})"
+        )
 
     print(f"Searching for image pattern: {filename_pattern}")

@@ -71,7 +111,9 @@ def main():
     image_to_post = find_latest_image(filename_pattern)
 
     if not image_to_post:
-        print(f"Error: No image found matching the pattern '{filename_pattern}'. Please run the scraper and exporter first.")
+        print(
+            f"Error: No image found matching the pattern '{filename_pattern}'. Please run the scraper and exporter first."
+        )
         return
 
     print(f"Found image: {image_to_post}")

@@ -83,12 +125,23 @@ def main():
 
     try:
         target_sub = reddit.subreddit(args.target_subreddit)
+
+        # --- NEW SMART FLAIR LOGIC ---
+        final_flair_id = None
+        if args.flair_id:
+            # If the user provides a specific ID, use it directly.
+            print(f"Using provided Flair ID: {args.flair_id}")
+            final_flair_id = args.flair_id
+        elif args.flair_text:
+            # If they provide text, try to find the ID automatically.
+            final_flair_id = get_flair_id(target_sub, args.flair_text)
+
         print(f"Submitting '{post_title}' to r/{target_sub.display_name}...")
 
         submission = target_sub.submit_image(
             title=post_title,
             image_path=image_to_post,
-            flair_id=None # Optional: You can add a flair ID here if you want
+            flair_id=final_flair_id  # This will be the found ID or None
         )
 
         print("\n--- Post Successful! ---")

@@ -96,7 +149,8 @@ def main():
 
     except Exception as e:
         print(f"\nAn error occurred while posting to Reddit: {e}")
+        if 'FLAIR_REQUIRED' in str(e):
+            print("\nHint: This subreddit requires a flair. Try finding the flair text or ID and use the --flair-text or --flair-id argument.")
 
 
 if __name__ == "__main__":
     main()
@@ -1,6 +1,8 @@
-Flask
-nltk
-playwright
-praw
-python-dotenv
-yfinance
+Flask==3.1.1
+gunicorn==23.0.0
+nltk==3.9.1
+playwright==1.54.0
+praw==7.8.1
+python-dotenv==1.1.1
+uvicorn==0.35.0
+yfinance==0.2.65
@@ -3,27 +3,34 @@
 import argparse
 from . import database
 from .logger_setup import setup_logging, logger as log
 
 # We can't reuse load_subreddits from main anymore if it's not in the same file
 # So we will duplicate it here. It's small and keeps this script self-contained.
 import json
 
 
 def load_subreddits(filepath):
     """Loads a list of subreddits from a JSON file."""
     try:
-        with open(filepath, 'r') as f:
+        with open(filepath, "r") as f:
             data = json.load(f)
             return data.get("subreddits", [])
     except (FileNotFoundError, json.JSONDecodeError) as e:
         log.error(f"Error loading config file '{filepath}': {e}")
         return None
 
 
 def run_cleanup():
     """Main function for the cleanup tool."""
     parser = argparse.ArgumentParser(
         description="A tool to clean stale data from the RSTAT database.",
-        formatter_class=argparse.RawTextHelpFormatter
+        formatter_class=argparse.RawTextHelpFormatter,
     )
-    parser.add_argument("--tickers", action="store_true", help="Clean tickers that are in the blacklist.")
+    parser.add_argument(
+        "--tickers",
+        action="store_true",
+        help="Clean tickers that are in the blacklist.",
+    )
 
     # --- UPDATED ARGUMENT DEFINITION ---
     # nargs='?': Makes the argument optional.

@@ -31,14 +38,18 @@ def run_cleanup():
     # default=None: The value if the flag is not present at all.
     parser.add_argument(
         "--subreddits",
-        nargs='?',
-        const='subreddits.json',
+        nargs="?",
+        const="subreddits.json",
         default=None,
-        help="Clean data from subreddits NOT in the specified config file.\n(Defaults to 'subreddits.json' if flag is used without a value)."
+        help="Clean data from subreddits NOT in the specified config file.\n(Defaults to 'subreddits.json' if flag is used without a value).",
     )
 
-    parser.add_argument("--all", action="store_true", help="Run all available cleanup tasks.")
-    parser.add_argument("--stdout", action="store_true", help="Print all log messages to the console.")
+    parser.add_argument(
+        "--all", action="store_true", help="Run all available cleanup tasks."
+    )
+    parser.add_argument(
+        "--stdout", action="store_true", help="Print all log messages to the console."
+    )
 
     args = parser.parse_args()

@@ -57,7 +68,7 @@ def run_cleanup():
     if args.all or args.subreddits is not None:
         run_any_task = True
         # If --all is used, default to 'subreddits.json' if --subreddits wasn't also specified
-        config_file = args.subreddits or 'subreddits.json'
+        config_file = args.subreddits or "subreddits.json"
         log.info(f"\nCleaning subreddits based on active list in: {config_file}")
         active_subreddits = load_subreddits(config_file)
         if active_subreddits is not None:

@@ -65,10 +76,13 @@ def run_cleanup():
 
     if not run_any_task:
         parser.print_help()
-        log.error("\nError: Please provide at least one cleanup option (e.g., --tickers, --subreddits, --all).")
+        log.error(
+            "\nError: Please provide at least one cleanup option (e.g., --tickers, --subreddits, --all)."
+        )
         return
 
     log.critical("\nCleanup finished.")
 
 
 if __name__ == "__main__":
     run_cleanup()
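The `--subreddits` flag relies on the `nargs="?"` / `const` / `default` trio described in the comments above. A quick standalone illustration of the three states it produces, using plain argparse and independent of the rstat code:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--subreddits", nargs="?", const="subreddits.json", default=None)

# Flag absent: default applies.
print(parser.parse_args([]).subreddits)                             # None
# Flag given without a value: const applies.
print(parser.parse_args(["--subreddits"]).subreddits)               # subreddits.json
# Flag given with a value: that value wins.
print(parser.parse_args(["--subreddits", "alt.json"]).subreddits)   # alt.json
```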
@@ -4,18 +4,17 @@ from flask import Flask, render_template, request
 from datetime import datetime, timedelta, timezone
 from .logger_setup import logger as log
 from .database import (
-    get_overall_summary,
-    get_subreddit_summary,
     get_all_scanned_subreddits,
     get_deep_dive_details,
     get_daily_summary_for_subreddit,
     get_weekly_summary_for_subreddit,
-    get_overall_image_view_summary
+    get_overall_daily_summary,  # Now correctly imported
+    get_overall_weekly_summary,  # Now correctly imported
 )
 
-app = Flask(__name__, template_folder='../templates')
+app = Flask(__name__, template_folder='../templates', static_folder='../static')
 
-@app.template_filter('format_mc')
+@app.template_filter("format_mc")
 def format_market_cap(mc):
     """Formats a large number into a readable market cap string."""
     if mc is None or mc == 0:

@@ -29,23 +28,70 @@ def format_market_cap(mc):
     else:
         return f"${mc:,}"
 
 
 @app.context_processor
 def inject_subreddits():
-    """Makes the list of all scanned subreddits available to every template."""
-    subreddits = get_all_scanned_subreddits()
-    return dict(subreddits=subreddits)
+    """Makes the list of all subreddits available to every template for the navbar."""
+    return dict(all_subreddits=get_all_scanned_subreddits())
+
 
 @app.route("/")
-def index():
-    """The handler for the main dashboard page."""
-    tickers = get_overall_summary(limit=10)
-    return render_template("index.html", tickers=tickers)
+def overall_dashboard():
+    """Handler for the main, overall dashboard."""
+    view_type = request.args.get("view", "daily")
+    is_image_mode = request.args.get("image") == "true"
+
+    if view_type == "weekly":
+        tickers, start, end = get_overall_weekly_summary()
+        date_string = f"{start.strftime('%b %d')} - {end.strftime('%b %d, %Y')}"
+        subtitle = "All Subreddits - Top 10 Weekly"
+    else:  # Default to daily
+        tickers = get_overall_daily_summary()
+        date_string = datetime.now(timezone.utc).strftime("%Y-%m-%d")
+        subtitle = "All Subreddits - Top 10 Daily"
+
+    return render_template(
+        "dashboard_view.html",
+        title="Overall Dashboard",
+        subtitle=subtitle,
+        date_string=date_string,
+        tickers=tickers,
+        view_type=view_type,
+        subreddit_name=None,
+        is_image_mode=is_image_mode,
+        base_url="/",
+    )
+
 
 @app.route("/subreddit/<name>")
 def subreddit_dashboard(name):
-    """A dynamic route for per-subreddit dashboards."""
-    tickers = get_subreddit_summary(name, limit=10)
-    return render_template("subreddit.html", tickers=tickers, subreddit_name=name)
+    """Handler for per-subreddit dashboards."""
+    view_type = request.args.get("view", "daily")
+    is_image_mode = request.args.get("image") == "true"
+
+    if view_type == "weekly":
+        today = datetime.now(timezone.utc)
+        target_date = today - timedelta(days=7)
+        tickers, start, end = get_weekly_summary_for_subreddit(name, target_date)
+        date_string = f"{start.strftime('%b %d')} - {end.strftime('%b %d, %Y')}"
+        subtitle = f"r/{name} - Top 10 Weekly"
+    else:  # Default to daily
+        tickers = get_daily_summary_for_subreddit(name)
+        date_string = datetime.now(timezone.utc).strftime("%Y-%m-%d")
+        subtitle = f"r/{name} - Top 10 Daily"
+
+    return render_template(
+        "dashboard_view.html",
+        title=f"r/{name} Dashboard",
+        subtitle=subtitle,
+        date_string=date_string,
+        tickers=tickers,
+        view_type=view_type,
+        subreddit_name=name,
+        is_image_mode=is_image_mode,
+        base_url=f"/subreddit/{name}",
+    )
+
 
 @app.route("/deep-dive/<symbol>")
 def deep_dive(symbol):

@@ -54,62 +100,13 @@ def deep_dive(symbol):
     posts = get_deep_dive_details(symbol)
     return render_template("deep_dive.html", posts=posts, symbol=symbol)
 
-@app.route("/image/daily/<name>")
-def daily_image_view(name):
-    """The handler for the image-style dashboard."""
-    tickers = get_daily_summary_for_subreddit(name)
-    current_date = datetime.now(timezone.utc).strftime("%Y-%m-%d")
-    return render_template(
-        "daily_image_view.html",
-        tickers=tickers,
-        subreddit_name=name,
-        current_date=current_date
-    )
-
-@app.route("/image/weekly/<name>")
-def weekly_image_view(name):
-    """
-    The handler for the WEEKLY image-style dashboard.
-    Accepts an optional 'date' query parameter in YYYY-MM-DD format.
-    """
-    # Get the date from the URL query string, e.g., ?date=2025-07-21
-    date_str = request.args.get('date')
-    target_date = None
-
-    if date_str:
-        try:
-            # Convert the string to a datetime object
-            target_date = datetime.strptime(date_str, "%Y-%m-%d").replace(tzinfo=timezone.utc)
-        except ValueError:
-            return "Invalid date format. Please use YYYY-MM-DD.", 400
-    else:
-        # If no date is provided, default to showing LAST week
-        today = datetime.now(timezone.utc)
-        target_date = today - timedelta(days=7)
-
-    # The query now returns the results and the date objects used
-    tickers, start_of_week, end_of_week = get_weekly_summary_for_subreddit(name, target_date)
-
-    # Format the date range for the title
-    date_range_str = f"{start_of_week.strftime('%b %d')} - {end_of_week.strftime('%b %d, %Y')}"
-
-    return render_template(
-        "weekly_image_view.html",
-        tickers=tickers,
-        subreddit_name=name,
-        date_range=date_range_str
-    )
-
-@app.route("/image/overall")
-def overall_image_view():
-    """The handler for the overall image-style dashboard."""
-    tickers = get_overall_image_view_summary()
-    current_date = datetime.now(timezone.utc).strftime("%Y-%m-%d")
-    return render_template(
-        "overall_image_view.html",
-        tickers=tickers,
-        current_date=current_date
-    )
+
+@app.route("/about")
+def about_page():
+    """Handler for the static About page."""
+    # We need to pass these so the navbar knows which items to highlight
+    return render_template("about.html", subreddit_name=None, view_type='daily')
+
 
 def start_dashboard():
     """The main function called by the 'rstat-dashboard' command."""

@@ -118,5 +115,6 @@ def start_dashboard():
     log.info("Press CTRL+C to stop the server.")
     app.run(debug=True)
 
+
 if __name__ == "__main__":
     start_dashboard()
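After this change the dedicated image routes are gone; the exporter and any other caller now pick the rendering through the `view` and `image` query parameters on the two remaining dashboard routes. A small sketch of how that can be exercised with Flask's test client; it assumes a populated `reddit_stocks.db` is present (the routes query the database) and uses an arbitrary subreddit name for illustration:

```python
from rstat_tool.dashboard import app  # module path taken from the Dockerfile CMD

client = app.test_client()

# Overall dashboard, weekly view, rendered in image mode for the exporter.
resp = client.get("/?view=weekly&image=true")
print(resp.status_code)

# Per-subreddit dashboard, daily view (the default when 'view' is omitted).
resp = client.get("/subreddit/wallstreetbets?view=daily")
print(resp.status_code)
```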
@@ -7,72 +7,8 @@ from .logger_setup import logger as log
from datetime import datetime, timedelta, timezone

DB_FILE = "reddit_stocks.db"
+MARKET_CAP_REFRESH_INTERVAL = 86400

-def get_db_connection():
-    """Establishes a connection to the SQLite database."""
-    conn = sqlite3.connect(DB_FILE)
-    conn.row_factory = sqlite3.Row
-    return conn
-
-def initialize_db():
-    """
-    Initializes the database and creates the necessary tables if they don't exist.
-    """
-    conn = get_db_connection()
-    cursor = conn.cursor()
-
-    # --- Create tickers table ---
-    cursor.execute("""
-        CREATE TABLE IF NOT EXISTS tickers (
-            id INTEGER PRIMARY KEY AUTOINCREMENT,
-            symbol TEXT NOT NULL UNIQUE,
-            market_cap INTEGER,
-            closing_price REAL,
-            last_updated INTEGER
-        )
-    """)
-
-    # --- Create subreddits table ---
-    cursor.execute("""
-        CREATE TABLE IF NOT EXISTS subreddits (
-            id INTEGER PRIMARY KEY AUTOINCREMENT,
-            name TEXT NOT NULL UNIQUE
-        )
-    """)
-
-    # --- Create mentions table ---
-    cursor.execute("""
-        CREATE TABLE IF NOT EXISTS mentions (
-            id INTEGER PRIMARY KEY AUTOINCREMENT,
-            ticker_id INTEGER,
-            subreddit_id INTEGER,
-            post_id TEXT NOT NULL,
-            mention_type TEXT NOT NULL,
-            mention_sentiment REAL, -- Renamed from sentiment_score for clarity
-            post_avg_sentiment REAL, -- NEW: Stores the avg sentiment of the whole post
-            mention_timestamp INTEGER NOT NULL,
-            FOREIGN KEY (ticker_id) REFERENCES tickers (id),
-            FOREIGN KEY (subreddit_id) REFERENCES subreddits (id)
-        )
-    """)
-
-    cursor.execute("""
-        CREATE TABLE IF NOT EXISTS posts (
-            id INTEGER PRIMARY KEY AUTOINCREMENT,
-            post_id TEXT NOT NULL UNIQUE,
-            title TEXT NOT NULL,
-            post_url TEXT,
-            subreddit_id INTEGER,
-            post_timestamp INTEGER,
-            comment_count INTEGER,
-            avg_comment_sentiment REAL,
-            FOREIGN KEY (subreddit_id) REFERENCES subreddits (id)
-        )
-    """)
-
-    conn.commit()
-    conn.close()
-    log.info("Database initialized successfully.")

def clean_stale_tickers():
    """
@@ -83,7 +19,7 @@ def clean_stale_tickers():
    conn = get_db_connection()
    cursor = conn.cursor()

-    placeholders = ','.join('?' for _ in COMMON_WORDS_BLACKLIST)
+    placeholders = ",".join("?" for _ in COMMON_WORDS_BLACKLIST)
    query = f"SELECT id, symbol FROM tickers WHERE symbol IN ({placeholders})"

    cursor.execute(query, tuple(COMMON_WORDS_BLACKLIST))
@@ -95,8 +31,8 @@ def clean_stale_tickers():
        return

    for ticker in stale_tickers:
-        ticker_id = ticker['id']
-        ticker_symbol = ticker['symbol']
+        ticker_id = ticker["id"]
+        ticker_symbol = ticker["symbol"]
        log.info(f"Removing stale ticker '{ticker_symbol}' (ID: {ticker_id})...")
        cursor.execute("DELETE FROM mentions WHERE ticker_id = ?", (ticker_id,))
        cursor.execute("DELETE FROM tickers WHERE id = ?", (ticker_id,))
@@ -106,6 +42,7 @@ def clean_stale_tickers():
    conn.close()
    log.info(f"Cleanup complete. Removed {deleted_count} records.")


def clean_stale_subreddits(active_subreddits):
    """
    Removes all data associated with subreddits that are NOT in the active list.
@@ -122,9 +59,9 @@ def clean_stale_subreddits(active_subreddits):
    db_subreddits = cursor.fetchall()
    stale_sub_ids = []
    for sub in db_subreddits:
-        if sub['name'] not in active_subreddits_lower:
+        if sub["name"] not in active_subreddits_lower:
            log.info(f"Found stale subreddit to remove: r/{sub['name']}")
-            stale_sub_ids.append(sub['id'])
+            stale_sub_ids.append(sub["id"])
    if not stale_sub_ids:
        log.info("No stale subreddits to clean.")
        conn.close()
@@ -138,15 +75,18 @@ def clean_stale_subreddits(active_subreddits):
    conn.close()
    log.info("Stale subreddit cleanup complete.")


def get_db_connection():
    conn = sqlite3.connect(DB_FILE)
    conn.row_factory = sqlite3.Row
    return conn


def initialize_db():
    conn = get_db_connection()
    cursor = conn.cursor()
-    cursor.execute("""
+    cursor.execute(
+        """
        CREATE TABLE IF NOT EXISTS tickers (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            symbol TEXT NOT NULL UNIQUE,
@@ -154,14 +94,18 @@ def initialize_db():
            closing_price REAL,
            last_updated INTEGER
        )
-    """)
-    cursor.execute("""
+    """
+    )
+    cursor.execute(
+        """
        CREATE TABLE IF NOT EXISTS subreddits (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT NOT NULL UNIQUE
        )
-    """)
-    cursor.execute("""
+    """
+    )
+    cursor.execute(
+        """
        CREATE TABLE IF NOT EXISTS mentions (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            ticker_id INTEGER,
@@ -174,8 +118,10 @@ def initialize_db():
            FOREIGN KEY (ticker_id) REFERENCES tickers (id),
            FOREIGN KEY (subreddit_id) REFERENCES subreddits (id)
        )
-    """)
-    cursor.execute("""
+    """
+    )
+    cursor.execute(
+        """
        CREATE TABLE IF NOT EXISTS posts (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            post_id TEXT NOT NULL UNIQUE,
@@ -187,12 +133,23 @@ def initialize_db():
            avg_comment_sentiment REAL,
            FOREIGN KEY (subreddit_id) REFERENCES subreddits (id)
        )
-    """)
+    """
+    )
    conn.commit()
    conn.close()
    log.info("Database initialized successfully.")

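To make the schema above concrete, here is a small usage sketch (not part of the diff) that records one mention through the helpers defined just below; the ticker, subreddit, post id, and sentiment value are made up, and the import path assumes the rstat_tool package layout seen elsewhere in this changeset:

# Illustrative usage sketch only.
import time
from rstat_tool import database  # assumed package layout

database.initialize_db()
conn = database.get_db_connection()
ticker_id = database.get_or_create_entity(conn, "tickers", "symbol", "GME")
subreddit_id = database.get_or_create_entity(conn, "subreddits", "name", "wallstreetbets")
database.add_mention(
    conn,
    ticker_id,
    subreddit_id,
    post_id="abc123",          # made-up Reddit post id
    mention_type="post",
    timestamp=int(time.time()),
    mention_sentiment=0.42,    # made-up sentiment score
)
conn.close()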
-def add_mention(conn, ticker_id, subreddit_id, post_id, mention_type, timestamp, mention_sentiment, post_avg_sentiment=None):
+def add_mention(
+    conn,
+    ticker_id,
+    subreddit_id,
+    post_id,
+    mention_type,
+    timestamp,
+    mention_sentiment,
+    post_avg_sentiment=None,
+):
    cursor = conn.cursor()
    try:
        cursor.execute(
@@ -200,40 +157,52 @@ def add_mention(conn, ticker_id, subreddit_id, post_id, mention_type, timestamp,
            INSERT INTO mentions (ticker_id, subreddit_id, post_id, mention_type, mention_timestamp, mention_sentiment, post_avg_sentiment)
            VALUES (?, ?, ?, ?, ?, ?, ?)
            """,
-            (ticker_id, subreddit_id, post_id, mention_type, timestamp, mention_sentiment, post_avg_sentiment)
+            (
+                ticker_id,
+                subreddit_id,
+                post_id,
+                mention_type,
+                timestamp,
+                mention_sentiment,
+                post_avg_sentiment,
+            ),
        )
        conn.commit()
    except sqlite3.IntegrityError:
        pass


def get_or_create_entity(conn, table_name, column_name, value):
    """Generic function to get or create an entity and return its ID."""
    cursor = conn.cursor()
    cursor.execute(f"SELECT id FROM {table_name} WHERE {column_name} = ?", (value,))
    result = cursor.fetchone()
    if result:
-        return result['id']
+        return result["id"]
    else:
        cursor.execute(f"INSERT INTO {table_name} ({column_name}) VALUES (?)", (value,))
        conn.commit()
        return cursor.lastrowid


def update_ticker_financials(conn, ticker_id, market_cap, closing_price):
    """Updates the financials and timestamp for a specific ticker."""
    cursor = conn.cursor()
    current_timestamp = int(time.time())
    cursor.execute(
        "UPDATE tickers SET market_cap = ?, closing_price = ?, last_updated = ? WHERE id = ?",
-        (market_cap, closing_price, current_timestamp, ticker_id)
+        (market_cap, closing_price, current_timestamp, ticker_id),
    )
    conn.commit()


def get_ticker_info(conn, ticker_id):
    """Retrieves all info for a specific ticker by its ID."""
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM tickers WHERE id = ?", (ticker_id,))
    return cursor.fetchone()


def get_week_start_end(for_date):
    """
    Calculates the start (Monday, 00:00:00) and end (Sunday, 23:59:59)
@@ -250,6 +219,7 @@ def get_week_start_end(for_date):

    return start_of_week, end_of_week

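The body of get_week_start_end is elided by the hunk above. For reference, a minimal sketch of the Monday-to-Sunday boundary calculation the docstring describes could look like this; it is an illustration, not the committed implementation:

# Illustrative sketch only.
from datetime import timedelta

def get_week_start_end(for_date):
    # Monday of the week containing for_date, at 00:00:00
    start_of_week = (for_date - timedelta(days=for_date.weekday())).replace(
        hour=0, minute=0, second=0, microsecond=0
    )
    # Sunday of the same week, at 23:59:59
    end_of_week = start_of_week + timedelta(days=6, hours=23, minutes=59, seconds=59)
    return start_of_week, end_of_week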
def add_or_update_post_analysis(conn, post_data):
    """
    Inserts a new post analysis record or updates an existing one.
@@ -265,40 +235,61 @@ def add_or_update_post_analysis(conn, post_data):
            comment_count = excluded.comment_count,
            avg_comment_sentiment = excluded.avg_comment_sentiment;
        """,
-        post_data
+        post_data,
    )
    conn.commit()

-def get_overall_summary(limit=50):
+def get_overall_summary(limit=10):
+    """
+    Gets the top tickers across all subreddits from the LAST 24 HOURS.
+    """
    conn = get_db_connection()
+    one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
+    one_day_ago_timestamp = int(one_day_ago.timestamp())

    query = """
        SELECT t.symbol, t.market_cap, t.closing_price, COUNT(m.id) as mention_count,
            SUM(CASE WHEN m.mention_sentiment > 0.1 THEN 1 ELSE 0 END) as bullish_mentions,
            SUM(CASE WHEN m.mention_sentiment < -0.1 THEN 1 ELSE 0 END) as bearish_mentions,
            SUM(CASE WHEN m.mention_sentiment BETWEEN -0.1 AND 0.1 THEN 1 ELSE 0 END) as neutral_mentions
        FROM mentions m JOIN tickers t ON m.ticker_id = t.id
-        GROUP BY t.symbol, t.market_cap, t.closing_price ORDER BY mention_count DESC LIMIT ?;
+        WHERE m.mention_timestamp >= ? -- <-- ADDED TIME FILTER
+        GROUP BY t.symbol, t.market_cap, t.closing_price
+        ORDER BY mention_count DESC LIMIT ?;
    """
-    results = conn.execute(query, (limit,)).fetchall()
+    results = conn.execute(query, (one_day_ago_timestamp, limit)).fetchall()
    conn.close()
    return results

-def get_subreddit_summary(subreddit_name, limit=50):
+def get_subreddit_summary(subreddit_name, limit=10):
+    """
+    Gets the top tickers for a specific subreddit from the LAST 24 HOURS.
+    """
    conn = get_db_connection()
+    one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
+    one_day_ago_timestamp = int(one_day_ago.timestamp())

    query = """
        SELECT t.symbol, t.market_cap, t.closing_price, COUNT(m.id) as mention_count,
            SUM(CASE WHEN m.mention_sentiment > 0.1 THEN 1 ELSE 0 END) as bullish_mentions,
            SUM(CASE WHEN m.mention_sentiment < -0.1 THEN 1 ELSE 0 END) as bearish_mentions,
            SUM(CASE WHEN m.mention_sentiment BETWEEN -0.1 AND 0.1 THEN 1 ELSE 0 END) as neutral_mentions
        FROM mentions m JOIN tickers t ON m.ticker_id = t.id JOIN subreddits s ON m.subreddit_id = s.id
-        WHERE LOWER(s.name) = LOWER(?) GROUP BY t.symbol, t.market_cap, t.closing_price ORDER BY mention_count DESC LIMIT ?;
+        WHERE LOWER(s.name) = LOWER(?) AND m.mention_timestamp >= ? -- <-- ADDED TIME FILTER
+        GROUP BY t.symbol, t.market_cap, t.closing_price
+        ORDER BY mention_count DESC LIMIT ?;
    """
-    results = conn.execute(query, (subreddit_name, limit)).fetchall()
+    results = conn.execute(
+        query, (subreddit_name, one_day_ago_timestamp, limit)
+    ).fetchall()
    conn.close()
    return results


def get_daily_summary_for_subreddit(subreddit_name):
-    """ Gets a summary for the DAILY image view (last 24 hours). """
+    """Gets a summary for the DAILY image view (last 24 hours)."""
    conn = get_db_connection()
    one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
    one_day_ago_timestamp = int(one_day_ago.timestamp())
@@ -317,8 +308,9 @@ def get_daily_summary_for_subreddit(subreddit_name):
    conn.close()
    return results


def get_weekly_summary_for_subreddit(subreddit_name, for_date):
-    """ Gets a summary for the WEEKLY image view (full week). """
+    """Gets a summary for the WEEKLY image view (full week)."""
    conn = get_db_connection()
    start_of_week, end_of_week = get_week_start_end(for_date)
    start_timestamp = int(start_of_week.timestamp())
@@ -334,13 +326,20 @@ def get_weekly_summary_for_subreddit(subreddit_name, for_date):
        GROUP BY t.symbol, t.market_cap, t.closing_price
        ORDER BY total_mentions DESC LIMIT 10;
    """
-    results = conn.execute(query, (subreddit_name, start_timestamp, end_timestamp)).fetchall()
+    results = conn.execute(
+        query, (subreddit_name, start_timestamp, end_timestamp)
+    ).fetchall()
    conn.close()
    return results, start_of_week, end_of_week


def get_overall_image_view_summary():
-    """ Gets a summary of top tickers across ALL subreddits for the image view. """
+    """
+    Gets a summary of top tickers across ALL subreddits for the DAILY image view (last 24 hours).
+    """
    conn = get_db_connection()
+    one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
+    one_day_ago_timestamp = int(one_day_ago.timestamp())
    query = """
        SELECT
            t.symbol, t.market_cap, t.closing_price,
@@ -348,15 +347,64 @@ def get_overall_image_view_summary():
            COUNT(CASE WHEN m.mention_sentiment > 0.1 THEN 1 END) as bullish_mentions,
            COUNT(CASE WHEN m.mention_sentiment < -0.1 THEN 1 END) as bearish_mentions
        FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        WHERE m.mention_timestamp >= ? -- <-- ADDED TIME FILTER
        GROUP BY t.symbol, t.market_cap, t.closing_price
        ORDER BY total_mentions DESC LIMIT 10;
    """
-    results = conn.execute(query).fetchall()
+    results = conn.execute(query, (one_day_ago_timestamp,)).fetchall()
    conn.close()
    return results


+def get_overall_daily_summary():
+    """
+    Gets the top tickers across all subreddits from the LAST 24 HOURS.
+    (This is a copy of get_overall_summary, renamed for clarity).
+    """
+    conn = get_db_connection()
+    one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
+    one_day_ago_timestamp = int(one_day_ago.timestamp())
+    query = """
+        SELECT t.symbol, t.market_cap, t.closing_price, COUNT(m.id) as total_mentions,
+            SUM(CASE WHEN m.mention_sentiment > 0.1 THEN 1 ELSE 0 END) as bullish_mentions,
+            SUM(CASE WHEN m.mention_sentiment < -0.1 THEN 1 ELSE 0 END) as bearish_mentions
+        FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        WHERE m.mention_timestamp >= ?
+        GROUP BY t.symbol, t.market_cap, t.closing_price
+        ORDER BY total_mentions DESC LIMIT 10;
+    """
+    results = conn.execute(query, (one_day_ago_timestamp,)).fetchall()
+    conn.close()
+    return results
+
+
+def get_overall_weekly_summary():
+    """
+    Gets the top tickers across all subreddits for the LAST 7 DAYS.
+    """
+    conn = get_db_connection()
+    today = datetime.now(timezone.utc)
+    start_of_week, end_of_week = get_week_start_end(
+        today - timedelta(days=7)
+    )  # Get last week's boundaries
+    start_timestamp = int(start_of_week.timestamp())
+    end_timestamp = int(end_of_week.timestamp())
+    query = """
+        SELECT t.symbol, t.market_cap, t.closing_price, COUNT(m.id) as total_mentions,
+            SUM(CASE WHEN m.mention_sentiment > 0.1 THEN 1 ELSE 0 END) as bullish_mentions,
+            SUM(CASE WHEN m.mention_sentiment < -0.1 THEN 1 ELSE 0 END) as bearish_mentions
+        FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        WHERE m.mention_timestamp BETWEEN ? AND ?
+        GROUP BY t.symbol, t.market_cap, t.closing_price
+        ORDER BY total_mentions DESC LIMIT 10;
+    """
+    results = conn.execute(query, (start_timestamp, end_timestamp)).fetchall()
+    conn.close()
+    return results, start_of_week, end_of_week


def get_deep_dive_details(ticker_symbol):
-    """ Gets all analyzed posts that mention a specific ticker. """
+    """Gets all analyzed posts that mention a specific ticker."""
    conn = get_db_connection()
    query = """
        SELECT DISTINCT p.*, s.name as subreddit_name FROM posts p
@@ -368,12 +416,16 @@ def get_deep_dive_details(ticker_symbol):
    conn.close()
    return results


def get_all_scanned_subreddits():
-    """ Gets a unique list of all subreddits we have data for. """
+    """Gets a unique list of all subreddits we have data for."""
    conn = get_db_connection()
-    results = conn.execute("SELECT DISTINCT name FROM subreddits ORDER BY name ASC;").fetchall()
+    results = conn.execute(
+        "SELECT DISTINCT name FROM subreddits ORDER BY name ASC;"
+    ).fetchall()
    conn.close()
-    return [row['name'] for row in results]
+    return [row["name"] for row in results]


def get_all_tickers():
    """Retrieves the ID and symbol of every ticker in the database."""
@@ -381,3 +433,92 @@ def get_all_tickers():
    results = conn.execute("SELECT id, symbol FROM tickers;").fetchall()
    conn.close()
    return results


+def get_ticker_by_symbol(symbol):
+    """
+    Retrieves a single ticker's ID and symbol from the database.
+    The search is case-insensitive. Returns a Row object or None if not found.
+    """
+    conn = get_db_connection()
+    cursor = conn.cursor()
+    cursor.execute(
+        "SELECT id, symbol FROM tickers WHERE LOWER(symbol) = LOWER(?)", (symbol,)
+    )
+    result = cursor.fetchone()
+    conn.close()
+    return result
+
+
+def get_top_daily_ticker_symbols():
+    """Gets a simple list of the Top 10 ticker symbols from the last 24 hours."""
+    conn = get_db_connection()
+    one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
+    one_day_ago_timestamp = int(one_day_ago.timestamp())
+    query = """
+        SELECT t.symbol FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        WHERE m.mention_timestamp >= ?
+        GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
+    """
+    results = conn.execute(query, (one_day_ago_timestamp,)).fetchall()
+    conn.close()
+    return [row["symbol"] for row in results]  # Return a simple list of strings
+
+
+def get_top_weekly_ticker_symbols():
+    """Gets a simple list of the Top 10 ticker symbols from the last 7 days."""
+    conn = get_db_connection()
+    seven_days_ago = datetime.now(timezone.utc) - timedelta(days=7)
+    seven_days_ago_timestamp = int(seven_days_ago.timestamp())
+    query = """
+        SELECT t.symbol FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        WHERE m.mention_timestamp >= ?
+        GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
+    """
+    results = conn.execute(query, (seven_days_ago_timestamp,)).fetchall()
+    conn.close()
+    return [row["symbol"] for row in results]  # Return a simple list of strings
+
+
+def get_top_daily_ticker_symbols_for_subreddit(subreddit_name):
+    """Gets a list of the Top 10 daily ticker symbols for a specific subreddit."""
+    conn = get_db_connection()
+    one_day_ago = datetime.now(timezone.utc) - timedelta(days=1)
+    one_day_ago_timestamp = int(one_day_ago.timestamp())
+    query = """
+        SELECT t.symbol FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        JOIN subreddits s ON m.subreddit_id = s.id
+        WHERE LOWER(s.name) = LOWER(?) AND m.mention_timestamp >= ?
+        GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
+    """
+    results = conn.execute(
+        query,
+        (
+            subreddit_name,
+            one_day_ago_timestamp,
+        ),
+    ).fetchall()
+    conn.close()
+    return [row["symbol"] for row in results]
+
+
+def get_top_weekly_ticker_symbols_for_subreddit(subreddit_name):
+    """Gets a list of the Top 10 weekly ticker symbols for a specific subreddit."""
+    conn = get_db_connection()
+    seven_days_ago = datetime.now(timezone.utc) - timedelta(days=7)
+    seven_days_ago_timestamp = int(seven_days_ago.timestamp())
+    query = """
+        SELECT t.symbol FROM mentions m JOIN tickers t ON m.ticker_id = t.id
+        JOIN subreddits s ON m.subreddit_id = s.id
+        WHERE LOWER(s.name) = LOWER(?) AND m.mention_timestamp >= ?
+        GROUP BY t.symbol ORDER BY COUNT(m.id) DESC LIMIT 10;
+    """
+    results = conn.execute(
+        query,
+        (
+            subreddit_name,
+            seven_days_ago_timestamp,
+        ),
+    ).fetchall()
+    conn.close()
+    return [row["symbol"] for row in results]

@@ -1,46 +0,0 @@
-# rstat_tool/fetcher.py
-# A dedicated, isolated script for fetching financial data.
-
-import sys
-import json
-import yfinance as yf
-import pandas as pd
-import logging
-
-# Suppress verbose yfinance logging in this isolated process
-logging.getLogger("yfinance").setLevel(logging.CRITICAL)
-
-def get_financial_data_isolated(ticker_symbol):
-    """
-    Fetches market cap and the most recent closing price for a ticker.
-    This is the robust version of the function.
-    """
-    market_cap = None
-    closing_price = None
-    try:
-        data = yf.download(
-            ticker_symbol, period="2d", progress=False, auto_adjust=False
-        )
-        if not data.empty:
-            last_close_raw = data['Close'].iloc[-1]
-            if pd.notna(last_close_raw):
-                closing_price = float(last_close_raw)
-        try:
-            market_cap = yf.Ticker(ticker_symbol).info.get('marketCap')
-        except Exception:
-            # This is a non-critical failure, we can proceed without market cap
-            pass
-        return {"market_cap": market_cap, "closing_price": closing_price}
-    except Exception:
-        # This is a critical failure, return None for both
-        return {"market_cap": None, "closing_price": None}
-
-if __name__ == "__main__":
-    if len(sys.argv) < 2:
-        # This script requires a ticker symbol as an argument
-        sys.exit(1)
-
-    ticker_to_fetch = sys.argv[1]
-    result = get_financial_data_isolated(ticker_to_fetch)
-    # Print the result as a JSON string to standard output
-    print(json.dumps(result))
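The deleted fetcher was self-contained, so judging only from its __main__ block above it was invoked as a subprocess roughly like this (illustrative; the exact invocation used by the scanner is not shown here):

python rstat_tool/fetcher.py AAPL
# prints a JSON object such as {"market_cap": ..., "closing_price": ...}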
@@ -6,110 +6,113 @@ COMMON_WORDS_BLACKLIST = {
    "AI", "AINT", "AK", "ALD", "ALGOS", "ALIVE", "ALL", "ALPHA", "ALSO", "AM",
    "AMA", "AMEX", "AMK", "AMY", "AND", "ANSS", "ANY", "APES", "APL", "APPL",
    "APPLE", "APR", "APUS", "APY", "AR", "ARBK", "ARE", "AREA", "ARH", "ARK",
-    "AROUND", "ART", "ASAP", "ASEAN", "ASK", "ASS", "ASSET", "AST", "AT", "ATH",
-    "ATL", "ATM", "AUD", "AUG", "AUM", "AV", "AVG", "AWS", "BABY", "BAG",
-    "BAGS", "BALLS", "BAN", "BANG", "BASIC", "BBB", "BBBY", "BE", "BEAR", "BEARS",
-    "BECN", "BEER", "BELL", "BELOW", "BETA", "BETS", "BF", "BID", "BIG", "BIS",
-    "BITCH", "BKEY", "BLEND", "BNPL", "BOE", "BOJ", "BOLL", "BOMB", "BOND", "BONED",
-    "BORN", "BOTH", "BOTS", "BOY", "BOYS", "BRB", "BRICS", "BRK", "BRKA", "BRKB",
-    "BRL", "BROKE", "BRRRR", "BS", "BSE", "BST", "BSU", "BT", "BTC", "BTS",
-    "BTW", "BUDDY", "BULL", "BULLS", "BUST", "BUT", "BUY", "BUZZ", "CAD", "CAFE",
-    "CAGR", "CALL", "CALLS", "CAN", "CAP", "CARB", "CARES", "CASE", "CATL", "CBD",
-    "CBGM", "CBS", "CCI", "CCP", "CD", "CDN", "CEO", "CEST", "CET", "CEX",
-    "CFD", "CFO", "CFPB", "CHART", "CHASE", "CHATS", "CHECK", "CHF", "CHICK", "CHIP",
-    "CHIPS", "CIA", "CIC", "CLAIM", "CLEAN", "CLICK", "CLOSE", "CMON", "CN", "CNBC",
-    "CNN", "CNY", "COBRA", "COCK", "COGS", "COIL", "COKE", "COME", "COST", "COULD",
-    "COVID", "CPAP", "CPI", "CRA", "CRE", "CRO", "CRV", "CSE", "CSP", "CSS",
-    "CST", "CTB", "CTEP", "CTO", "CUCKS", "CULT", "CUM", "CUTS", "CUV", "CYCLE",
-    "CZK", "DA", "DAILY", "DAO", "DATE", "DAX", "DAY", "DAYS", "DCA", "DCF",
-    "DD", "DEAL", "DEBT", "DEEZ", "DEMO", "DET", "DEX", "DGAF", "DIA", "DID",
-    "DIDNT", "DIP", "DITM", "DIV", "DIY", "DJI", "DJIA", "DJTJ", "DKK", "DL",
-    "DM", "DMV", "DNI", "DNUTZ", "DO", "DOD", "DOE", "DOES", "DOGE", "DOING",
-    "DOJ", "DOM", "DONNY", "DONT", "DONUT", "DOOR", "DOWN", "DOZEN", "DPI", "DR",
-    "DUDE", "DUMP", "DUNT", "DUT", "DUTY", "DXY", "DXYXBT", "DYI", "DYNK", "DYODD",
-    "DYOR", "EACH", "EARLY", "EARN", "EAST", "EASY", "ECB", "EDGAR", "EDIT", "EDT",
-    "EJ", "EMA", "EMJ", "EMT", "END", "ENRON", "ENSI", "ENV", "EO", "EOD",
-    "EOM", "EOW", "EOY", "EPA", "EPK", "EPS", "ER", "ESG", "ESPP", "EST",
-    "ETA", "ETF", "ETFS", "ETH", "ETL", "EU", "EUR", "EV", "EVEN", "EVERY",
-    "EVTOL", "EXTRA", "EYES", "EZ", "FAANG", "FAFO", "FAQ", "FAR", "FAST", "FBI",
-    "FCC", "FCFF", "FD", "FDA", "FEE", "FFH", "FFS", "FGMA", "FIG", "FIGMA",
-    "FIHTX", "FILES", "FINAL", "FIND", "FING", "FINRA", "FINT", "FINTX", "FINTY", "FIRST",
-    "FKIN", "FLRAA", "FLT", "FLY", "FML", "FOLO", "FOMC", "FOMO", "FOR", "FOREX",
-    "FRAUD", "FREAK", "FRED", "FRG", "FROM", "FRP", "FRS", "FSBO", "FSD", "FSE",
-    "FSELK", "FSPSX", "FTD", "FTSE", "FUCK", "FUCKS", "FUD", "FULL", "FUND", "FUNNY",
-    "FVG", "FWIW", "FX", "FXAIX", "FXIAX", "FXROX", "FY", "FYI", "FZROX", "GAAP",
-    "GAIN", "GAVE", "GBP", "GC", "GDP", "GET", "GG", "GGTM", "GIVES", "GJ",
-    "GL", "GLHF", "GMAT", "GMI", "GMT", "GO", "GOAL", "GOAT", "GOD", "GOING",
-    "GOLD", "GONE", "GONNA", "GOODS", "GOPRO", "GPT", "GPU", "GRAB", "GREAT", "GREEN",
-    "GSOV", "GST", "GTA", "GTC", "GTFO", "GTG", "GUH", "GUNS", "GUY", "GUYS",
-    "HAD", "HAHA", "HALF", "HAM", "HANDS", "HAS", "HATE", "HAVE", "HBAR", "HCOL",
-    "HEAR", "HEDGE", "HEGE", "HELD", "HELL", "HELP", "HERE", "HEY", "HFCS", "HFT",
-    "HGTV", "HIGH", "HIGHS", "HINT", "HIS", "HITID", "HK", "HKD", "HKEX", "HODL",
-    "HODOR", "HOF", "HOLD", "HOLY", "HOME", "HOT", "HOUR", "HOURS", "HOW", "HS",
-    "HSA", "HSI", "HT", "HTF", "HTML", "HUF", "HUGE", "HYPE", "IANAL", "IATF",
-    "IB", "IBS", "ICT", "ID", "IDF", "IDK", "IF", "II", "IIRC", "IKKE",
-    "IKZ", "IM", "IMHO", "IMI", "IMO", "IN", "INC", "INR", "INTO", "IP",
+    "AROUND", "ART", "AS", "ASAP", "ASEAN", "ASK", "ASS", "ASSET", "AST", "AT",
+    "ATH", "ATL", "ATM", "AUD", "AUG", "AUM", "AV", "AVG", "AWS", "BABY",
+    "BAG", "BAGS", "BALLS", "BAN", "BANG", "BASIC", "BBB", "BBBY", "BE", "BEAR",
+    "BEARS", "BECN", "BEER", "BELL", "BELOW", "BETA", "BETS", "BF", "BID", "BIG",
+    "BIS", "BITCH", "BKEY", "BLEND", "BMW", "BNP", "BNPL", "BOE", "BOJ", "BOLL",
+    "BOMB", "BOND", "BONED", "BORN", "BOTH", "BOTS", "BOY", "BOYS", "BRB", "BRICS",
+    "BRK", "BRKA", "BRKB", "BRL", "BROKE", "BRRRR", "BS", "BSE", "BST", "BSU",
+    "BT", "BTC", "BTS", "BTW", "BUDDY", "BULL", "BULLS", "BUST", "BUT", "BUY",
+    "BUZZ", "CAD", "CAFE", "CAGR", "CALL", "CALLS", "CAN", "CAP", "CARB", "CARES",
+    "CASE", "CATL", "CBD", "CBGM", "CBS", "CCI", "CCP", "CD", "CDN", "CEO",
+    "CEST", "CET", "CEX", "CFD", "CFO", "CFPB", "CHART", "CHASE", "CHATS", "CHECK",
+    "CHF", "CHICK", "CHIP", "CHIPS", "CIA", "CIC", "CLAIM", "CLEAN", "CLICK", "CLOSE",
+    "CMON", "CN", "CNBC", "CNN", "CNY", "COBRA", "COCK", "COGS", "COIL", "COKE",
+    "COME", "COST", "COULD", "COVID", "CPAP", "CPI", "CRA", "CRE", "CRO", "CRV",
+    "CSE", "CSP", "CSS", "CST", "CTB", "CTEP", "CTO", "CUCKS", "CULT", "CUM",
+    "CUSMA", "CUTS", "CUV", "CYCLE", "CZK", "DA", "DAILY", "DAO", "DATE", "DAX",
+    "DAY", "DAYS", "DCA", "DCF", "DD", "DEAL", "DEBT", "DEEZ", "DEMO", "DET",
+    "DEX", "DGAF", "DIA", "DID", "DIDNT", "DIP", "DITM", "DIV", "DIY", "DJI",
+    "DJIA", "DJTJ", "DKK", "DL", "DM", "DMV", "DNI", "DNUTZ", "DO", "DOD",
+    "DOE", "DOES", "DOGE", "DOING", "DOJ", "DOM", "DONNY", "DONT", "DONUT", "DOOR",
+    "DOWN", "DOZEN", "DPI", "DR", "DUDE", "DUMP", "DUNT", "DUT", "DUTY", "DXY",
+    "DXYXBT", "DYI", "DYNK", "DYODD", "DYOR", "EACH", "EARLY", "EARN", "EAST", "EASY",
+    "ECB", "EDGAR", "EDIT", "EDT", "EJ", "EMA", "EMJ", "EMT", "END", "ENRON",
+    "ENSI", "ENV", "EO", "EOD", "EOM", "EOW", "EOY", "EPA", "EPK", "EPS",
+    "ER", "ESG", "ESPP", "EST", "ETA", "ETF", "ETFS", "ETH", "ETL", "EU",
+    "EUR", "EV", "EVEN", "EVERY", "EVTOL", "EXTRA", "EYES", "EZ", "FAANG", "FAFO",
+    "FAQ", "FAR", "FAST", "FBI", "FCC", "FCFF", "FD", "FDA", "FEE", "FFH",
+    "FFS", "FGMA", "FIG", "FIGMA", "FIHTX", "FILES", "FINAL", "FIND", "FING", "FINRA",
+    "FINT", "FINTX", "FINTY", "FIRE", "FIRST", "FKIN", "FLRAA", "FLT", "FLY", "FML",
+    "FOLO", "FOMC", "FOMO", "FOR", "FOREX", "FRAUD", "FREAK", "FRED", "FRG", "FROM",
+    "FRP", "FRS", "FSBO", "FSD", "FSE", "FSELK", "FSPSX", "FTD", "FTSE", "FUCK",
+    "FUCKS", "FUD", "FULL", "FUND", "FUNNY", "FVG", "FWIW", "FX", "FXAIX", "FXIAX",
+    "FXROX", "FY", "FYI", "FZROX", "GAAP", "GAIN", "GAVE", "GBP", "GC", "GDP",
+    "GET", "GFC", "GG", "GGTM", "GIVES", "GJ", "GL", "GLHF", "GMAT", "GMI",
+    "GMT", "GO", "GOAL", "GOAT", "GOD", "GOING", "GOLD", "GONE", "GONNA", "GOODS",
+    "GOPRO", "GPT", "GPU", "GRAB", "GREAT", "GREEN", "GSOV", "GST", "GTA", "GTC",
+    "GTFO", "GTG", "GUH", "GUNS", "GUY", "GUYS", "HAD", "HAHA", "HALF", "HAM",
+    "HANDS", "HAS", "HATE", "HAVE", "HBAR", "HCOL", "HEAR", "HEDGE", "HEGE", "HELD",
+    "HELL", "HELP", "HERE", "HEY", "HFCS", "HFT", "HGTV", "HIGH", "HIGHS", "HINT",
+    "HIS", "HITID", "HK", "HKD", "HKEX", "HODL", "HODOR", "HOF", "HOLD", "HOLY",
+    "HOME", "HOT", "HOUR", "HOURS", "HOW", "HS", "HSA", "HSI", "HT", "HTCI",
+    "HTF", "HTML", "HUF", "HUGE", "HV", "HYPE", "IANAL", "IATF", "IB", "IBS",
+    "ICSID", "ICT", "ID", "IDF", "IDK", "IF", "II", "IIRC", "IKKE", "IKZ",
+    "IM", "IMHO", "IMI", "IMO", "IN", "INC", "INR", "INTEL", "INTO", "IP",
    "IPO", "IQVIA", "IRA", "IRAS", "IRC", "IRISH", "IRMAA", "IRS", "IS", "ISA",
    "ISIN", "ISM", "ISN", "IST", "IT", "ITC", "ITM", "ITS", "ITWN", "IUIT",
-    "IV", "IVV", "IWM", "IXL", "IYKYK", "JAVA", "JD", "JDM", "JE", "JFC",
-    "JK", "JLR", "JMO", "JOBS", "JOIN", "JOKE", "JP", "JPOW", "JPY", "JS",
-    "JST", "JUN", "JUST", "KARMA", "KEEP", "KILL", "KING", "KK", "KNEW", "KNOW",
-    "KO", "KOHLS", "KPMG", "KRW", "LANGT", "LARGE", "LAST", "LATE", "LATER", "LBO",
-    "LBTC", "LCS", "LDL", "LEADS", "LEAP", "LEAPS", "LEARN", "LEI", "LET", "LETF",
-    "LETS", "LFA", "LFG", "LFP", "LG", "LGEN", "LIFE", "LIG", "LIGMA", "LIKE",
-    "LIMIT", "LIST", "LLC", "LLM", "LM", "LMAO", "LMAOO", "LMM", "LMN", "LOANS",
-    "LOKO", "LOL", "LOLOL", "LONG", "LONGS", "LOOK", "LOSE", "LOSS", "LOST", "LOVE",
-    "LOVES", "LOW", "LOWER", "LOWS", "LP", "LSS", "LTCG", "LUCID", "LUPD", "LYC",
-    "LYING", "M&A", "MA", "MACD", "MAIL", "MAKE", "MAKES", "MANGE", "MANY", "MASON",
-    "MAX", "MAY", "MAYBE", "MBA", "MC", "MCAP", "MCNA", "MCP", "ME", "MEAN",
-    "MEME", "MERGE", "MERK", "MES", "MEXC", "MF", "MFER", "MID", "MIGHT", "MIN",
-    "MIND", "MINS", "ML", "MLB", "MLS", "MM", "MMF", "MNQ", "MOASS", "MODEL",
-    "MOM", "MONEY", "MONTH", "MONY", "MOON", "MORE", "MOST", "MOU", "MSK", "MTVGA",
-    "MUCH", "MUSIC", "MUST", "MVA", "MXN", "MY", "MYMD", "NASA", "NASDA", "NATO",
-    "NAV", "NBA", "NBC", "NCAN", "NCR", "NEAR", "NEAT", "NEED", "NEVER", "NEW",
-    "NEWS", "NEXT", "NFA", "NFC", "NFL", "NFT", "NGAD", "NGMI", "NIGHT", "NIQ",
-    "NK", "NO", "NOK", "NONE", "NOOO", "NOPE", "NORTH", "NOT", "NOVA", "NOW",
-    "NQ", "NRI", "NSA", "NSLC", "NTG", "NTVS", "NULL", "NUT", "NUTS", "NUTZ",
-    "NVM", "NW", "NY", "NYSE", "NZ", "NZD", "OBBB", "OBI", "OBS", "OBV",
-    "OCD", "OCF", "OCO", "ODAT", "OEM", "OF", "OFA", "OFF", "OG", "OH",
-    "OK", "OKAY", "OL", "OLD", "OMFG", "OMG", "ON", "ONDAS", "ONE", "ONLY",
-    "OP", "OPEC", "OPENQ", "OPEX", "OPRN", "OR", "ORB", "ORDER", "ORTEX", "OS",
-    "OSCE", "OT", "OTC", "OTM", "OTOH", "OUCH", "OUGHT", "OUR", "OUT", "OVER",
-    "OWN", "OZZY", "PA", "PANIC", "PC", "PDT", "PE", "PEAK", "PEG", "PETA",
-    "PEW", "PFC", "PGHL", "PIMCO", "PITA", "PLAN", "PLAYS", "PLC", "PLN", "PM",
+    "IV", "IVV", "IWM", "IXL", "IXLH", "IYKYK", "JAVA", "JD", "JDG", "JDM",
+    "JE", "JFC", "JK", "JLR", "JMO", "JOBS", "JOIN", "JOKE", "JP", "JPOW",
+    "JPY", "JS", "JST", "JUN", "JUST", "KARMA", "KEEP", "KILL", "KING", "KK",
+    "KLA", "KLP", "KNEW", "KNOW", "KO", "KOHLS", "KPMG", "KRW", "LA", "LANGT",
+    "LARGE", "LAST", "LATE", "LATER", "LBO", "LBTC", "LCS", "LDL", "LEADS", "LEAP",
+    "LEAPS", "LEARN", "LEI", "LET", "LETF", "LETS", "LFA", "LFG", "LFP", "LG",
+    "LGEN", "LIFE", "LIG", "LIGMA", "LIKE", "LIMIT", "LIST", "LLC", "LLM", "LM",
+    "LMAO", "LMAOO", "LMM", "LMN", "LOANS", "LOKO", "LOL", "LOLOL", "LONG", "LONGS",
+    "LOOK", "LOSE", "LOSS", "LOST", "LOVE", "LOVES", "LOW", "LOWER", "LOWS", "LP",
+    "LSS", "LTCG", "LUCID", "LUPD", "LYC", "LYING", "M&A", "MA", "MACD", "MAIL",
+    "MAKE", "MAKES", "MANGE", "MANY", "MASON", "MAX", "MAY", "MAYBE", "MBA", "MC",
+    "MCAP", "MCNA", "MCP", "ME", "MEAN", "MEME", "MERGE", "MERK", "MES", "MEXC",
+    "MF", "MFER", "MID", "MIGHT", "MIN", "MIND", "MINS", "ML", "MLB", "MLS",
+    "MM", "MMF", "MNQ", "MOASS", "MODEL", "MODTX", "MOM", "MONEY", "MONTH", "MONY",
+    "MOON", "MORE", "MOST", "MOU", "MSK", "MTVGA", "MUCH", "MUSIC", "MUST", "MVA",
+    "MXN", "MY", "MYMD", "NASA", "NASDA", "NATO", "NAV", "NBA", "NBC", "NCAN",
+    "NCR", "NEAR", "NEAT", "NEED", "NEVER", "NEW", "NEWS", "NEXT", "NFA", "NFC",
+    "NFL", "NFT", "NGAD", "NGMI", "NIGHT", "NIQ", "NK", "NO", "NOK", "NON",
+    "NONE", "NOOO", "NOPE", "NORTH", "NOT", "NOVA", "NOW", "NQ", "NRI", "NSA",
+    "NSCLC", "NSLC", "NTG", "NTVS", "NULL", "NUT", "NUTS", "NUTZ", "NVM", "NW",
+    "NY", "NYSE", "NZ", "NZD", "OBBB", "OBI", "OBS", "OBV", "OCD", "OCF",
+    "OCO", "ODAT", "ODTE", "OEM", "OF", "OFA", "OFF", "OG", "OH", "OK",
+    "OKAY", "OL", "OLD", "OMFG", "OMG", "ON", "ONDAS", "ONE", "ONLY", "OP",
+    "OPEC", "OPENQ", "OPEX", "OPRN", "OR", "ORB", "ORDER", "ORTEX", "OS", "OSCE",
+    "OT", "OTC", "OTM", "OTOH", "OUCH", "OUGHT", "OUR", "OUT", "OVER", "OWN",
+    "OZZY", "PA", "PANIC", "PC", "PDT", "PE", "PEAK", "PEG", "PETA", "PEW",
+    "PFC", "PGHL", "PIMCO", "PITA", "PLAN", "PLAYS", "PLC", "PLN", "PM", "PMCC",
    "PMI", "PNL", "POC", "POMO", "POP", "POS", "POSCO", "POTUS", "POV", "POW",
    "PPI", "PR", "PRICE", "PRIME", "PROFIT", "PROXY", "PS", "PSA", "PST", "PT",
-    "PTD", "PUSSY", "PUT", "PWC", "Q1", "Q2", "Q3", "Q4", "QE", "QED",
-    "QIMC", "QQQ", "QR", "RAM", "RATM", "RBA", "RBNZ", "RE", "REACH", "READY",
-    "REAL", "RED", "REIT", "REITS", "REKT", "REPE", "RFK", "RH", "RICO", "RIDE",
-    "RIGHT", "RIP", "RISK", "RISKY", "RNDC", "ROCE", "ROCK", "ROE", "ROFL", "ROI",
-    "ROIC", "ROTH", "RPO", "RRSP", "RSD", "RSI", "RT", "RTD", "RUB", "RUG",
-    "RULE", "RUST", "RVOL", "SAGA", "SALES", "SAME", "SAVE", "SAYS", "SBF", "SBLOC",
-    "SC", "SCALP", "SCAM", "SCHB", "SCIF", "SEC", "SEE", "SEK", "SELL", "SELLL",
-    "SEP", "SESG", "SET", "SFOR", "SGD", "SHALL", "SHARE", "SHEIN", "SHELL", "SHIT",
-    "SHORT", "SHOW", "SHTF", "SI", "SICK", "SIGN", "SL", "SLIM", "SLOW", "SMA",
-    "SMALL", "SMFH", "SNZ", "SO", "SOLD", "SOLIS", "SOME", "SOON", "SOOO", "SOUTH",
-    "SP", "SPAC", "SPDR", "SPEND", "SPLG", "SPX", "SPY", "SQUAD", "SS", "SSA",
-    "SSDI", "START", "STAY", "STEEL", "STFU", "STILL", "STOCK", "STOOQ", "STOP", "STOR",
-    "STQQQ", "STUCK", "STUDY", "SUS", "SUSHI", "SUV", "SWIFT", "SWING", "TA", "TAG",
-    "TAKE", "TAM", "TBTH", "TEAMS", "TED", "TEMU", "TERM", "TESLA", "TEXT", "TF",
-    "TFNA", "TFSA", "THAN", "THANK", "THAT", "THATS", "THE", "THEIR", "THEM", "THEN",
-    "THERE", "THESE", "THEY", "THING", "THINK", "THIS", "TI", "TIA", "TIKR", "TIME",
-    "TIMES", "TINA", "TITS", "TJR", "TL", "TL;DR", "TLDR", "TO", "TODAY", "TOLD",
-    "TONS", "TOO", "TOS", "TOT", "TOTAL", "TP", "TPU", "TRADE", "TREND", "TRUE",
-    "TRUMP", "TRUST", "TRY", "TSA", "TSMC", "TSP", "TSX", "TSXV", "TTM", "TTYL",
-    "TWO", "UAW", "UCITS", "UGH", "UI", "UK", "UNDER", "UNITS", "UNTIL", "UP",
-    "US", "USA", "USD", "USMCA", "USSA", "USSR", "UTC", "VALID", "VALUE", "VAMOS",
-    "VEO", "VERY", "VFMXX", "VFV", "VI", "VISA", "VIX", "VLI", "VOO", "VP",
-    "VPAY", "VR", "VRVP", "VSUS", "VTI", "VUAG", "VW", "VWAP", "VWCE", "VXN",
-    "VXUX", "WAGER", "WAGMI", "WAIT", "WALL", "WANT", "WAS", "WATCH", "WAY", "WBTC",
-    "WE", "WEB", "WEB3", "WEEK", "WENT", "WERO", "WEST", "WHALE", "WHAT", "WHEN",
-    "WHERE", "WHICH", "WHILE", "WHO", "WHOS", "WHY", "WIDE", "WILL", "WIRE", "WIRED",
-    "WITH", "WL", "WON", "WOOPS", "WORDS", "WORTH", "WOULD", "WP", "WRONG", "WSB",
-    "WSJ", "WTF", "WV", "WWII", "WWIII", "X", "XAU", "XCUSE", "XD", "XEQT",
-    "XI", "XMR", "XO", "XRP", "XX", "YEAH", "YEET", "YES", "YET", "YIELD",
-    "YM", "YMMV", "YOIR", "YOLO", "YOU", "YOUR", "YOY", "YT", "YTD", "YUGE",
-    "YUPPP", "ZAR", "ZEN", "ZERO", "ZEV"
+    "PTD", "PUSSY", "PUT", "PUTS", "PWC", "Q1", "Q2", "Q3", "Q4", "QE",
+    "QED", "QIMC", "QQQ", "QR", "RAM", "RATM", "RBA", "RBNZ", "RE", "REACH",
+    "READY", "REAL", "RED", "REIT", "REITS", "REKT", "REPE", "RFK", "RH", "RICO",
+    "RIDE", "RIGHT", "RIP", "RISK", "RISKY", "RNDC", "ROCE", "ROCK", "ROE", "ROFL",
+    "ROI", "ROIC", "ROTH", "RPO", "RRSP", "RSD", "RSI", "RT", "RTD", "RUB",
+    "RUG", "RULE", "RUST", "RVOL", "SAGA", "SALES", "SAME", "SAVE", "SAYS", "SBF",
+    "SBLOC", "SC", "SCALP", "SCAM", "SCHB", "SCIF", "SEC", "SEE", "SEK", "SELL",
+    "SELLL", "SEP", "SESG", "SET", "SFOR", "SGD", "SHALL", "SHARE", "SHEIN", "SHELL",
+    "SHIT", "SHORT", "SHOW", "SHS", "SHTF", "SI", "SICK", "SIGN", "SL", "SLIM",
+    "SLOW", "SMA", "SMALL", "SMFH", "SNZ", "SO", "SOLD", "SOLIS", "SOME", "SOON",
+    "SOOO", "SOUTH", "SP", "SPAC", "SPDR", "SPEND", "SPLG", "SPX", "SPY", "SQUAD",
+    "SS", "SSA", "SSDI", "START", "STAY", "STEEL", "STFU", "STILL", "STO", "STOCK",
+    "STOOQ", "STOP", "STOR", "STQQQ", "STUCK", "STUDY", "SUS", "SUSHI", "SUV", "SWIFT",
+    "SWING", "TA", "TAG", "TAKE", "TAM", "TBTH", "TEAMS", "TED", "TEMU", "TERM",
+    "TESLA", "TEXT", "TF", "TFNA", "TFSA", "THAN", "THANK", "THAT", "THATS", "THE",
+    "THEIR", "THEM", "THEN", "THERE", "THESE", "THEY", "THING", "THINK", "THIS", "TI",
+    "TIA", "TIKR", "TIME", "TIMES", "TINA", "TITS", "TJR", "TL", "TL;DR", "TLDR",
+    "TNT", "TO", "TODAY", "TOLD", "TONS", "TOO", "TOS", "TOT", "TOTAL", "TP",
+    "TPU", "TRADE", "TREND", "TRUE", "TRUMP", "TRUST", "TRY", "TSA", "TSMC", "TSP",
+    "TSX", "TSXV", "TTIP", "TTM", "TTYL", "TURNS", "TWO", "UAW", "UCITS", "UGH",
+    "UI", "UK", "UNDER", "UNITS", "UNO", "UNTIL", "UP", "US", "USA", "USD",
+    "USMCA", "USSA", "USSR", "UTC", "VALID", "VALUE", "VAMOS", "VAT", "VEO", "VERY",
+    "VFMXX", "VFV", "VI", "VISA", "VIX", "VLI", "VOO", "VP", "VPAY", "VR",
+    "VRVP", "VSUS", "VTI", "VUAG", "VW", "VWAP", "VWCE", "VXN", "VXUX", "WAGER",
+    "WAGMI", "WAIT", "WALL", "WANT", "WAS", "WATCH", "WAY", "WBTC", "WE", "WEB",
+    "WEB3", "WEEK", "WENT", "WERO", "WEST", "WHALE", "WHAT", "WHEN", "WHERE", "WHICH",
+    "WHILE", "WHO", "WHOS", "WHY", "WIDE", "WILL", "WIRE", "WIRED", "WITH", "WL",
+    "WON", "WOOPS", "WORDS", "WORTH", "WOULD", "WP", "WRONG", "WSB", "WSJ", "WTF",
+    "WV", "WWII", "WWIII", "X", "XAU", "XCUSE", "XD", "XEQT", "XI", "XIV",
+    "XMR", "XO", "XRP", "XX", "YEAH", "YEET", "YES", "YET", "YIELD", "YM",
+    "YMMV", "YOIR", "YOLO", "YOU", "YOUR", "YOY", "YT", "YTD", "YUGE", "YUPPP",
+    "ZAR", "ZEN", "ZERO", "ZEV"
}


def format_and_print_list(word_set, words_per_line=10):
@@ -130,7 +133,7 @@ def format_and_print_list(word_set, words_per_line=10):
    # 3. Iterate through the sorted list and print words, respecting the line limit
    for i in range(0, len(sorted_words), words_per_line):
        # Get a chunk of words for the current line
-        line_chunk = sorted_words[i:i + words_per_line]
+        line_chunk = sorted_words[i : i + words_per_line]

        # Format each word with double quotes
        formatted_words = [f'"{word}"' for word in line_chunk]
@@ -149,6 +152,7 @@ def format_and_print_list(word_set, words_per_line=10):
    # 4. Print the closing brace
    print("}")


# --- Main execution ---
if __name__ == "__main__":
    format_and_print_list(COMMON_WORDS_BLACKLIST)

9
rstat_tool/gunicorn-cfg.py
Normal file
@@ -0,0 +1,9 @@
+# -*- encoding: utf-8 -*-
+
+bind = '0.0.0.0:5000'
+workers = 4
+worker_class = 'uvicorn.workers.UvicornWorker'
+accesslog = '-'
+loglevel = 'debug'
+capture_output = True
+enable_stdio_inheritance = True
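A hypothetical launch command for the new Gunicorn config; the module:app path is an assumption, since it is not shown in this changeset:

gunicorn -c rstat_tool/gunicorn-cfg.py rstat_tool.dashboard:app

One caveat worth noting: uvicorn.workers.UvicornWorker serves ASGI applications, while a plain Flask app is WSGI, so this setting presumably relies on the app being wrapped elsewhere; otherwise Gunicorn's default sync workers would be the more usual choice for Flask.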
@@ -3,32 +3,38 @@
import logging
import sys

-# Get the root logger for our application. Other modules will import this.
logger = logging.getLogger("rstat_app")

-def setup_logging(console_verbose=False):
-    """
-    Configures the application's logger and captures logs from yfinance.
-    """
-    logger.setLevel(logging.INFO)
-    logger.propagate = False

+def setup_logging(console_verbose=False, debug_mode=False):
+    """
+    Configures the application's logger with a new DEBUG level.
+    """
+    # The logger itself must be set to the lowest possible level (DEBUG).
+    log_level = logging.DEBUG if debug_mode else logging.INFO
+    logger.setLevel(log_level)
+
+    logger.propagate = False
    if logger.hasHandlers():
        logger.handlers.clear()

-    # File Handler (Always Verbose)
-    file_handler = logging.FileHandler("rstat.log", mode='a')
-    file_handler.setLevel(logging.INFO)
-    file_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
+    # File Handler (Always verbose at INFO level or higher)
+    file_handler = logging.FileHandler("rstat.log", mode="a")
+    file_handler.setLevel(logging.INFO)  # We don't need debug spam in the file usually
+    file_formatter = logging.Formatter(
+        "%(asctime)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
+    )
    file_handler.setFormatter(file_formatter)
    logger.addHandler(file_handler)

-    # Console Handler (Verbosity is Controlled)
+    # Console Handler (Verbosity is controlled)
    console_handler = logging.StreamHandler(sys.stdout)
-    console_formatter = logging.Formatter('%(message)s')
+    console_formatter = logging.Formatter("%(message)s")
    console_handler.setFormatter(console_formatter)

-    if console_verbose:
+    if debug_mode:
+        console_handler.setLevel(logging.DEBUG)
+    elif console_verbose:
        console_handler.setLevel(logging.INFO)
    else:
        console_handler.setLevel(logging.CRITICAL)
@@ -41,5 +47,5 @@ def setup_logging(console_verbose=False):
    if yfinance_logger.hasHandlers():
        yfinance_logger.handlers.clear()
    yfinance_logger.setLevel(logging.WARNING)
-    yfinance_logger.addHandler(console_handler)  # Use the same console handler
-    yfinance_logger.addHandler(file_handler)  # Use the same file handler
+    yfinance_logger.addHandler(console_handler)
+    yfinance_logger.addHandler(file_handler)
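For reference, a minimal sketch of how calling code drives the reworked logger; the flag values mirror the new debug_mode parameter above, while the CLI wiring that sets them is not part of this hunk:

# Illustrative usage only; assumes the rstat_tool package layout.
from rstat_tool.logger_setup import setup_logging, logger as log

setup_logging(console_verbose=True, debug_mode=False)
log.info("shown on the console and written to rstat.log")
log.debug("only emitted to the console when debug_mode=True")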
@@ -5,208 +5,416 @@ import json
import os
import time
import sys
-import subprocess
-import praw
from dotenv import load_dotenv
+from pathlib import Path
+from concurrent.futures import ThreadPoolExecutor
+
+import praw
+import yfinance as yf
+import pandas as pd

from . import database
-from .ticker_extractor import extract_tickers
+from .ticker_extractor import extract_golden_tickers, extract_potential_tickers
from .sentiment_analyzer import get_sentiment_score
from .logger_setup import setup_logging, logger as log

-load_dotenv()
-MARKET_CAP_REFRESH_INTERVAL = 86400
-POST_AGE_LIMIT = 86400


def load_subreddits(filepath):
+    """Loads a list of subreddits from a JSON file."""
    try:
-        with open(filepath, 'r') as f:
+        with open(filepath, "r") as f:
            return json.load(f).get("subreddits", [])
    except (FileNotFoundError, json.JSONDecodeError) as e:
        log.error(f"Error loading config file '{filepath}': {e}")
        return None


def get_reddit_instance():
+    """Initializes and returns a PRAW Reddit instance."""
+    env_path = Path(__file__).parent.parent / ".env"
+    load_dotenv(dotenv_path=env_path)
    client_id = os.getenv("REDDIT_CLIENT_ID")
    client_secret = os.getenv("REDDIT_CLIENT_SECRET")
    user_agent = os.getenv("REDDIT_USER_AGENT")
    if not all([client_id, client_secret, user_agent]):
-        print("Error: Reddit API credentials not found in .env file.")
+        log.error("Error: Reddit API credentials not found in .env file.")
        return None
-    return praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent=user_agent)
+    return praw.Reddit(
+        client_id=client_id, client_secret=client_secret, user_agent=user_agent
+    )

-def scan_subreddits(reddit, subreddits_list, post_limit=100, comment_limit=100, days_to_scan=1):
+def fetch_financial_data(ticker_symbol):
    """
-    Scans subreddits with a hybrid mention counting logic.
-    - If a ticker is in the title, it gets credit for all comments.
-    - If not, tickers only get credit for direct mentions in comments.
+    Fetches market cap and the most recent closing price for a single ticker.
+    This function is designed to be thread-safe and robust.
+    """
+    try:
+        ticker = yf.Ticker(ticker_symbol)
+        market_cap = ticker.info.get("marketCap")
+        data = ticker.history(period="2d", auto_adjust=False)
+        closing_price = None
+        if not data.empty:
+            last_close_raw = data["Close"].iloc[-1]
+            if pd.notna(last_close_raw):
+                closing_price = float(last_close_raw)
+        return ticker_symbol, {"market_cap": market_cap, "closing_price": closing_price}
+    except Exception:
+        return ticker_symbol, None

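ThreadPoolExecutor is imported above and fetch_financial_data returns a (symbol, data) tuple, which makes a batched financial update straightforward. A sketch of what that batch step can look like, using only helpers visible in this changeset; the worker count and function name are illustrative, not taken from the diff:

# Illustrative sketch; assumes an open `conn` and the database helpers shown earlier.
from concurrent.futures import ThreadPoolExecutor

def update_financials_in_batch(conn, symbols, max_workers=8):
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for symbol, data in executor.map(fetch_financial_data, symbols):
            if data is None:
                continue  # the fetch failed for this symbol, skip it
            ticker_id = database.get_or_create_entity(conn, "tickers", "symbol", symbol)
            database.update_ticker_financials(
                conn, ticker_id, data["market_cap"], data["closing_price"]
            )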
+def _process_submission(submission, subreddit_id, conn, comment_limit):
+    """
+    Processes a single Reddit submission using the "Golden Ticker" logic.
+    - Prioritizes tickers with a '$' prefix.
+    - Falls back to potential tickers only if no '$' tickers are found.
+    """
+    # 1. --- Golden Ticker Discovery ---
+    # First, search the entire post (title and body) for high-confidence '$' tickers.
+    post_text_for_discovery = submission.title + " " + submission.selftext
+    golden_tickers = extract_golden_tickers(post_text_for_discovery)
+
+    tickers_in_title = set()
+    comment_only_tickers = set()
+    all_tickers_found_in_post = set()
+
+    # 2. --- Apply Contextual Logic ---
+    if golden_tickers:
+        # --- CASE A: Golden Tickers were found ---
+        log.info(f" -> Golden Ticker(s) Found: {', '.join(golden_tickers)}. Prioritizing these.")
+        all_tickers_found_in_post.update(golden_tickers)
+        # We only care about which of the golden tickers appeared in the title for the hybrid logic.
+        tickers_in_title = {ticker for ticker in golden_tickers if ticker in extract_golden_tickers(submission.title)}
+    else:
+        # --- CASE B: No Golden Tickers, fall back to best-guess ---
+        log.info(" -> No Golden Tickers. Falling back to potential ticker search.")
+        # Now we search for potential tickers (e.g., 'GME' without a '$')
+        tickers_in_title = extract_potential_tickers(submission.title)
+        all_tickers_found_in_post.update(tickers_in_title)
+
+    # 3. --- Mention Processing (This logic remains the same, but uses our cleanly identified tickers) ---
+    ticker_id_cache = {}
+    submission.comments.replace_more(limit=0)
+    all_comments = submission.comments.list()[:comment_limit]
+
+    # Process title mentions
+    if tickers_in_title:
+        log.info(f" -> Title Mention(s): {', '.join(tickers_in_title)}. Attributing all comments.")
+        post_sentiment = get_sentiment_score(submission.title)
+        for ticker_symbol in tickers_in_title:
+            ticker_id = database.get_or_create_entity(conn, 'tickers', 'symbol', ticker_symbol)
+            ticker_id_cache[ticker_symbol] = ticker_id
+            database.add_mention(conn, ticker_id, subreddit_id, submission.id, 'post', int(submission.created_utc), post_sentiment)
+
+    # Process comments
+    for comment in all_comments:
+        comment_sentiment = get_sentiment_score(comment.body)
+        if tickers_in_title:
+            for ticker_symbol in tickers_in_title:
+                ticker_id = ticker_id_cache[ticker_symbol]
+                database.add_mention(conn, ticker_id, subreddit_id, submission.id, 'comment', int(comment.created_utc), comment_sentiment)
+        else:
+            # If no title tickers, we must scan comments for potential tickers
+            tickers_in_comment = extract_potential_tickers(comment.body)
+            if tickers_in_comment:
+                all_tickers_found_in_post.update(tickers_in_comment)
+                for ticker_symbol in tickers_in_comment:
+                    ticker_id = database.get_or_create_entity(conn, 'tickers', 'symbol', ticker_symbol)
+                    database.add_mention(conn, ticker_id, subreddit_id, submission.id, 'comment', int(comment.created_utc), comment_sentiment)
+
+    # 4. --- Save Deep Dive and Return Tickers for Financial Update ---
+    # (This part is unchanged)
+    all_comment_sentiments = [get_sentiment_score(c.body) for c in all_comments]
+    avg_sentiment = sum(all_comment_sentiments) / len(all_comment_sentiments) if all_comment_sentiments else 0
+    post_analysis_data = {
+        "post_id": submission.id, "title": submission.title,
+        "post_url": f"https://reddit.com{submission.permalink}", "subreddit_id": subreddit_id,
+        "post_timestamp": int(submission.created_utc), "comment_count": len(all_comments),
+        "avg_comment_sentiment": avg_sentiment
+    }
+    database.add_or_update_post_analysis(conn, post_analysis_data)
+
+    return all_tickers_found_in_post

|
def scan_subreddits(
    reddit,
    subreddits_list,
    post_limit=100,
    comment_limit=100,
    days_to_scan=1,
    fetch_financials=True,
):
    """
    Scans subreddits to discover mentions, then performs a single batch update for financials if enabled.
    """
    conn = database.get_db_connection()
    post_age_limit = days_to_scan * 86400
    current_time = time.time()
    all_tickers_to_update = set()

    log.info(f"Scanning {len(subreddits_list)} subreddit(s) for NEW posts...")
    if not fetch_financials:
        log.warning("NOTE: Financial data fetching is disabled for this run.")

    for subreddit_name in subreddits_list:
        try:
            normalized_sub_name = subreddit_name.lower()
            subreddit_id = database.get_or_create_entity(
                conn, "subreddits", "name", normalized_sub_name
            )
            subreddit = reddit.subreddit(normalized_sub_name)
            log.info(f"Scanning r/{normalized_sub_name}...")

            for submission in subreddit.new(limit=post_limit):
                if (current_time - submission.created_utc) > post_age_limit:
                    log.info(
                        f" -> Reached posts older than the {days_to_scan}-day limit."
                    )
                    break

                tickers_found = _process_submission(
                    submission, subreddit_id, conn, comment_limit
                )
                if tickers_found:
                    all_tickers_to_update.update(tickers_found)

        except Exception as e:
            log.error(
                f"Could not scan r/{normalized_sub_name}. Error: {e}", exc_info=True
            )

    conn.close()
    log.critical("\n--- Reddit Scan Complete ---")

    if fetch_financials and all_tickers_to_update:
        log.critical(
            f"\n--- Starting Batch Financial Update for {len(all_tickers_to_update)} Discovered Tickers ---"
        )

        tickers_from_db = {t["symbol"]: t["id"] for t in database.get_all_tickers()}
        tickers_needing_update_symbols = [
            symbol for symbol in all_tickers_to_update if symbol in tickers_from_db
        ]

        financial_data_batch = {}
        with ThreadPoolExecutor(max_workers=10) as executor:
            results = executor.map(fetch_financial_data, tickers_needing_update_symbols)
            for symbol, data in results:
                if data:
                    financial_data_batch[symbol] = data

        if financial_data_batch:
            conn = database.get_db_connection()
            for symbol, financials in financial_data_batch.items():
                database.update_ticker_financials(
                    conn,
                    tickers_from_db[symbol],
                    financials.get("market_cap"),
                    financials.get("closing_price"),
                )
            conn.close()
            log.critical("--- Batch Financial Update Complete ---")
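For reference, a minimal driver sketch, not part of the diff: it assumes Reddit credentials are present in .env, and the subreddit list is illustrative.

    # Minimal sketch: a stripped-down driver for the scanner.
    if __name__ == "__main__":
        reddit = get_reddit_instance()  # returns None if .env credentials are missing
        if reddit:
            scan_subreddits(
                reddit,
                ["wallstreetbets", "stocks"],  # illustrative subreddit list
                post_limit=50,
                comment_limit=100,
                days_to_scan=1,
                fetch_financials=False,  # skip the yfinance batch for a quick run
            )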
def main():
    """Main function to run the Reddit stock analysis tool."""
    parser = argparse.ArgumentParser(
        description="Analyze stock ticker mentions on Reddit.",
        formatter_class=argparse.RawTextHelpFormatter,
    )

    parser.add_argument(
        "-f",
        "--config",
        default="subreddits.json",
        help="Path to the JSON file for scanning. (Default: subreddits.json)",
    )
    parser.add_argument(
        "-s", "--subreddit", help="Scan a single subreddit, ignoring the config file."
    )
    parser.add_argument(
        "-d",
        "--days",
        type=int,
        default=1,
        help="Number of past days to scan for new posts. (Default: 1)",
    )
    parser.add_argument(
        "-p",
        "--posts",
        type=int,
        default=200,
        help="Max posts to check per subreddit. (Default: 200)",
    )
    parser.add_argument(
        "-c",
        "--comments",
        type=int,
        default=100,
        help="Number of comments to scan per post. (Default: 100)",
    )
    parser.add_argument(
        "-n",
        "--no-financials",
        action="store_true",
        help="Disable fetching of financial data during the Reddit scan.",
    )
    parser.add_argument(
        "--update-top-tickers",
        action="store_true",
        help="Update financial data only for tickers currently in the Top 10 daily/weekly dashboards.",
    )
    parser.add_argument(
        "-u",
        "--update-financials-only",
        nargs="?",
        const="ALL_TICKERS",  # A special value to signify "update all"
        default=None,
        metavar="TICKER",
        help="Update financials. Provide a ticker symbol to update just one,\nor use the flag alone to update all tickers in the database.",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Enable detailed debug logging to the console.",
    )
    parser.add_argument(
        "--stdout", action="store_true", help="Print all log messages to the console."
    )

    args = parser.parse_args()
    setup_logging(console_verbose=args.stdout, debug_mode=args.debug)

    database.initialize_db()
    if args.update_top_tickers:
        # --- Mode 1: Update Top Tickers ---
        log.critical("--- Starting Financial Data Update for Top Tickers ---")
        top_daily = database.get_top_daily_ticker_symbols()
        top_weekly = database.get_top_weekly_ticker_symbols()
        all_sub_names = database.get_all_scanned_subreddits()
        for sub_name in all_sub_names:
            top_daily.extend(
                database.get_top_daily_ticker_symbols_for_subreddit(sub_name)
            )
            top_weekly.extend(
                database.get_top_weekly_ticker_symbols_for_subreddit(sub_name)
            )
        tickers_to_update = sorted(list(set(top_daily + top_weekly)))

        if not tickers_to_update:
            log.info("No top tickers found in the last week. Nothing to update.")
        else:
            log.info(
                f"Found {len(tickers_to_update)} unique top tickers to update. Fetching in parallel..."
            )

            financial_data_batch = {}
            successful_updates = 0
            failed_updates = 0

            with ThreadPoolExecutor(max_workers=10) as executor:
                results = executor.map(fetch_financial_data, tickers_to_update)
                for symbol, data in results:
                    # A successful fetch is one where data is returned and has a closing price
                    if data and data.get("closing_price") is not None:
                        log.info(f" -> SUCCESS: Fetched data for {symbol}")
                        financial_data_batch[symbol] = data
                        successful_updates += 1
                    else:
                        log.warning(
                            f" -> FAILED: Could not fetch valid financial data for {symbol}"
                        )
                        failed_updates += 1

            if not financial_data_batch:
                log.error("Failed to fetch any batch financial data. Aborting update.")
            else:
                conn = database.get_db_connection()
                all_tickers_from_db = database.get_all_tickers()
                ticker_map = {t["symbol"]: t["id"] for t in all_tickers_from_db}

                for symbol, financials in financial_data_batch.items():
                    if symbol in ticker_map:
                        database.update_ticker_financials(
                            conn,
                            ticker_map[symbol],
                            financials.get("market_cap"),
                            financials.get("closing_price"),
                        )
                conn.close()

            log.critical("--- Top Ticker Financial Data Update Complete ---")
            log.critical(f" Successful updates: {successful_updates}")
            log.critical(f" Failed updates: {failed_updates}")

    elif args.update_financials_only:
        # --- Mode 2: Update All or a Single Ticker ---
        update_mode = args.update_financials_only
        tickers_to_update = []
        if update_mode == "ALL_TICKERS":
            log.critical("--- Starting Financial Data Update for ALL tickers ---")
            all_tickers_from_db = database.get_all_tickers()
            tickers_to_update = [t["symbol"] for t in all_tickers_from_db]
        else:
            ticker_symbol_to_update = update_mode
            log.critical(
                f"--- Starting Financial Data Update for single ticker: {ticker_symbol_to_update} ---"
            )
            if database.get_ticker_by_symbol(ticker_symbol_to_update):
                tickers_to_update = [ticker_symbol_to_update]
            else:
                log.error(
                    f"Ticker '{ticker_symbol_to_update}' not found in the database."
                )

        if tickers_to_update:
            log.info(
                f"Found {len(tickers_to_update)} unique tickers to update. Fetching in parallel..."
            )

            financial_data_batch = {}
            successful_updates = 0
            failed_updates = 0

            with ThreadPoolExecutor(max_workers=10) as executor:
                results = executor.map(fetch_financial_data, tickers_to_update)
                for symbol, data in results:
                    # A successful fetch is one where data is returned and has a closing price
                    if data and data.get("closing_price") is not None:
                        log.info(f" -> SUCCESS: Fetched data for {symbol}")
                        financial_data_batch[symbol] = data
                        successful_updates += 1
                    else:
                        log.warning(
                            f" -> FAILED: Could not fetch valid financial data for {symbol}"
                        )
                        failed_updates += 1

            if not financial_data_batch:
                log.error("Failed to fetch any batch financial data. Aborting update.")
            else:
                conn = database.get_db_connection()
                all_tickers_from_db = database.get_all_tickers()
                ticker_map = {t["symbol"]: t["id"] for t in all_tickers_from_db}

                for symbol, financials in financial_data_batch.items():
                    if symbol in ticker_map:
                        database.update_ticker_financials(
                            conn,
                            ticker_map[symbol],
                            financials.get("market_cap"),
                            financials.get("closing_price"),
                        )
                conn.close()

            log.critical("--- Financial Data Update Complete ---")
            log.critical(f" Successful updates: {successful_updates}")
            log.critical(f" Failed updates: {failed_updates}")
    else:
        # --- Mode 3: Default Reddit Scan ---
        log.critical("--- Starting Reddit Scan Mode ---")
        if args.subreddit:
            subreddits_to_scan = [args.subreddit]
@@ -220,15 +428,18 @@ def main():
            return

        reddit = get_reddit_instance()
        if not reddit:
            return

        scan_subreddits(
            reddit,
            subreddits_to_scan,
            post_limit=args.posts,
            comment_limit=args.comments,
            days_to_scan=args.days,
            fetch_financials=(not args.no_financials),
        )


if __name__ == "__main__":
    main()
@@ -16,4 +16,4 @@ def get_sentiment_score(text):
    # The polarity_scores() method returns a dictionary with 'neg', 'neu', 'pos', and 'compound' scores.
    # We are most interested in the 'compound' score.
    scores = _analyzer.polarity_scores(text)
    return scores["compound"]
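The dashboard's Bullish/Bearish/Neutral labels come from this compound value, presumably by thresholding it. A quick sketch of what the function returns, not part of the diff; the example strings are made up and the exact numbers depend on the VADER lexicon:

    # Minimal sketch: compound scores lie in [-1.0, 1.0].
    print(get_sentiment_score("This stock is going to the moon!"))  # positive, toward +1
    print(get_sentiment_score("Terrible earnings, selling it all."))  # negative, toward -1
    print(get_sentiment_score("The market opens at 9:30."))  # roughly neutral, near 0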
@@ -3,9 +3,9 @@ import nltk
# This will download the 'vader_lexicon' dataset
# It only needs to be run once
try:
    nltk.data.find("sentiment/vader_lexicon.zip")
    print("VADER lexicon is already downloaded.")
except LookupError:
    print("Downloading VADER lexicon...")
    nltk.download("vader_lexicon")
    print("Download complete.")
@@ -10,129 +10,135 @@ COMMON_WORDS_BLACKLIST = {
|
|||||||
"AI", "AINT", "AK", "ALD", "ALGOS", "ALIVE", "ALL", "ALPHA", "ALSO", "AM",
|
"AI", "AINT", "AK", "ALD", "ALGOS", "ALIVE", "ALL", "ALPHA", "ALSO", "AM",
|
||||||
"AMA", "AMEX", "AMK", "AMY", "AND", "ANSS", "ANY", "APES", "APL", "APPL",
|
"AMA", "AMEX", "AMK", "AMY", "AND", "ANSS", "ANY", "APES", "APL", "APPL",
|
||||||
"APPLE", "APR", "APUS", "APY", "AR", "ARBK", "ARE", "AREA", "ARH", "ARK",
|
"APPLE", "APR", "APUS", "APY", "AR", "ARBK", "ARE", "AREA", "ARH", "ARK",
|
||||||
"AROUND", "ART", "ASAP", "ASEAN", "ASK", "ASS", "ASSET", "AST", "AT", "ATH",
|
"AROUND", "ART", "AS", "ASAP", "ASEAN", "ASK", "ASS", "ASSET", "AST", "AT",
|
||||||
"ATL", "ATM", "AUD", "AUG", "AUM", "AV", "AVG", "AWS", "BABY", "BAG",
|
"ATH", "ATL", "ATM", "AUD", "AUG", "AUM", "AV", "AVG", "AWS", "BABY",
|
||||||
"BAGS", "BALLS", "BAN", "BANG", "BASIC", "BBB", "BBBY", "BE", "BEAR", "BEARS",
|
"BAG", "BAGS", "BALLS", "BAN", "BANG", "BASIC", "BBB", "BBBY", "BE", "BEAR",
|
||||||
"BECN", "BEER", "BELL", "BELOW", "BETA", "BETS", "BF", "BID", "BIG", "BIS",
|
"BEARS", "BECN", "BEER", "BELL", "BELOW", "BETA", "BETS", "BF", "BID", "BIG",
|
||||||
"BITCH", "BKEY", "BLEND", "BNPL", "BOE", "BOJ", "BOLL", "BOMB", "BOND", "BONED",
|
"BIS", "BITCH", "BKEY", "BLEND", "BMW", "BNP", "BNPL", "BOE", "BOJ", "BOLL",
|
||||||
"BORN", "BOTH", "BOTS", "BOY", "BOYS", "BRB", "BRICS", "BRK", "BRKA", "BRKB",
|
"BOMB", "BOND", "BONED", "BORN", "BOTH", "BOTS", "BOY", "BOYS", "BRB", "BRICS",
|
||||||
"BRL", "BROKE", "BRRRR", "BS", "BSE", "BST", "BSU", "BT", "BTC", "BTS",
|
"BRK", "BRKA", "BRKB", "BRL", "BROKE", "BRRRR", "BS", "BSE", "BST", "BSU",
|
||||||
"BTW", "BUDDY", "BULL", "BULLS", "BUST", "BUT", "BUY", "BUZZ", "CAD", "CAFE",
|
"BT", "BTC", "BTS", "BTW", "BUDDY", "BULL", "BULLS", "BUST", "BUT", "BUY",
|
||||||
"CAGR", "CALL", "CALLS", "CAN", "CAP", "CARB", "CARES", "CASE", "CATL", "CBD",
|
"BUZZ", "CAD", "CAFE", "CAGR", "CALL", "CALLS", "CAN", "CAP", "CARB", "CARES",
|
||||||
"CBGM", "CBS", "CCI", "CCP", "CD", "CDN", "CEO", "CEST", "CET", "CEX",
|
"CASE", "CATL", "CBD", "CBGM", "CBS", "CCI", "CCP", "CD", "CDN", "CEO",
|
||||||
"CFD", "CFO", "CFPB", "CHART", "CHASE", "CHATS", "CHECK", "CHF", "CHICK", "CHIP",
|
"CEST", "CET", "CEX", "CFD", "CFO", "CFPB", "CHART", "CHASE", "CHATS", "CHECK",
|
||||||
"CHIPS", "CIA", "CIC", "CLAIM", "CLEAN", "CLICK", "CLOSE", "CMON", "CN", "CNBC",
|
"CHF", "CHICK", "CHIP", "CHIPS", "CIA", "CIC", "CLAIM", "CLEAN", "CLICK", "CLOSE",
|
||||||
"CNN", "CNY", "COBRA", "COCK", "COGS", "COIL", "COKE", "COME", "COST", "COULD",
|
"CMON", "CN", "CNBC", "CNN", "CNY", "COBRA", "COCK", "COGS", "COIL", "COKE",
|
||||||
"COVID", "CPAP", "CPI", "CRA", "CRE", "CRO", "CRV", "CSE", "CSP", "CSS",
|
"COME", "COST", "COULD", "COVID", "CPAP", "CPI", "CRA", "CRE", "CRO", "CRV",
|
||||||
"CST", "CTB", "CTEP", "CTO", "CUCKS", "CULT", "CUM", "CUTS", "CUV", "CYCLE",
|
"CSE", "CSP", "CSS", "CST", "CTB", "CTEP", "CTO", "CUCKS", "CULT", "CUM",
|
||||||
"CZK", "DA", "DAILY", "DAO", "DATE", "DAX", "DAY", "DAYS", "DCA", "DCF",
|
"CUSMA", "CUTS", "CUV", "CYCLE", "CZK", "DA", "DAILY", "DAO", "DATE", "DAX",
|
||||||
"DD", "DEAL", "DEBT", "DEEZ", "DEMO", "DET", "DEX", "DGAF", "DIA", "DID",
|
"DAY", "DAYS", "DCA", "DCF", "DD", "DEAL", "DEBT", "DEEZ", "DEMO", "DET",
|
||||||
"DIDNT", "DIP", "DITM", "DIV", "DIY", "DJI", "DJIA", "DJTJ", "DKK", "DL",
|
"DEX", "DGAF", "DIA", "DID", "DIDNT", "DIP", "DITM", "DIV", "DIY", "DJI",
|
||||||
"DM", "DMV", "DNI", "DNUTZ", "DO", "DOD", "DOE", "DOES", "DOGE", "DOING",
|
"DJIA", "DJTJ", "DKK", "DL", "DM", "DMV", "DNI", "DNUTZ", "DO", "DOD",
|
||||||
"DOJ", "DOM", "DONNY", "DONT", "DONUT", "DOOR", "DOWN", "DOZEN", "DPI", "DR",
|
"DOE", "DOES", "DOGE", "DOING", "DOJ", "DOM", "DONNY", "DONT", "DONUT", "DOOR",
|
||||||
"DUDE", "DUMP", "DUNT", "DUT", "DUTY", "DXY", "DXYXBT", "DYI", "DYNK", "DYODD",
|
"DOWN", "DOZEN", "DPI", "DR", "DUDE", "DUMP", "DUNT", "DUT", "DUTY", "DXY",
|
||||||
"DYOR", "EACH", "EARLY", "EARN", "EAST", "EASY", "ECB", "EDGAR", "EDIT", "EDT",
|
"DXYXBT", "DYI", "DYNK", "DYODD", "DYOR", "EACH", "EARLY", "EARN", "EAST", "EASY",
|
||||||
"EJ", "EMA", "EMJ", "EMT", "END", "ENRON", "ENSI", "ENV", "EO", "EOD",
|
"ECB", "EDGAR", "EDIT", "EDT", "EJ", "EMA", "EMJ", "EMT", "END", "ENRON",
|
||||||
"EOM", "EOW", "EOY", "EPA", "EPK", "EPS", "ER", "ESG", "ESPP", "EST",
|
"ENSI", "ENV", "EO", "EOD", "EOM", "EOW", "EOY", "EPA", "EPK", "EPS",
|
||||||
"ETA", "ETF", "ETFS", "ETH", "ETL", "EU", "EUR", "EV", "EVEN", "EVERY",
|
"ER", "ESG", "ESPP", "EST", "ETA", "ETF", "ETFS", "ETH", "ETL", "EU",
|
||||||
"EVTOL", "EXTRA", "EYES", "EZ", "FAANG", "FAFO", "FAQ", "FAR", "FAST", "FBI",
|
"EUR", "EV", "EVEN", "EVERY", "EVTOL", "EXTRA", "EYES", "EZ", "FAANG", "FAFO",
|
||||||
"FCC", "FCFF", "FD", "FDA", "FEE", "FFH", "FFS", "FGMA", "FIG", "FIGMA",
|
"FAQ", "FAR", "FAST", "FBI", "FCC", "FCFF", "FD", "FDA", "FEE", "FFH",
|
||||||
"FIHTX", "FILES", "FINAL", "FIND", "FING", "FINRA", "FINT", "FINTX", "FINTY", "FIRST",
|
"FFS", "FGMA", "FIG", "FIGMA", "FIHTX", "FILES", "FINAL", "FIND", "FING", "FINRA",
|
||||||
"FKIN", "FLRAA", "FLT", "FLY", "FML", "FOLO", "FOMC", "FOMO", "FOR", "FOREX",
|
"FINT", "FINTX", "FINTY", "FIRE", "FIRST", "FKIN", "FLRAA", "FLT", "FLY", "FML",
|
||||||
"FRAUD", "FREAK", "FRED", "FRG", "FROM", "FRP", "FRS", "FSBO", "FSD", "FSE",
|
"FOLO", "FOMC", "FOMO", "FOR", "FOREX", "FRAUD", "FREAK", "FRED", "FRG", "FROM",
|
||||||
"FSELK", "FSPSX", "FTD", "FTSE", "FUCK", "FUCKS", "FUD", "FULL", "FUND", "FUNNY",
|
"FRP", "FRS", "FSBO", "FSD", "FSE", "FSELK", "FSPSX", "FTD", "FTSE", "FUCK",
|
||||||
"FVG", "FWIW", "FX", "FXAIX", "FXIAX", "FXROX", "FY", "FYI", "FZROX", "GAAP",
|
"FUCKS", "FUD", "FULL", "FUND", "FUNNY", "FVG", "FWIW", "FX", "FXAIX", "FXIAX",
|
||||||
"GAIN", "GAVE", "GBP", "GC", "GDP", "GET", "GG", "GGTM", "GIVES", "GJ",
|
"FXROX", "FY", "FYI", "FZROX", "GAAP", "GAIN", "GAVE", "GBP", "GC", "GDP",
|
||||||
"GL", "GLHF", "GMAT", "GMI", "GMT", "GO", "GOAL", "GOAT", "GOD", "GOING",
|
"GET", "GFC", "GG", "GGTM", "GIVES", "GJ", "GL", "GLHF", "GMAT", "GMI",
|
||||||
"GOLD", "GONE", "GONNA", "GOODS", "GOPRO", "GPT", "GPU", "GRAB", "GREAT", "GREEN",
|
"GMT", "GO", "GOAL", "GOAT", "GOD", "GOING", "GOLD", "GONE", "GONNA", "GOODS",
|
||||||
"GSOV", "GST", "GTA", "GTC", "GTFO", "GTG", "GUH", "GUNS", "GUY", "GUYS",
|
"GOPRO", "GPT", "GPU", "GRAB", "GREAT", "GREEN", "GSOV", "GST", "GTA", "GTC",
|
||||||
"HAD", "HAHA", "HALF", "HAM", "HANDS", "HAS", "HATE", "HAVE", "HBAR", "HCOL",
|
"GTFO", "GTG", "GUH", "GUNS", "GUY", "GUYS", "HAD", "HAHA", "HALF", "HAM",
|
||||||
"HEAR", "HEDGE", "HEGE", "HELD", "HELL", "HELP", "HERE", "HEY", "HFCS", "HFT",
|
"HANDS", "HAS", "HATE", "HAVE", "HBAR", "HCOL", "HEAR", "HEDGE", "HEGE", "HELD",
|
||||||
"HGTV", "HIGH", "HIGHS", "HINT", "HIS", "HITID", "HK", "HKD", "HKEX", "HODL",
|
"HELL", "HELP", "HERE", "HEY", "HFCS", "HFT", "HGTV", "HIGH", "HIGHS", "HINT",
|
||||||
"HODOR", "HOF", "HOLD", "HOLY", "HOME", "HOT", "HOUR", "HOURS", "HOW", "HS",
|
"HIS", "HITID", "HK", "HKD", "HKEX", "HODL", "HODOR", "HOF", "HOLD", "HOLY",
|
||||||
"HSA", "HSI", "HT", "HTF", "HTML", "HUF", "HUGE", "HYPE", "IANAL", "IATF",
|
"HOME", "HOT", "HOUR", "HOURS", "HOW", "HS", "HSA", "HSI", "HT", "HTCI",
|
||||||
"IB", "IBS", "ICT", "ID", "IDF", "IDK", "IF", "II", "IIRC", "IKKE",
|
"HTF", "HTML", "HUF", "HUGE", "HV", "HYPE", "IANAL", "IATF", "IB", "IBS",
|
||||||
"IKZ", "IM", "IMHO", "IMI", "IMO", "IN", "INC", "INR", "INTO", "IP",
|
"ICSID", "ICT", "ID", "IDF", "IDK", "IF", "II", "IIRC", "IKKE", "IKZ",
|
||||||
|
"IM", "IMHO", "IMI", "IMO", "IN", "INC", "INR", "INTEL", "INTO", "IP",
|
||||||
"IPO", "IQVIA", "IRA", "IRAS", "IRC", "IRISH", "IRMAA", "IRS", "IS", "ISA",
|
"IPO", "IQVIA", "IRA", "IRAS", "IRC", "IRISH", "IRMAA", "IRS", "IS", "ISA",
|
||||||
"ISIN", "ISM", "ISN", "IST", "IT", "ITC", "ITM", "ITS", "ITWN", "IUIT",
|
"ISIN", "ISM", "ISN", "IST", "IT", "ITC", "ITM", "ITS", "ITWN", "IUIT",
|
||||||
"IV", "IVV", "IWM", "IXL", "IYKYK", "JAVA", "JD", "JDM", "JE", "JFC",
|
"IV", "IVV", "IWM", "IXL", "IXLH", "IYKYK", "JAVA", "JD", "JDG", "JDM",
|
||||||
"JK", "JLR", "JMO", "JOBS", "JOIN", "JOKE", "JP", "JPOW", "JPY", "JS",
|
"JE", "JFC", "JK", "JLR", "JMO", "JOBS", "JOIN", "JOKE", "JP", "JPOW",
|
||||||
"JST", "JUN", "JUST", "KARMA", "KEEP", "KILL", "KING", "KK", "KNEW", "KNOW",
|
"JPY", "JS", "JST", "JUN", "JUST", "KARMA", "KEEP", "KILL", "KING", "KK",
|
||||||
"KO", "KOHLS", "KPMG", "KRW", "LANGT", "LARGE", "LAST", "LATE", "LATER", "LBO",
|
"KLA", "KLP", "KNEW", "KNOW", "KO", "KOHLS", "KPMG", "KRW", "LA", "LANGT",
|
||||||
"LBTC", "LCS", "LDL", "LEADS", "LEAP", "LEAPS", "LEARN", "LEI", "LET", "LETF",
|
"LARGE", "LAST", "LATE", "LATER", "LBO", "LBTC", "LCS", "LDL", "LEADS", "LEAP",
|
||||||
"LETS", "LFA", "LFG", "LFP", "LG", "LGEN", "LIFE", "LIG", "LIGMA", "LIKE",
|
"LEAPS", "LEARN", "LEI", "LET", "LETF", "LETS", "LFA", "LFG", "LFP", "LG",
|
||||||
"LIMIT", "LIST", "LLC", "LLM", "LM", "LMAO", "LMAOO", "LMM", "LMN", "LOANS",
|
"LGEN", "LIFE", "LIG", "LIGMA", "LIKE", "LIMIT", "LIST", "LLC", "LLM", "LM",
|
||||||
"LOKO", "LOL", "LOLOL", "LONG", "LONGS", "LOOK", "LOSE", "LOSS", "LOST", "LOVE",
|
"LMAO", "LMAOO", "LMM", "LMN", "LOANS", "LOKO", "LOL", "LOLOL", "LONG", "LONGS",
|
||||||
"LOVES", "LOW", "LOWER", "LOWS", "LP", "LSS", "LTCG", "LUCID", "LUPD", "LYC",
|
"LOOK", "LOSE", "LOSS", "LOST", "LOVE", "LOVES", "LOW", "LOWER", "LOWS", "LP",
|
||||||
"LYING", "M&A", "MA", "MACD", "MAIL", "MAKE", "MAKES", "MANGE", "MANY", "MASON",
|
"LSS", "LTCG", "LUCID", "LUPD", "LYC", "LYING", "M&A", "MA", "MACD", "MAIL",
|
||||||
"MAX", "MAY", "MAYBE", "MBA", "MC", "MCAP", "MCNA", "MCP", "ME", "MEAN",
|
"MAKE", "MAKES", "MANGE", "MANY", "MASON", "MAX", "MAY", "MAYBE", "MBA", "MC",
|
||||||
"MEME", "MERGE", "MERK", "MES", "MEXC", "MF", "MFER", "MID", "MIGHT", "MIN",
|
"MCAP", "MCNA", "MCP", "ME", "MEAN", "MEME", "MERGE", "MERK", "MES", "MEXC",
|
||||||
"MIND", "MINS", "ML", "MLB", "MLS", "MM", "MMF", "MNQ", "MOASS", "MODEL",
|
"MF", "MFER", "MID", "MIGHT", "MIN", "MIND", "MINS", "ML", "MLB", "MLS",
|
||||||
"MOM", "MONEY", "MONTH", "MONY", "MOON", "MORE", "MOST", "MOU", "MSK", "MTVGA",
|
"MM", "MMF", "MNQ", "MOASS", "MODEL", "MODTX", "MOM", "MONEY", "MONTH", "MONY",
|
||||||
"MUCH", "MUSIC", "MUST", "MVA", "MXN", "MY", "MYMD", "NASA", "NASDA", "NATO",
|
"MOON", "MORE", "MOST", "MOU", "MSK", "MTVGA", "MUCH", "MUSIC", "MUST", "MVA",
|
||||||
"NAV", "NBA", "NBC", "NCAN", "NCR", "NEAR", "NEAT", "NEED", "NEVER", "NEW",
|
"MXN", "MY", "MYMD", "NASA", "NASDA", "NATO", "NAV", "NBA", "NBC", "NCAN",
|
||||||
"NEWS", "NEXT", "NFA", "NFC", "NFL", "NFT", "NGAD", "NGMI", "NIGHT", "NIQ",
|
"NCR", "NEAR", "NEAT", "NEED", "NEVER", "NEW", "NEWS", "NEXT", "NFA", "NFC",
|
||||||
"NK", "NO", "NOK", "NONE", "NOOO", "NOPE", "NORTH", "NOT", "NOVA", "NOW",
|
"NFL", "NFT", "NGAD", "NGMI", "NIGHT", "NIQ", "NK", "NO", "NOK", "NON",
|
||||||
"NQ", "NRI", "NSA", "NSLC", "NTG", "NTVS", "NULL", "NUT", "NUTS", "NUTZ",
|
"NONE", "NOOO", "NOPE", "NORTH", "NOT", "NOVA", "NOW", "NQ", "NRI", "NSA",
|
||||||
"NVM", "NW", "NY", "NYSE", "NZ", "NZD", "OBBB", "OBI", "OBS", "OBV",
|
"NSCLC", "NSLC", "NTG", "NTVS", "NULL", "NUT", "NUTS", "NUTZ", "NVM", "NW",
|
||||||
"OCD", "OCF", "OCO", "ODAT", "OEM", "OF", "OFA", "OFF", "OG", "OH",
|
"NY", "NYSE", "NZ", "NZD", "OBBB", "OBI", "OBS", "OBV", "OCD", "OCF",
|
||||||
"OK", "OKAY", "OL", "OLD", "OMFG", "OMG", "ON", "ONDAS", "ONE", "ONLY",
|
"OCO", "ODAT", "ODTE", "OEM", "OF", "OFA", "OFF", "OG", "OH", "OK",
|
||||||
"OP", "OPEC", "OPENQ", "OPEX", "OPRN", "OR", "ORB", "ORDER", "ORTEX", "OS",
|
"OKAY", "OL", "OLD", "OMFG", "OMG", "ON", "ONDAS", "ONE", "ONLY", "OP",
|
||||||
"OSCE", "OT", "OTC", "OTM", "OTOH", "OUCH", "OUGHT", "OUR", "OUT", "OVER",
|
"OPEC", "OPENQ", "OPEX", "OPRN", "OR", "ORB", "ORDER", "ORTEX", "OS", "OSCE",
|
||||||
"OWN", "OZZY", "PA", "PANIC", "PC", "PDT", "PE", "PEAK", "PEG", "PETA",
|
"OT", "OTC", "OTM", "OTOH", "OUCH", "OUGHT", "OUR", "OUT", "OVER", "OWN",
|
||||||
"PEW", "PFC", "PGHL", "PIMCO", "PITA", "PLAN", "PLAYS", "PLC", "PLN", "PM",
|
"OZZY", "PA", "PANIC", "PC", "PDT", "PE", "PEAK", "PEG", "PETA", "PEW",
|
||||||
|
"PFC", "PGHL", "PIMCO", "PITA", "PLAN", "PLAYS", "PLC", "PLN", "PM", "PMCC",
|
||||||
"PMI", "PNL", "POC", "POMO", "POP", "POS", "POSCO", "POTUS", "POV", "POW",
|
"PMI", "PNL", "POC", "POMO", "POP", "POS", "POSCO", "POTUS", "POV", "POW",
|
||||||
"PPI", "PR", "PRICE", "PRIME", "PROFIT", "PROXY", "PS", "PSA", "PST", "PT",
|
"PPI", "PR", "PRICE", "PRIME", "PROFIT", "PROXY", "PS", "PSA", "PST", "PT",
|
||||||
"PTD", "PUSSY", "PUT", "PWC", "Q1", "Q2", "Q3", "Q4", "QE", "QED",
|
"PTD", "PUSSY", "PUT", "PUTS", "PWC", "Q1", "Q2", "Q3", "Q4", "QE",
|
||||||
"QIMC", "QQQ", "QR", "RAM", "RATM", "RBA", "RBNZ", "RE", "REACH", "READY",
|
"QED", "QIMC", "QQQ", "QR", "RAM", "RATM", "RBA", "RBNZ", "RE", "REACH",
|
||||||
"REAL", "RED", "REIT", "REITS", "REKT", "REPE", "RFK", "RH", "RICO", "RIDE",
|
"READY", "REAL", "RED", "REIT", "REITS", "REKT", "REPE", "RFK", "RH", "RICO",
|
||||||
"RIGHT", "RIP", "RISK", "RISKY", "RNDC", "ROCE", "ROCK", "ROE", "ROFL", "ROI",
|
"RIDE", "RIGHT", "RIP", "RISK", "RISKY", "RNDC", "ROCE", "ROCK", "ROE", "ROFL",
|
||||||
"ROIC", "ROTH", "RPO", "RRSP", "RSD", "RSI", "RT", "RTD", "RUB", "RUG",
|
"ROI", "ROIC", "ROTH", "RPO", "RRSP", "RSD", "RSI", "RT", "RTD", "RUB",
|
||||||
"RULE", "RUST", "RVOL", "SAGA", "SALES", "SAME", "SAVE", "SAYS", "SBF", "SBLOC",
|
"RUG", "RULE", "RUST", "RVOL", "SAGA", "SALES", "SAME", "SAVE", "SAYS", "SBF",
|
||||||
"SC", "SCALP", "SCAM", "SCHB", "SCIF", "SEC", "SEE", "SEK", "SELL", "SELLL",
|
"SBLOC", "SC", "SCALP", "SCAM", "SCHB", "SCIF", "SEC", "SEE", "SEK", "SELL",
|
||||||
"SEP", "SESG", "SET", "SFOR", "SGD", "SHALL", "SHARE", "SHEIN", "SHELL", "SHIT",
|
"SELLL", "SEP", "SESG", "SET", "SFOR", "SGD", "SHALL", "SHARE", "SHEIN", "SHELL",
|
||||||
"SHORT", "SHOW", "SHTF", "SI", "SICK", "SIGN", "SL", "SLIM", "SLOW", "SMA",
|
"SHIT", "SHORT", "SHOW", "SHS", "SHTF", "SI", "SICK", "SIGN", "SL", "SLIM",
|
||||||
"SMALL", "SMFH", "SNZ", "SO", "SOLD", "SOLIS", "SOME", "SOON", "SOOO", "SOUTH",
|
"SLOW", "SMA", "SMALL", "SMFH", "SNZ", "SO", "SOLD", "SOLIS", "SOME", "SOON",
|
||||||
"SP", "SPAC", "SPDR", "SPEND", "SPLG", "SPX", "SPY", "SQUAD", "SS", "SSA",
|
"SOOO", "SOUTH", "SP", "SPAC", "SPDR", "SPEND", "SPLG", "SPX", "SPY", "SQUAD",
|
||||||
"SSDI", "START", "STAY", "STEEL", "STFU", "STILL", "STOCK", "STOOQ", "STOP", "STOR",
|
"SS", "SSA", "SSDI", "START", "STAY", "STEEL", "STFU", "STILL", "STO", "STOCK",
|
||||||
"STQQQ", "STUCK", "STUDY", "SUS", "SUSHI", "SUV", "SWIFT", "SWING", "TA", "TAG",
|
"STOOQ", "STOP", "STOR", "STQQQ", "STUCK", "STUDY", "SUS", "SUSHI", "SUV", "SWIFT",
|
||||||
"TAKE", "TAM", "TBTH", "TEAMS", "TED", "TEMU", "TERM", "TESLA", "TEXT", "TF",
|
"SWING", "TA", "TAG", "TAKE", "TAM", "TBTH", "TEAMS", "TED", "TEMU", "TERM",
|
||||||
"TFNA", "TFSA", "THAN", "THANK", "THAT", "THATS", "THE", "THEIR", "THEM", "THEN",
|
"TESLA", "TEXT", "TF", "TFNA", "TFSA", "THAN", "THANK", "THAT", "THATS", "THE",
|
||||||
"THERE", "THESE", "THEY", "THING", "THINK", "THIS", "TI", "TIA", "TIKR", "TIME",
|
"THEIR", "THEM", "THEN", "THERE", "THESE", "THEY", "THING", "THINK", "THIS", "TI",
|
||||||
"TIMES", "TINA", "TITS", "TJR", "TL", "TL;DR", "TLDR", "TO", "TODAY", "TOLD",
|
"TIA", "TIKR", "TIME", "TIMES", "TINA", "TITS", "TJR", "TL", "TL;DR", "TLDR",
|
||||||
"TONS", "TOO", "TOS", "TOT", "TOTAL", "TP", "TPU", "TRADE", "TREND", "TRUE",
|
"TNT", "TO", "TODAY", "TOLD", "TONS", "TOO", "TOS", "TOT", "TOTAL", "TP",
|
||||||
"TRUMP", "TRUST", "TRY", "TSA", "TSMC", "TSP", "TSX", "TSXV", "TTM", "TTYL",
|
"TPU", "TRADE", "TREND", "TRUE", "TRUMP", "TRUST", "TRY", "TSA", "TSMC", "TSP",
|
||||||
"TWO", "UAW", "UCITS", "UGH", "UI", "UK", "UNDER", "UNITS", "UNTIL", "UP",
|
"TSX", "TSXV", "TTIP", "TTM", "TTYL", "TURNS", "TWO", "UAW", "UCITS", "UGH",
|
||||||
"US", "USA", "USD", "USMCA", "USSA", "USSR", "UTC", "VALID", "VALUE", "VAMOS",
|
"UI", "UK", "UNDER", "UNITS", "UNO", "UNTIL", "UP", "US", "USA", "USD",
|
||||||
"VEO", "VERY", "VFMXX", "VFV", "VI", "VISA", "VIX", "VLI", "VOO", "VP",
|
"USMCA", "USSA", "USSR", "UTC", "VALID", "VALUE", "VAMOS", "VAT", "VEO", "VERY",
|
||||||
"VPAY", "VR", "VRVP", "VSUS", "VTI", "VUAG", "VW", "VWAP", "VWCE", "VXN",
|
"VFMXX", "VFV", "VI", "VISA", "VIX", "VLI", "VOO", "VP", "VPAY", "VR",
|
||||||
"VXUX", "WAGER", "WAGMI", "WAIT", "WALL", "WANT", "WAS", "WATCH", "WAY", "WBTC",
|
"VRVP", "VSUS", "VTI", "VUAG", "VW", "VWAP", "VWCE", "VXN", "VXUX", "WAGER",
|
||||||
"WE", "WEB", "WEB3", "WEEK", "WENT", "WERO", "WEST", "WHALE", "WHAT", "WHEN",
|
"WAGMI", "WAIT", "WALL", "WANT", "WAS", "WATCH", "WAY", "WBTC", "WE", "WEB",
|
||||||
"WHERE", "WHICH", "WHILE", "WHO", "WHOS", "WHY", "WIDE", "WILL", "WIRE", "WIRED",
|
"WEB3", "WEEK", "WENT", "WERO", "WEST", "WHALE", "WHAT", "WHEN", "WHERE", "WHICH",
|
||||||
"WITH", "WL", "WON", "WOOPS", "WORDS", "WORTH", "WOULD", "WP", "WRONG", "WSB",
|
"WHILE", "WHO", "WHOS", "WHY", "WIDE", "WILL", "WIRE", "WIRED", "WITH", "WL",
|
||||||
"WSJ", "WTF", "WV", "WWII", "WWIII", "X", "XAU", "XCUSE", "XD", "XEQT",
|
"WON", "WOOPS", "WORDS", "WORTH", "WOULD", "WP", "WRONG", "WSB", "WSJ", "WTF",
|
||||||
"XI", "XMR", "XO", "XRP", "XX", "YEAH", "YEET", "YES", "YET", "YIELD",
|
"WV", "WWII", "WWIII", "X", "XAU", "XCUSE", "XD", "XEQT", "XI", "XIV",
|
||||||
"YM", "YMMV", "YOIR", "YOLO", "YOU", "YOUR", "YOY", "YT", "YTD", "YUGE",
|
"XMR", "XO", "XRP", "XX", "YEAH", "YEET", "YES", "YET", "YIELD", "YM",
|
||||||
"YUPPP", "ZAR", "ZEN", "ZERO", "ZEV"
|
"YMMV", "YOIR", "YOLO", "YOU", "YOUR", "YOY", "YT", "YTD", "YUGE", "YUPPP",
|
||||||
|
"ZAR", "ZEN", "ZERO", "ZEV"
|
||||||
}
|
}
|
||||||
|
|
||||||
def extract_golden_tickers(text):
    """
    Extracts ONLY tickers with a '$' prefix. This is the highest-confidence signal.
    Returns a set of cleaned ticker symbols (e.g., {'TSLA', 'GME'}).
    """
    # Regex to find words prefixed with $: $AAPL, $TSLA
    ticker_regex = r"\$[A-Z]{1,5}\b"
    tickers = re.findall(ticker_regex, text)
    # Clean the tickers by removing the '$' and return as a set
    return {ticker.replace("$", "").upper() for ticker in tickers}


def extract_potential_tickers(text):
    """
    Extracts potential tickers (all-caps words). This is a lower-confidence signal
    used as a fallback when no golden tickers are present.
    Returns a set of cleaned ticker symbols.
    """
    # Regex to find all-caps words between 2 and 5 characters: GME, AMC
    ticker_regex = r"\b[A-Z]{2,5}\b"
    potential_tickers = re.findall(ticker_regex, text)

    # Filter out common blacklisted words
    return {ticker for ticker in potential_tickers if ticker not in COMMON_WORDS_BLACKLIST}
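A quick sketch of how the two extractors divide the work, not part of the diff; the sample text is made up:

    # Minimal sketch: golden vs. potential extraction on sample text.
    text = "YOLO update: bought more $GME, ALSO eyeing AMC AND NVDA"

    print(extract_golden_tickers(text))
    # {'GME'} - only the '$'-prefixed, highest-confidence symbol

    print(extract_potential_tickers(text))
    # {'GME', 'AMC', 'NVDA'} - all-caps words, with blacklisted
    # ones ('YOLO', 'ALSO', 'AND') filtered out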
@@ -1,16 +1,19 @@
#!/bin/bash

BASE_DIR="/home/rstat/reddit_stock_analyzer"

# CRITICAL: Navigate to the project directory using an absolute path.
cd ${BASE_DIR}

# CRITICAL: Activate the virtual environment using an absolute path.
source ${BASE_DIR}/.venv/bin/activate

echo "--- Starting RSTAT Daily Job on $(date +%F) ---"

# 1. Scrape data from the last 24 hours and update price for top tickers.
echo "Step 1: Scraping new data..."
rstat --no-financials --comments 256
rstat --update-top-tickers

# 2. Start the dashboard in the background.
echo "Step 2: Starting dashboard in background..."
@@ -28,6 +31,6 @@ python post_to_reddit.py --target-subreddit rstat

# 5. Clean up by stopping the dashboard server.
echo "Step 5: Stopping dashboard server..."
kill ${DASHBOARD_PID}

echo "--- RSTAT Daily Job Complete ---"
20
setup.py
@@ -2,24 +2,24 @@

from setuptools import setup, find_packages

with open("requirements.txt") as f:
    requirements = f.read().splitlines()

setup(
    name="reddit-stock-analyzer",
    version="0.0.1",
    author="Pål-Kristian Hamre",
    author_email="its@pkhamre.com",
    description="A command-line tool to analyze stock ticker mentions on Reddit.",
    # This now correctly finds your 'rstat_tool' package
    packages=find_packages(),
    install_requires=requirements,
    entry_points={
        "console_scripts": [
            # The path is now 'package_name.module_name:function_name'
            "rstat=rstat_tool.main:main",
            "rstat-dashboard=rstat_tool.dashboard:start_dashboard",
            "rstat-cleanup=rstat_tool.cleanup:run_cleanup",
        ],
    },
)
BIN
static/apple-touch-icon.png
Normal file
After Width: | Height: | Size: 3.1 KiB |
2
static/css/input.css
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
@import "tailwindcss";
|
||||||
|
@plugin "@tailwindcss/typography";
|
1441
static/css/style.css
Normal file
BIN
static/dogecoin_logo.png
Normal file
After Width: | Height: | Size: 36 KiB |
BIN
static/favicon-96x96.png
Normal file
After Width: | Height: | Size: 1.6 KiB |
BIN
static/favicon.ico
Normal file
After Width: | Height: | Size: 15 KiB |
3
static/favicon.svg
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" width="200" height="200"><svg xmlns="http://www.w3.org/2000/svg" version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 200 200"><rect width="200" height="200" fill="url('#gradient')"></rect><defs><linearGradient id="SvgjsLinearGradient1001" gradientTransform="rotate(45 0.5 0.5)"><stop offset="0%" stop-color="#697f83"></stop><stop offset="100%" stop-color="#161f2f"></stop></linearGradient></defs><g><g fill="#b1d6bb" transform="matrix(12.518681318681319,0,0,12.518681318681319,14.808730859284879,189.00720071373405)" stroke="#498990" stroke-width="0.7"><path d="M8.87 0L6.36-5.02L4.50-5.02L4.50 0L1.07 0L1.07-14.22L6.67-14.22Q9.20-14.22 10.63-13.10Q12.05-11.97 12.05-9.92L12.05-9.92Q12.05-8.44 11.45-7.46Q10.85-6.48 9.57-5.88L9.57-5.88L12.54-0.15L12.54 0L8.87 0ZM4.50-11.57L4.50-7.67L6.67-7.67Q7.65-7.67 8.14-8.18Q8.63-8.69 8.63-9.61Q8.63-10.53 8.13-11.05Q7.64-11.57 6.67-11.57L6.67-11.57L4.50-11.57Z"></path></g></g></svg><style>@media (prefers-color-scheme: light) { :root { filter: none; } }
|
||||||
|
@media (prefers-color-scheme: dark) { :root { filter: none; } }
|
||||||
|
</style></svg>
|
After Width: | Height: | Size: 1.2 KiB |
21
static/site.webmanifest
Normal file
@@ -0,0 +1,21 @@
{
  "name": "MyWebSite",
  "short_name": "MySite",
  "icons": [
    {
      "src": "/web-app-manifest-192x192.png",
      "sizes": "192x192",
      "type": "image/png",
      "purpose": "maskable"
    },
    {
      "src": "/web-app-manifest-512x512.png",
      "sizes": "512x512",
      "type": "image/png",
      "purpose": "maskable"
    }
  ],
  "theme_color": "#ffffff",
  "background_color": "#ffffff",
  "display": "standalone"
}
BIN
static/web-app-manifest-192x192.png
Normal file
After Width: | Height: | Size: 3.4 KiB |
BIN
static/web-app-manifest-512x512.png
Normal file
After Width: | Height: | Size: 16 KiB |
@@ -1,10 +1,15 @@
{
    "subreddits": [
        "dividends",
        "investing",
        "options",
        "pennystocks",
        "SecurityAnalysis",
        "Shortsqueeze",
        "smallstreetbets",
        "stockmarket",
        "stocks",
        "thetagang",
        "Tollbugatabets",
        "ValueInvesting",
        "wallstreetbets",
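The -f/--config path handed to the scanner is read by load_subreddits; its implementation is not shown in this diff, but a minimal reader for this file shape could look like the following sketch (the function body is an assumption, not the project's actual code):

    # Minimal sketch: a hypothetical reader for subreddits.json.
    import json

    def load_subreddits(path="subreddits.json"):
        with open(path) as f:
            return json.load(f).get("subreddits", [])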
27
tailwind.config.js
Normal file
@@ -0,0 +1,27 @@
/** @type {import('tailwindcss').Config} */
module.exports = {
  content: [
    './templates/**/*.html',
  ],
  safelist: [
    'text-violet-400',
    'text-lime-400',
    'text-cyan-400',
    'text-yellow-400',
    'text-red-400',
    'text-orange-400',
    'text-emerald-400',
    'text-blue-400',
    'text-gray-300',
    'text-pink-400'
  ],
  theme: {
    extend: {
      fontFamily: {
        sans: ['Inter', 'sans-serif'],
      },
    },
  },
  plugins: [
  ],
}
51
templates/about.html
Normal file
@@ -0,0 +1,51 @@
{% extends "dashboard_base.html" %}

{% block title %}About RSTAT{% endblock %}

{% block content %}
<!-- This outer div now handles the centering -->
<div class="flex flex-col items-center">
  <div class="w-full max-w-3xl bg-slate-800/50 ring-1 ring-slate-700 rounded-2xl p-6 sm:p-10 shadow-2xl">
    <div class="text-center mb-10">
      <h1 class="text-3xl sm:text-4xl font-extrabold tracking-tight text-white">About RSTAT (beta)</h1>
    </div>

    <!-- The 'prose' class will now work correctly inside this standard block flow -->
    <article class="prose prose-slate prose-invert max-w-none">
      <h2>What is this?</h2>
      <p>RSTAT (Reddit Stock Analysis Tool) is an automated data pipeline that scans popular financial communities on
        Reddit to identify and analyze trending stock tickers. It provides a daily and weekly snapshot of the most
        discussed stocks, their social sentiment, and key financial data.</p>

      <h2>How does it work?</h2>
      <ul>
        <li>A <strong>scraper</strong> runs on a schedule to read new posts and comments from a predefined list of
          subreddits.</li>
        <li>A <strong>sentiment analyzer</strong> scores each mention as Bullish, Bearish, or Neutral using a natural
          language processing model.</li>
        <li>A <strong>data fetcher</strong> enriches the ticker data with the latest closing price and market
          capitalization from Yahoo Finance.</li>
        <li>All data is stored in a local <strong>SQLite database</strong>.</li>
        <li>This <strong>web dashboard</strong> reads from the database to provide a clean, interactive visualization of
          the results.</li>
      </ul>

      <h2>Supporting the Project</h2>
      <p>RSTAT is a <b>soon-to-be</b> free and open-source project. To ensure the dashboard remains fast and reliable, it is hosted on a
        small virtual server with running costs of approximately $6 per month, plus about $30 per year for the domain.
        If you find this tool useful, donations are gratefully accepted via Dogecoin (DOGE).</p>
      <div class="not-prose bg-slate-900/50 ring-1 ring-slate-700 rounded-lg p-3 text-center">
        <code class="text-sm text-slate-200 break-all">DRTLo2BsBijY4MrLmNNHzmjZ5tVvpTebFE</code>
      </div>
    </article>

    <footer class="mt-12 text-center">
      <div class="text-xl font-extrabold tracking-tight text-white">r/rstat</div>
      <div class="text-sm text-slate-400">
        <a href="https://www.reddit.com/r/rstat/" target="_blank" class="hover:text-white transition-colors">visit us
          for more.</a>
      </div>
    </footer>
  </div>
</div>
{% endblock %}
@@ -1,109 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="UTF-8">
|
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
||||||
<title>{% block title %}Reddit Stock Dashboard{% endblock %}</title>
|
|
||||||
<style>
|
|
||||||
body {
|
|
||||||
font-family: 'Inter', -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
|
|
||||||
background-color: #f4f7f6;
|
|
||||||
color: #333;
|
|
||||||
margin: 0;
|
|
||||||
line-height: 1.6;
|
|
||||||
}
|
|
||||||
.navbar {
|
|
||||||
background-color: #ffffff;
|
|
||||||
padding: 1rem 2rem;
|
|
||||||
border-bottom: 1px solid #e0e0e0;
|
|
||||||
display: flex;
|
|
||||||
flex-wrap: wrap;
|
|
||||||
gap: 1rem;
|
|
||||||
}
|
|
||||||
.navbar a {
|
|
||||||
color: #555;
|
|
||||||
text-decoration: none;
|
|
||||||
font-weight: 600;
|
|
||||||
padding: 0.5rem 1rem;
|
|
||||||
border-radius: 6px;
|
|
||||||
transition: background-color 0.2s ease-in-out, color 0.2s ease-in-out;
|
|
||||||
}
|
|
||||||
.navbar a:hover {
|
|
||||||
background-color: #e9ecef;
|
|
||||||
color: #000;
|
|
||||||
}
|
|
||||||
.container {
|
|
||||||
max-width: 1000px;
|
|
||||||
margin: 2rem auto;
|
|
||||||
padding: 2rem;
|
|
||||||
background-color: #ffffff;
|
|
||||||
border-radius: 8px;
|
|
||||||
box-shadow: 0 4px 12px rgba(0,0,0,0.05);
|
|
||||||
}
|
|
||||||
h1 {
|
|
||||||
font-size: 1.75rem;
|
|
||||||
font-weight: 700;
|
|
||||||
margin-top: 0;
|
|
||||||
border-bottom: 1px solid #eee;
|
|
||||||
padding-bottom: 0.5rem;
|
|
||||||
}
|
|
||||||
table {
|
|
||||||
width: 100%;
|
|
||||||
border-collapse: collapse;
|
|
||||||
margin-top: 2rem;
|
|
||||||
font-size: 0.95rem;
|
|
||||||
}
|
|
||||||
th, td {
|
|
||||||
padding: 1rem;
|
|
||||||
text-align: left;
|
|
||||||
border-bottom: 1px solid #e0e0e0;
|
|
||||||
}
|
|
||||||
th {
|
|
||||||
font-weight: 600;
|
|
||||||
text-transform: uppercase;
|
|
||||||
font-size: 0.8rem;
|
|
||||||
letter-spacing: 0.05em;
|
|
||||||
color: #666;
|
|
||||||
}
|
|
||||||
tr:last-child td {
|
|
||||||
border-bottom: none;
|
|
||||||
}
|
|
||||||
.sentiment-bullish { color: #28a745; font-weight: 600; }
|
|
||||||
.sentiment-bearish { color: #dc3545; font-weight: 600; }
|
|
||||||
.sentiment-neutral { color: #6c757d; }
|
|
||||||
|
|
||||||
.post-card {
|
|
||||||
border: 1px solid #e0e0e0;
|
|
||||||
border-radius: 8px;
|
|
||||||
padding: 1.5rem;
|
|
||||||
margin-bottom: 1.5rem;
|
|
||||||
}
|
|
||||||
.post-card h3 {
|
|
||||||
margin-top: 0;
|
|
||||||
font-size: 1.2rem;
|
|
||||||
}
|
|
||||||
.post-card h3 a {
|
|
||||||
color: #0056b3;
|
|
||||||
text-decoration: none;
|
|
||||||
}
|
|
||||||
.post-card h3 a:hover {
|
|
||||||
text-decoration: underline;
|
|
||||||
}
|
|
||||||
.post-meta {
|
|
||||||
font-size: 0.9rem;
|
|
||||||
color: #666;
|
|
||||||
}
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<header class="navbar">
|
|
||||||
<a href="/">Overall</a>
|
|
||||||
{% for sub in subreddits %}
|
|
||||||
<a href="/subreddit/{{ sub }}">r/{{ sub }}</a>
|
|
||||||
{% endfor %}
|
|
||||||
</header>
|
|
||||||
<main class="container">
|
|
||||||
{% block content %}{% endblock %}
|
|
||||||
</main>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
@@ -1,95 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="UTF-8">
|
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
||||||
<title>r/{{ subreddit_name }} Ticker Mentions</title>
|
|
||||||
<link rel="preconnect" href="https://fonts.googleapis.com">
|
|
||||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
|
||||||
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700;800&display=swap" rel="stylesheet">
|
|
||||||
<style>
|
|
||||||
body { margin: 0; padding: 2rem; font-family: 'Inter', sans-serif; background: #1a1a1a; display: flex; justify-content: center; align-items: center; min-height: 100vh; }
|
|
||||||
.image-container { width: 750px; background: linear-gradient(145deg, #2d3748, #1a202c); color: #ffffff; border-radius: 16px; padding: 2.5rem; box-shadow: 0 10px 30px rgba(0,0,0,0.5); text-align: center; }
|
|
||||||
header { display: flex; justify-content: space-between; align-items: flex-start; margin-bottom: 2rem; }
|
|
||||||
.title-block { text-align: left; }
|
|
||||||
.title-block h1 { font-size: 2.5rem; font-weight: 800; margin: 0; line-height: 1; }
|
|
||||||
.title-block h2 { font-size: 1.25rem; font-weight: 600; margin: 0.5rem 0 0; color: #a0aec0; }
|
|
||||||
.date { font-size: 1.1rem; font-weight: 600; color: #a0aec0; letter-spacing: 0.02em; }
|
|
||||||
table { width: 100%; border-collapse: collapse; text-align: left; }
|
|
||||||
th, td { padding: 1rem 0.5rem; border-bottom: 1px solid rgba(255, 255, 255, 0.1); }
|
|
||||||
th { font-weight: 700; text-transform: uppercase; font-size: 0.75rem; color: #718096; letter-spacing: 0.05em; }
|
|
||||||
td { font-size: 1.1rem; font-weight: 600; }
|
|
||||||
tr:last-child td { border-bottom: none; }
|
|
||||||
td.rank { font-weight: 700; color: #cbd5e0; width: 5%; }
|
|
||||||
td.ticker { width: 15%; }
|
|
||||||
td.financials { text-align: right; width: 20%; }
|
|
||||||
td.mentions { text-align: center; width: 15%; }
|
|
||||||
td.sentiment { text-align: center; width: 20%; }
|
|
||||||
th.mentions, th.sentiment {
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
th.financials {
|
|
||||||
text-align: right;
|
|
||||||
}
|
|
||||||
.sentiment-bullish { color: #48bb78; font-weight: 700; }
|
|
||||||
.sentiment-bearish { color: #f56565; font-weight: 700; }
|
|
||||||
.sentiment-neutral { color: #a0aec0; font-weight: 600; }
|
|
||||||
footer { margin-top: 2.5rem; }
|
|
||||||
.brand-name { font-size: 1.75rem; font-weight: 800; letter-spacing: -1px; }
|
|
||||||
.brand-subtitle { font-size: 1rem; color: #a0aec0; }
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<div class="image-container">
|
|
||||||
<header>
|
|
||||||
<div class="title-block">
|
|
||||||
<h1>Ticker Mentions Daily</h1>
|
|
||||||
<h2>r/{{ subreddit_name }}</h2>
|
|
||||||
</div>
|
|
||||||
<div class="date">{{ current_date }}</div>
|
|
||||||
</header>
|
|
||||||
<table>
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th class="rank">Rank</th>
|
|
||||||
<th class="ticker">Ticker</th>
|
|
||||||
<th class="mentions">Mentions</th>
|
|
||||||
<th class="financials">Mkt Cap</th>
|
|
||||||
<th class="financials">Close Price</th>
|
|
||||||
<th class="sentiment">Sentiment</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{% for ticker in tickers %}
|
|
||||||
<tr>
|
|
||||||
<td class="rank">{{ loop.index }}</td>
|
|
||||||
<td class="ticker">{{ ticker.symbol }}</td>
|
|
||||||
<td class="mentions">{{ ticker.total_mentions }}</td>
|
|
||||||
<td class="financials">{{ ticker.market_cap | format_mc }}</td>
|
|
||||||
<td class="financials">
|
|
||||||
{% if ticker.closing_price %}
|
|
||||||
${{ "%.2f"|format(ticker.closing_price) }}
|
|
||||||
{% else %}
|
|
||||||
N/A
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
<td class="sentiment">
|
|
||||||
{% if ticker.bullish_mentions > ticker.bearish_mentions %}
|
|
||||||
<span class="sentiment-bullish">Bullish</span>
|
|
||||||
{% elif ticker.bearish_mentions > ticker.bullish_mentions %}
|
|
||||||
<span class="sentiment-bearish">Bearish</span>
|
|
||||||
{% else %}
|
|
||||||
<span class="sentiment-neutral">Neutral</span>
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
{% endfor %}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
<footer>
|
|
||||||
<div class="brand-name">r/rstat</div>
|
|
||||||
<div class="brand-subtitle">visit us for more</div>
|
|
||||||
</footer>
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
119
templates/dashboard_base.html
Normal file
@@ -0,0 +1,119 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||||
|
<title>{% block title %}RSTAT Dashboard{% endblock %}</title>
|
||||||
|
|
||||||
|
<link rel="icon" type="image/png" href="{{ url_for('static', filename='favicon-96x96.png') }}" sizes="96x96" />
|
||||||
|
<link rel="icon" href="{{ url_for('static', filename='favicon.ico') }}">
|
||||||
|
<link rel="shortcut icon" type="image/svg+xml" href="{{ url_for('static', filename='favicon.svg') }}">
|
||||||
|
<link rel="apple-touch-icon" sizes="180x180" href="{{ url_for('static', filename='apple-touch-icon.png') }}" />
|
||||||
|
<link rel="manifest" href="{{ url_for('static', filename='site.webmanifest') }}" />
|
||||||
|
|
||||||
|
<link rel="preconnect" href="https://fonts.googleapis.com">
|
||||||
|
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
||||||
|
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800&display=swap" rel="stylesheet">
|
||||||
|
<link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}">
|
||||||
|
<script defer src="https://cdn.jsdelivr.net/npm/alpinejs@3.x.x/dist/cdn.min.js"></script>
|
||||||
|
<style>
|
||||||
|
/* This sets the custom font as the default for the page */
|
||||||
|
body {
|
||||||
|
font-family: 'Inter', sans-serif;
|
||||||
|
}
|
||||||
|
|
||||||
|
[class*="text-"]>a {
|
||||||
|
color: inherit;
|
||||||
|
text-decoration: none;
|
||||||
|
transition: color 0.2s ease-in-out;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
[class*="text-"]>a:hover {
|
||||||
|
color: #ffffff;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
|
||||||
|
<body class="bg-slate-900 text-slate-200 min-h-screen">
|
||||||
|
|
||||||
|
{% if not is_image_mode %}
|
||||||
|
<header class="p-4 sm:p-6 w-full">
|
||||||
|
<nav
|
||||||
|
class="w-full max-w-7xl mx-auto bg-slate-800/50 ring-1 ring-slate-700 rounded-xl p-4 flex flex-col sm:flex-row items-center gap-4">
|
||||||
|
<div class="flex items-center gap-4">
|
||||||
|
<!-- Home Link -->
|
||||||
|
<a href="/"
|
||||||
|
class="font-bold {% if not subreddit_name %}text-white{% else %}text-slate-400 hover:text-white{% endif %} transition-colors">Home</a>
|
||||||
|
|
||||||
|
<!-- Alpine.js Dropdown Component -->
|
||||||
|
<div x-data="{ isOpen: false }" class="relative">
|
||||||
|
<!-- The Button that toggles the 'isOpen' state -->
|
||||||
|
<button @click="isOpen = !isOpen"
|
||||||
|
class="font-bold flex items-center gap-1 cursor-pointer {% if subreddit_name %}text-white{% else %}text-slate-400 hover:text-white{% endif %} transition-colors">
|
||||||
|
<span>Subreddits</span>
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none"
|
||||||
|
stroke="currentColor" stroke-width="3" stroke-linecap="round" stroke-linejoin="round"
|
||||||
|
class="transition-transform duration-200" :class="{'rotate-180': isOpen}">
|
||||||
|
<polyline points="6 9 12 15 18 9"></polyline>
|
||||||
|
</svg>
|
||||||
|
</button>
|
||||||
|
|
||||||
|
<!-- The Dropdown Menu, controlled by Alpine.js -->
|
||||||
|
<div x-show="isOpen" @click.outside="isOpen = false"
|
||||||
|
x-transition:enter="transition ease-out duration-100"
|
||||||
|
x-transition:enter-start="opacity-0 scale-95" x-transition:enter-end="opacity-100 scale-100"
|
||||||
|
x-transition:leave="transition ease-in duration-75"
|
||||||
|
x-transition:leave-start="opacity-100 scale-100" x-transition:leave-end="opacity-0 scale-95"
|
||||||
|
class="absolute left-0 mt-2 bg-slate-800 ring-1 ring-slate-700 shadow-lg rounded-lg py-1 w-48 z-10"
|
||||||
|
style="display: none;">
|
||||||
|
{% for sub in all_subreddits %}
|
||||||
|
<a href="/subreddit/{{ sub }}"
|
||||||
|
class="block px-4 py-2 text-sm text-slate-300 hover:bg-slate-700 hover:text-white">{{ sub
|
||||||
|
}}</a>
|
||||||
|
{% endfor %}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="flex items-center gap-2 sm:ml-auto">
|
||||||
|
<a href="?view=daily"
|
||||||
|
class="px-3 py-1 rounded-md text-sm font-semibold {% if view_type == 'daily' %}bg-sky-500 text-white{% else %}bg-slate-700/50 text-slate-300 hover:bg-slate-700 hover:text-white{% endif %} transition-all">Daily</a>
|
||||||
|
<a href="?view=weekly"
|
||||||
|
class="px-3 py-1 rounded-md text-sm font-semibold {% if view_type == 'weekly' %}bg-sky-500 text-white{% else %}bg-slate-700/50 text-slate-300 hover:bg-slate-700 hover:text-white{% endif %} transition-all">Weekly</a>
|
||||||
|
<a href="/about" title="About this Project"
|
||||||
|
class="p-2 rounded-md text-slate-400 hover:bg-slate-700 hover:text-white transition-colors">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none"
|
||||||
|
stroke="currentColor" stroke-width="2.5" stroke-linecap="round" stroke-linejoin="round">
|
||||||
|
<circle cx="12" cy="12" r="10"></circle>
|
||||||
|
<line x1="12" y1="16" x2="12" y2="12"></line>
|
||||||
|
<line x1="12" y1="8" x2="12.01" y2="8"></line>
|
||||||
|
</svg>
|
||||||
|
</a>
|
||||||
|
</div>
|
||||||
|
</nav>
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
<main class="w-full p-4 sm:p-6">
|
||||||
|
{% block content %}{% endblock %}
|
||||||
|
</main>
|
||||||
|
|
||||||
|
{% if not is_image_mode %}
|
||||||
|
<footer class="mt-8 text-center">
|
||||||
|
<div class="flex items-center justify-center gap-2">
|
||||||
|
<img src="{{ url_for('static', filename='dogecoin_logo.png') }}" alt="Doge" class="w-6 h-6">
|
||||||
|
|
||||||
|
<!-- text-base makes the text larger -->
|
||||||
|
<span class="text-base text-slate-400">
|
||||||
|
Support this service:
|
||||||
|
<!-- text-sm and p-2 make the code block larger -->
|
||||||
|
<code
|
||||||
|
class="text-sm bg-slate-800 p-2 rounded-lg text-slate-300">DRTLo2BsBijY4MrLmNNHzmjZ5tVvpTebFE</code>
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
</footer>
|
||||||
|
{% endif %}
|
||||||
|
</body>
|
||||||
|
|
||||||
|
</html>
|
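For context, the new dashboard_base.html above expects a handful of template variables (subreddit_name, all_subreddits, view_type, is_image_mode) and a static folder served through url_for. The sketch below shows the kind of Flask view that could supply that context; only the variable names come from the template itself, while the route layout and placeholder data are assumptions, not the repository's actual implementation.

# Sketch (assumed): a minimal Flask view rendering the new dashboard templates.
# Only the context variable names are taken from dashboard_base.html and
# dashboard_view.html; routes and data loading here are hypothetical.
from flask import Flask, render_template, request

app = Flask(__name__)

@app.route("/")
@app.route("/subreddit/<subreddit_name>")
def dashboard(subreddit_name=None):
    view_type = request.args.get("view", "daily")         # drives the Daily/Weekly pills
    is_image_mode = request.args.get("image") == "true"    # hides nav and footer for image capture
    return render_template(
        "dashboard_view.html",
        subreddit_name=subreddit_name,
        all_subreddits=[],      # hypothetical: normally loaded from the database
        tickers=[],             # hypothetical: top tickers for the selected period
        view_type=view_type,
        is_image_mode=is_image_mode,
        title="RSTAT Dashboard",
        subtitle=f"r/{subreddit_name}" if subreddit_name else "All Subreddits",
        date_string="",
        base_url=request.path,
    )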
127
templates/dashboard_view.html
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
{% extends "dashboard_base.html" %}
|
||||||
|
|
||||||
|
{% block title %}{{ title }}{% endblock %}
|
||||||
|
|
||||||
|
{% block content %}
|
||||||
|
<div class="flex flex-col items-center">
|
||||||
|
<div
|
||||||
|
class="w-full max-w-3xl bg-gradient-to-br from-slate-800 to-slate-900 ring-1 ring-slate-700 rounded-2xl p-6 sm:p-8 shadow-2xl">
|
||||||
|
<header class="flex flex-col sm:flex-row justify-between sm:items-start mb-8">
|
||||||
|
<div class="text-left">
|
||||||
|
<h1 class="text-2xl sm:text-4xl font-extrabold tracking-tight text-white">Reddit Ticker Mentions</h1>
|
||||||
|
<h2 class="text-lg sm:text-xl font-semibold mt-1 text-slate-400">{{ subtitle }}</h2>
|
||||||
|
</div>
|
||||||
|
<div class="text-left sm:text-right mt-2 sm:mt-0 flex-shrink-0">
|
||||||
|
<div class="text-md font-semibold text-slate-400 whitespace-nowrap">{{ date_string }}</div>
|
||||||
|
{% if not is_image_mode %}
|
||||||
|
<a href="{{ base_url }}?view={{ view_type }}&image=true" class="inline-block mt-2 sm:float-right"
|
||||||
|
title="View as Shareable Image">
|
||||||
|
<svg class="text-slate-400 hover:text-white transition-colors" xmlns="http://www.w3.org/2000/svg"
|
||||||
|
width="22" height="22" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2.5"
|
||||||
|
stroke-linecap="round" stroke-linejoin="round">
|
||||||
|
<path d="M23 19a2 2 0 0 1-2 2H3a2 2 0 0 1-2-2V8a2 2 0 0 1 2-2h4l2-3h6l2 3h4a2 2 0 0 1 2 2z">
|
||||||
|
</path>
|
||||||
|
<circle cx="12" cy="13" r="4"></circle>
|
||||||
|
</svg>
|
||||||
|
</a>
|
||||||
|
{% endif %}
|
||||||
|
</div>
|
||||||
|
</header>
|
||||||
|
|
||||||
|
{% set ticker_colors = {
|
||||||
|
1: 'text-violet-400', 2: 'text-lime-400', 3: 'text-cyan-400',
|
||||||
|
4: 'text-yellow-400', 5: 'text-red-400', 6: 'text-orange-400',
|
||||||
|
7: 'text-emerald-400', 8: 'text-blue-400', 9: 'text-gray-300',
|
||||||
|
10: 'text-pink-400'
|
||||||
|
} %}
|
||||||
|
|
||||||
|
<!-- Ticker List -->
|
||||||
|
<div class="flex flex-col">
|
||||||
|
|
||||||
|
<!-- 1. The Desktop Header Row (hidden on mobile) -->
|
||||||
|
<div
|
||||||
|
class="hidden sm:flex items-center text-xs font-bold text-slate-500 uppercase tracking-wider px-4 py-3 border-b border-slate-700">
|
||||||
|
<div class="w-1/4 flex items-center gap-4 text-left">
|
||||||
|
<span class="w-6 text-center">#</span>
|
||||||
|
<span>Ticker</span>
|
||||||
|
</div>
|
||||||
|
<div class="w-3/4 grid grid-cols-4 gap-4 text-right">
|
||||||
|
<div class="text-center">Mentions</div>
|
||||||
|
<div class="text-center">Sentiment</div>
|
||||||
|
<div>Mkt Cap</div>
|
||||||
|
<div>Close Price</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- 2. Ticker Rows -->
|
||||||
|
<div class="divide-y divide-slate-800">
|
||||||
|
{% for ticker in tickers %}
|
||||||
|
<!-- THIS IS THE UPDATED LINE -->
|
||||||
|
<div
|
||||||
|
class="p-4 flex flex-col sm:flex-row sm:items-center sm:gap-4 hover:bg-slate-800/50 transition-colors duration-150">
|
||||||
|
<!-- Rank & Ticker Symbol -->
|
||||||
|
<div class="flex items-center gap-4 w-full sm:w-1/4 text-left mb-4 sm:mb-0">
|
||||||
|
<span class="text-lg font-bold text-slate-500 w-6 text-center">{{ loop.index }}</span>
|
||||||
|
<div class="text-xl font-bold">
|
||||||
|
<span class="{{ ticker_colors.get(loop.index, 'text-slate-200') }}">
|
||||||
|
{% if is_image_mode %}
|
||||||
|
{{ ticker.symbol }}
|
||||||
|
{% else %}
|
||||||
|
<a href="/deep-dive/{{ ticker.symbol }}">{{ ticker.symbol }}</a>
|
||||||
|
{% endif %}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<!-- Financial Data Points -->
|
||||||
|
<div class="w-full grid grid-cols-2 sm:grid-cols-4 gap-4 text-right">
|
||||||
|
<div class="text-center sm:text-center">
|
||||||
|
<div class="sm:hidden text-xs font-bold text-slate-500 uppercase tracking-wider mb-1">
|
||||||
|
Mentions</div>
|
||||||
|
<div class="text-lg font-semibold text-white">{{ ticker.total_mentions }}</div>
|
||||||
|
</div>
|
||||||
|
<div class="text-center sm:text-center">
|
||||||
|
<div class="sm:hidden text-xs font-bold text-slate-500 uppercase tracking-wider mb-1">
|
||||||
|
Sentiment</div>
|
||||||
|
<div class="text-lg font-semibold">
|
||||||
|
{% if ticker.bullish_mentions > ticker.bearish_mentions %}<span
|
||||||
|
class="text-green-400">Bullish</span>
|
||||||
|
{% elif ticker.bearish_mentions > ticker.bullish_mentions %}<span
|
||||||
|
class="text-red-400">Bearish</span>
|
||||||
|
{% else %}<span class="text-slate-400">Neutral</span>{% endif %}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<div class="sm:hidden text-xs font-bold text-slate-500 uppercase tracking-wider mb-1">Mkt
|
||||||
|
Cap</div>
|
||||||
|
<div class="text-lg font-semibold text-white">{{ ticker.market_cap | format_mc }}</div>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<div class="sm:hidden text-xs font-bold text-slate-500 uppercase tracking-wider mb-1">Close
|
||||||
|
Price</div>
|
||||||
|
<div class="text-lg font-semibold text-white">
|
||||||
|
{% if ticker.closing_price %}<a
|
||||||
|
href="https://finance.yahoo.com/quote/{{ ticker.symbol }}" target="_blank"
|
||||||
|
class="hover:text-blue-400 transition-colors">${{
|
||||||
|
"%.2f"|format(ticker.closing_price) }}</a>
|
||||||
|
{% else %}N/A{% endif %}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{% else %}
|
||||||
|
<div class="text-center text-slate-500 p-8">No ticker data found for this period.</div>
|
||||||
|
{% endfor %}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<footer class="mt-8 text-center">
|
||||||
|
<div class="text-xl font-extrabold tracking-tight text-white">r/rstat</div>
|
||||||
|
<div class="text-sm text-slate-400">
|
||||||
|
<a href="https://www.reddit.com/r/rstat/" target="_blank"
|
||||||
|
class="hover:text-white transition-colors">visit us for more.</a>
|
||||||
|
</div>
|
||||||
|
</footer>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{% endblock %}
|
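The dashboard and image templates above pipe market capitalization through a custom Jinja filter ("{{ ticker.market_cap | format_mc }}"). A minimal sketch of what such a filter could look like is shown below; only the filter name comes from the templates, and the thresholds and formatting are assumptions about the real implementation.

# Sketch (assumed implementation) of the "format_mc" Jinja filter used in the templates.
def format_mc(value):
    """Format a raw market-cap number as a compact, human-readable string."""
    if not value:
        return "N/A"
    for threshold, suffix in ((1e12, "T"), (1e9, "B"), (1e6, "M")):
        if value >= threshold:
            return f"${value / threshold:.2f}{suffix}"
    return f"${value:,.0f}"

# Registration on the Flask app (assumed): app.jinja_env.filters["format_mc"] = format_mc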
@@ -1,29 +1,54 @@
-{% extends "base.html" %}
+{% extends "dashboard_base.html" %}
 
 {% block title %}Deep Dive: {{ symbol }}{% endblock %}
 
 {% block content %}
-<h1>Deep Dive Analysis for: <strong>{{ symbol }}</strong></h1>
-<p>Showing posts that mention {{ symbol }}, sorted by most recent.</p>
-
-{% for post in posts %}
-<div class="post-card">
-    <h3><a href="{{ post.post_url }}" target="_blank">{{ post.title }}</a></h3>
-    <div class="post-meta">
-        <span>r/{{ post.subreddit_name }}</span> |
-        <span>{{ post.comment_count }} comments analyzed</span> |
-        <span>Avg. Sentiment:
-            {% if post.avg_comment_sentiment > 0.1 %}
-            <span class="sentiment-bullish">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
-            {% elif post.avg_comment_sentiment < -0.1 %}
-            <span class="sentiment-bearish">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
-            {% else %}
-            <span class="sentiment-neutral">{{ "%.2f"|format(post.avg_comment_sentiment) }}</span>
-            {% endif %}
-        </span>
-    </div>
-</div>
-{% else %}
-<p>No analyzed posts found for this ticker. Run the 'rstat' scraper to gather data.</p>
-{% endfor %}
+<!-- This outer div handles the centering -->
+<div class="flex flex-col items-center">
+    <div class="w-full max-w-3xl bg-slate-800/50 ring-1 ring-slate-700 rounded-2xl p-6 sm:p-10 shadow-2xl">
+
+        <!-- --- THIS IS THE KEY CHANGE --- -->
+        <!-- We wrap all the content in an <article> tag with the 'prose' classes -->
+        <article class="prose prose-slate prose-invert max-w-none">
+
+            <header class="text-center mb-8">
+                <!-- The h1 and p tags will now be beautifully styled by 'prose' -->
+                <h1>Deep Dive Analysis: <span class="text-sky-400">{{ symbol }}</span></h1>
+                <p>Showing posts that mention {{ symbol }}, sorted by most recent.</p>
+            </header>
+
+            <div class="space-y-4 not-prose">
+                {% for post in posts %}
+                <!-- 'not-prose' is used on the container so we can control styling precisely -->
+                <div class="bg-slate-800/50 ring-1 ring-slate-700/50 rounded-lg p-4 text-left not-prose">
+                    <h3 class="text-lg font-bold text-slate-200 mb-2">
+                        <!-- This link WILL be styled by the parent 'prose' class -->
+                        <a href="{{ post.post_url }}" target="_blank">{{ post.title }}</a>
+                    </h3>
+                    <div class="text-sm text-slate-400 flex flex-col sm:flex-row sm:items-center gap-x-4 gap-y-1">
+                        <span class="font-semibold">r/{{ post.subreddit_name }}</span>
+                        <span class="hidden sm:inline">|</span>
+                        <span>{{ post.comment_count }} comments analyzed</span>
+                        <span class="hidden sm:inline">|</span>
+                        <span>Avg. Sentiment:
+                            {% if post.avg_comment_sentiment > 0.1 %}
+                            <span class="font-bold text-green-400">{{ "%.2f"|format(post.avg_comment_sentiment) }}
+                                (Bullish)</span>
+                            {% elif post.avg_comment_sentiment < -0.1 %} <span class="font-bold text-red-400">{{
+                                "%.2f"|format(post.avg_comment_sentiment) }} (Bearish)</span>
+                            {% else %}
+                            <span class="font-bold text-slate-500">{{ "%.2f"|format(post.avg_comment_sentiment) }}
+                                (Neutral)</span>
+                            {% endif %}
+                        </span>
+                    </div>
+                </div>
+                {% else %}
+                <div class="text-center text-slate-500 p-8 not-prose">No analyzed posts found for this ticker.</div>
+                {% endfor %}
+            </div>
+
+        </article>
+    </div>
+</div>
 {% endblock %}
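Both the old and the new deep-dive templates classify a post by its average comment sentiment with the same thresholds: above 0.1 reads Bullish, below -0.1 reads Bearish, anything in between is Neutral. A tiny illustrative helper mirroring that logic (the function itself is not part of the diff):

def classify_sentiment(avg_comment_sentiment: float) -> str:
    # Same thresholds the templates use for the sentiment badge.
    if avg_comment_sentiment > 0.1:
        return "Bullish"
    if avg_comment_sentiment < -0.1:
        return "Bearish"
    return "Neutral"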
@@ -1,48 +0,0 @@
|
|||||||
{% extends "base.html" %}
|
|
||||||
|
|
||||||
{% block title %}Overall Dashboard{% endblock %}
|
|
||||||
|
|
||||||
{% block content %}
|
|
||||||
<h1>
|
|
||||||
Top 10 Tickers (All Subreddits)
|
|
||||||
<!-- ADD THIS LINK -->
|
|
||||||
<a href="/image/overall" target="_blank" style="font-size: 0.8rem; margin-left: 1rem; font-weight: normal;">image</a>
|
|
||||||
</h1>
|
|
||||||
<table>
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Ticker</th>
|
|
||||||
<th>Mentions</th>
|
|
||||||
<th>Market Cap</th>
|
|
||||||
<th>Closing Price</th>
|
|
||||||
<th>Sentiment</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{% for ticker in tickers %}
|
|
||||||
<tr>
|
|
||||||
<td><strong><a href="/deep-dive/{{ ticker.symbol }}">{{ ticker.symbol }}</a></strong></td>
|
|
||||||
<td>{{ ticker.mention_count }}</td>
|
|
||||||
<td>{{ ticker.market_cap | format_mc }}</td>
|
|
||||||
<!-- NEW COLUMN FOR CLOSING PRICE -->
|
|
||||||
<td>
|
|
||||||
{% if ticker.closing_price %}
|
|
||||||
${{ "%.2f"|format(ticker.closing_price) }}
|
|
||||||
{% else %}
|
|
||||||
N/A
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
{% if ticker.bullish_mentions > ticker.bearish_mentions %}
|
|
||||||
<span class="sentiment-bullish">Bullish</span>
|
|
||||||
{% elif ticker.bearish_mentions > ticker.bullish_mentions %}
|
|
||||||
<span class="sentiment-bearish">Bearish</span>
|
|
||||||
{% else %}
|
|
||||||
<span class="sentiment-neutral">Neutral</span>
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
{% endfor %}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
{% endblock %}
|
|
@@ -1,95 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="UTF-8">
|
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
||||||
<title>r/{{ subreddit_name }} Ticker Mentions</title>
|
|
||||||
<link rel="preconnect" href="https://fonts.googleapis.com">
|
|
||||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
|
||||||
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700;800&display=swap" rel="stylesheet">
|
|
||||||
<style>
|
|
||||||
body { margin: 0; padding: 2rem; font-family: 'Inter', sans-serif; background: #1a1a1a; display: flex; justify-content: center; align-items: center; min-height: 100vh; }
|
|
||||||
.image-container { width: 750px; background: linear-gradient(145deg, #2d3748, #1a202c); color: #ffffff; border-radius: 16px; padding: 2.5rem; box-shadow: 0 10px 30px rgba(0,0,0,0.5); text-align: center; }
|
|
||||||
header { display: flex; justify-content: space-between; align-items: flex-start; margin-bottom: 2rem; }
|
|
||||||
.title-block { text-align: left; }
|
|
||||||
.title-block h1 { font-size: 2.5rem; font-weight: 800; margin: 0; line-height: 1; }
|
|
||||||
.title-block h2 { font-size: 1.25rem; font-weight: 600; margin: 0.5rem 0 0; color: #a0aec0; }
|
|
||||||
.date { font-size: 1.1rem; font-weight: 600; color: #a0aec0; letter-spacing: 0.02em; }
|
|
||||||
table { width: 100%; border-collapse: collapse; text-align: left; }
|
|
||||||
th, td { padding: 1rem 0.5rem; border-bottom: 1px solid rgba(255, 255, 255, 0.1); }
|
|
||||||
th { font-weight: 700; text-transform: uppercase; font-size: 0.75rem; color: #718096; letter-spacing: 0.05em; }
|
|
||||||
td { font-size: 1.1rem; font-weight: 600; }
|
|
||||||
tr:last-child td { border-bottom: none; }
|
|
||||||
td.rank { font-weight: 700; color: #cbd5e0; width: 5%; }
|
|
||||||
td.ticker { width: 15%; }
|
|
||||||
td.financials { text-align: right; width: 20%; }
|
|
||||||
td.mentions { text-align: center; width: 15%; }
|
|
||||||
td.sentiment { text-align: center; width: 20%; }
|
|
||||||
th.mentions, th.sentiment {
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
th.financials {
|
|
||||||
text-align: right;
|
|
||||||
}
|
|
||||||
.sentiment-bullish { color: #48bb78; font-weight: 700; }
|
|
||||||
.sentiment-bearish { color: #f56565; font-weight: 700; }
|
|
||||||
.sentiment-neutral { color: #a0aec0; font-weight: 600; }
|
|
||||||
footer { margin-top: 2.5rem; }
|
|
||||||
.brand-name { font-size: 1.75rem; font-weight: 800; letter-spacing: -1px; }
|
|
||||||
.brand-subtitle { font-size: 1rem; color: #a0aec0; }
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<div class="image-container">
|
|
||||||
<header>
|
|
||||||
<div class="title-block">
|
|
||||||
<h1>Ticker Mentions Daily</h1>
|
|
||||||
<h2>All Subreddits</h2>
|
|
||||||
</div>
|
|
||||||
<div class="date">{{ current_date }}</div>
|
|
||||||
</header>
|
|
||||||
<table>
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th class="rank">Rank</th>
|
|
||||||
<th class="ticker">Ticker</th>
|
|
||||||
<th class="mentions">Mentions</th>
|
|
||||||
<th class="financials">Mkt Cap</th>
|
|
||||||
<th class="financials">Close Price</th>
|
|
||||||
<th class="sentiment">Sentiment</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{% for ticker in tickers %}
|
|
||||||
<tr>
|
|
||||||
<td class="rank">{{ loop.index }}</td>
|
|
||||||
<td class="ticker">{{ ticker.symbol }}</td>
|
|
||||||
<td class="mentions">{{ ticker.total_mentions }}</td>
|
|
||||||
<td class="financials">{{ ticker.market_cap | format_mc }}</td>
|
|
||||||
<td class="financials">
|
|
||||||
{% if ticker.closing_price %}
|
|
||||||
${{ "%.2f"|format(ticker.closing_price) }}
|
|
||||||
{% else %}
|
|
||||||
N/A
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
<td class="sentiment">
|
|
||||||
{% if ticker.bullish_mentions > ticker.bearish_mentions %}
|
|
||||||
<span class="sentiment-bullish">Bullish</span>
|
|
||||||
{% elif ticker.bearish_mentions > ticker.bullish_mentions %}
|
|
||||||
<span class="sentiment-bearish">Bearish</span>
|
|
||||||
{% else %}
|
|
||||||
<span class="sentiment-neutral">Neutral</span>
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
{% endfor %}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
<footer>
|
|
||||||
<div class="brand-name">r/rstat</div>
|
|
||||||
<div class="brand-subtitle">visit us for more</div>
|
|
||||||
</footer>
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
@@ -1,48 +0,0 @@
|
|||||||
{% extends "base.html" %}
|
|
||||||
|
|
||||||
{% block title %}r/{{ subreddit_name }} Dashboard{% endblock %}
|
|
||||||
|
|
||||||
{% block content %}
|
|
||||||
<h1>
|
|
||||||
Top 10 Tickers in r/{{ subreddit_name }}
|
|
||||||
<a href="/image/daily/{{ subreddit_name }}" target="_blank" style="font-size: 0.8rem; margin-left: 1rem; font-weight: normal;">daily image</a>
|
|
||||||
<a href="/image/weekly/{{ subreddit_name }}" target="_blank" style="font-size: 0.8rem; margin-left: 1rem; font-weight: normal;">weekly image</a>
|
|
||||||
</h1>
|
|
||||||
<table>
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Ticker</th>
|
|
||||||
<th>Mentions</th>
|
|
||||||
<th>Market Cap</th>
|
|
||||||
<th>Closing Price</th>
|
|
||||||
<th>Sentiment</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{% for ticker in tickers %}
|
|
||||||
<tr>
|
|
||||||
<td><strong><a href="/deep-dive/{{ ticker.symbol }}">{{ ticker.symbol }}</a></strong></td>
|
|
||||||
<td>{{ ticker.mention_count }}</td>
|
|
||||||
<td>{{ ticker.market_cap | format_mc }}</td>
|
|
||||||
<!-- NEW COLUMN FOR CLOSING PRICE -->
|
|
||||||
<td>
|
|
||||||
{% if ticker.closing_price %}
|
|
||||||
${{ "%.2f"|format(ticker.closing_price) }}
|
|
||||||
{% else %}
|
|
||||||
N/A
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
{% if ticker.bullish_mentions > ticker.bearish_mentions %}
|
|
||||||
<span class="sentiment-bullish">Bullish</span>
|
|
||||||
{% elif ticker.bearish_mentions > ticker.bullish_mentions %}
|
|
||||||
<span class="sentiment-bearish">Bearish</span>
|
|
||||||
{% else %}
|
|
||||||
<span class="sentiment-neutral">Neutral</span>
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
{% endfor %}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
{% endblock %}
|
|
@@ -1,95 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="UTF-8">
|
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
||||||
<title>r/{{ subreddit_name }} Ticker Mentions</title>
|
|
||||||
<link rel="preconnect" href="https://fonts.googleapis.com">
|
|
||||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
|
||||||
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700;800&display=swap" rel="stylesheet">
|
|
||||||
<style>
|
|
||||||
body { margin: 0; padding: 2rem; font-family: 'Inter', sans-serif; background: #1a1a1a; display: flex; justify-content: center; align-items: center; min-height: 100vh; }
|
|
||||||
.image-container { width: 750px; background: linear-gradient(145deg, #2d3748, #1a202c); color: #ffffff; border-radius: 16px; padding: 2.5rem; box-shadow: 0 10px 30px rgba(0,0,0,0.5); text-align: center; }
|
|
||||||
header { display: flex; justify-content: space-between; align-items: flex-start; margin-bottom: 2rem; }
|
|
||||||
.title-block { text-align: left; }
|
|
||||||
.title-block h1 { font-size: 2.5rem; font-weight: 800; margin: 0; line-height: 1; }
|
|
||||||
.title-block h2 { font-size: 1.25rem; font-weight: 600; margin: 0.5rem 0 0; color: #a0aec0; }
|
|
||||||
.date { font-size: 1.1rem; font-weight: 600; color: #a0aec0; letter-spacing: 0.02em; }
|
|
||||||
table { width: 100%; border-collapse: collapse; text-align: left; }
|
|
||||||
th, td { padding: 1rem 0.5rem; border-bottom: 1px solid rgba(255, 255, 255, 0.1); }
|
|
||||||
th { font-weight: 700; text-transform: uppercase; font-size: 0.75rem; color: #718096; letter-spacing: 0.05em; }
|
|
||||||
td { font-size: 1.1rem; font-weight: 600; }
|
|
||||||
tr:last-child td { border-bottom: none; }
|
|
||||||
td.rank { font-weight: 700; color: #cbd5e0; width: 5%; }
|
|
||||||
td.ticker { width: 15%; }
|
|
||||||
td.financials { text-align: right; width: 20%; }
|
|
||||||
td.mentions { text-align: center; width: 15%; }
|
|
||||||
td.sentiment { text-align: center; width: 20%; }
|
|
||||||
th.mentions, th.sentiment {
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
th.financials {
|
|
||||||
text-align: right;
|
|
||||||
}
|
|
||||||
.sentiment-bullish { color: #48bb78; font-weight: 700; }
|
|
||||||
.sentiment-bearish { color: #f56565; font-weight: 700; }
|
|
||||||
.sentiment-neutral { color: #a0aec0; font-weight: 600; }
|
|
||||||
footer { margin-top: 2.5rem; }
|
|
||||||
.brand-name { font-size: 1.75rem; font-weight: 800; letter-spacing: -1px; }
|
|
||||||
.brand-subtitle { font-size: 1rem; color: #a0aec0; }
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<div class="image-container">
|
|
||||||
<header>
|
|
||||||
<div class="title-block">
|
|
||||||
<h1>Ticker Mentions Weekly</h1>
|
|
||||||
<h2>r/{{ subreddit_name }}</h2>
|
|
||||||
</div>
|
|
||||||
<div class="date">{{ date_range }}</div>
|
|
||||||
</header>
|
|
||||||
<table>
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th class="rank">Rank</th>
|
|
||||||
<th class="ticker">Ticker</th>
|
|
||||||
<th class="mentions">Mentions</th>
|
|
||||||
<th class="financials">Mkt Cap</th>
|
|
||||||
<th class="financials">Close Price</th>
|
|
||||||
<th class="sentiment">Sentiment</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{% for ticker in tickers %}
|
|
||||||
<tr>
|
|
||||||
<td class="rank">{{ loop.index }}</td>
|
|
||||||
<td class="ticker">{{ ticker.symbol }}</td>
|
|
||||||
<td class="mentions">{{ ticker.total_mentions }}</td>
|
|
||||||
<td class="financials">{{ ticker.market_cap | format_mc }}</td>
|
|
||||||
<td class="financials">
|
|
||||||
{% if ticker.closing_price %}
|
|
||||||
${{ "%.2f"|format(ticker.closing_price) }}
|
|
||||||
{% else %}
|
|
||||||
N/A
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
<td class="sentiment">
|
|
||||||
{% if ticker.bullish_mentions > ticker.bearish_mentions %}
|
|
||||||
<span class="sentiment-bullish">Bullish</span>
|
|
||||||
{% elif ticker.bearish_mentions > ticker.bullish_mentions %}
|
|
||||||
<span class="sentiment-bearish">Bearish</span>
|
|
||||||
{% else %}
|
|
||||||
<span class="sentiment-neutral">Neutral</span>
|
|
||||||
{% endif %}
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
{% endfor %}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
<footer>
|
|
||||||
<div class="brand-name">r/rstat</div>
|
|
||||||
<div class="brand-subtitle">visit us for more</div>
|
|
||||||
</footer>
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
@@ -6,8 +6,7 @@ import logging
 
 # Set up a simple logger to see detailed error tracebacks
 logging.basicConfig(
-    level=logging.INFO,
-    format='%(asctime)s - %(levelname)s - %(message)s'
+    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
 )
 
 # A list of tickers to test. One very common one, and two from your logs.
@@ -20,31 +19,41 @@ for ticker_symbol in TICKERS_TO_TEST:
 
     # --- Test 1: The Ticker().info method ---
     try:
-        logging.info(f"Attempting to create Ticker object and get .info for {ticker_symbol}...")
+        logging.info(
+            f"Attempting to create Ticker object and get .info for {ticker_symbol}..."
+        )
         ticker_obj = yf.Ticker(ticker_symbol)
-        market_cap = ticker_obj.info.get('marketCap')
+        market_cap = ticker_obj.info.get("marketCap")
         if market_cap is not None:
             logging.info(f"SUCCESS: Got market cap for {ticker_symbol}: {market_cap}")
         else:
-            logging.warning(f"PARTIAL SUCCESS: .info call for {ticker_symbol} worked, but no market cap was found.")
+            logging.warning(
+                f"PARTIAL SUCCESS: .info call for {ticker_symbol} worked, but no market cap was found."
+            )
     except Exception:
-        logging.error(f"FAILURE: An error occurred during the Ticker().info call for {ticker_symbol}.", exc_info=True)
+        logging.error(
+            f"FAILURE: An error occurred during the Ticker().info call for {ticker_symbol}.",
+            exc_info=True,
+        )
 
     # --- Test 2: The yf.download() method ---
     try:
         logging.info(f"Attempting yf.download() for {ticker_symbol}...")
         data = yf.download(
-            ticker_symbol,
-            period="2d",
-            progress=False,
-            auto_adjust=False
+            ticker_symbol, period="2d", progress=False, auto_adjust=False
         )
         if not data.empty:
-            logging.info(f"SUCCESS: yf.download() for {ticker_symbol} returned {len(data)} rows of data.")
+            logging.info(
+                f"SUCCESS: yf.download() for {ticker_symbol} returned {len(data)} rows of data."
+            )
         else:
-            logging.warning(f"PARTIAL SUCCESS: yf.download() for {ticker_symbol} worked, but returned no data (likely delisted).")
+            logging.warning(
+                f"PARTIAL SUCCESS: yf.download() for {ticker_symbol} worked, but returned no data (likely delisted)."
+            )
     except Exception:
-        logging.error(f"FAILURE: An error occurred during the yf.download() call for {ticker_symbol}.", exc_info=True)
+        logging.error(
+            f"FAILURE: An error occurred during the yf.download() call for {ticker_symbol}.",
+            exc_info=True,
+        )
 
 print("\n--- YFINANCE Diagnostic Test Complete ---")
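The diagnostic script above exercises the two yfinance code paths the project depends on: Ticker().info for market capitalization and yf.download() for recent closing prices. If the same checks were needed outside the test script, they could be folded into a small helper like the sketch below; the helper name and return shape are assumptions, while the yfinance calls themselves are the ones already used above.

# Sketch (assumed): the two yfinance probes from the diagnostic wrapped in one helper.
import yfinance as yf

def probe_ticker(symbol: str) -> dict:
    """Run the .info and download() probes for one symbol and report what worked."""
    result = {"symbol": symbol, "market_cap": None, "rows": 0}
    try:
        result["market_cap"] = yf.Ticker(symbol).info.get("marketCap")
    except Exception:
        pass  # .info can fail on rate limits or delisted symbols
    try:
        data = yf.download(symbol, period="2d", progress=False, auto_adjust=False)
        result["rows"] = len(data)
    except Exception:
        pass  # download() failures leave rows at 0
    return result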