| """ |
| News & AI Dashboard Page - Real-time Financial Intelligence |
| Powered by professional-grade news monitoring with low-latency delivery |
| """ |
|
|
| import streamlit as st |
| import sys |
| import os |
| import logging |
|
|
| |
| logging.getLogger('asyncio').setLevel(logging.CRITICAL) |
| logging.getLogger('playwright').setLevel(logging.WARNING) |
|
|
| |
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) |
|
|
| from components.styles import DARK_THEME_CSS |
| from components.news import ( |
| display_news_statistics, |
| display_category_breakdown, |
| display_breaking_news_banner, |
| display_scrollable_news_section, |
| display_prediction_card, |
| display_economic_event_card, |
| display_economic_calendar_widget |
| ) |
| from utils.breaking_news_scorer import get_breaking_news_scorer |
| from utils.ai_summary_store import init_storage, enqueue_items, fetch_summaries, get_status |
| from utils.ai_summary_worker import start_worker_if_needed |
|
|
| |
# Optional service imports: each scraper backend is wrapped in try/except so
# the dashboard degrades gracefully when a dependency (e.g. playwright) is
# missing. The *_AVAILABLE flags gate session-state initialization below.
try:
    from services.news_scraper import FinanceNewsScraper
    RSS_AVAILABLE = True
except ImportError:
    RSS_AVAILABLE = False

try:
    from services.twitter_news_playwright import TwitterFinanceMonitor
    TWITTER_AVAILABLE = True
except ImportError:
    TWITTER_AVAILABLE = False

try:
    from services.reddit_news import RedditFinanceMonitor
    REDDIT_AVAILABLE = True
except ImportError:
    REDDIT_AVAILABLE = False

try:
    from services.ai_tech_news import AITechNewsScraper
    AI_TECH_AVAILABLE = True
except ImportError:
    AI_TECH_AVAILABLE = False

try:
    from services.prediction_markets import PredictionMarketsScraper
    PREDICTIONS_AVAILABLE = True
except ImportError:
    PREDICTIONS_AVAILABLE = False

try:
    from services.sectoral_news import SectoralNewsScraper
    SECTORAL_AVAILABLE = True
except ImportError:
    SECTORAL_AVAILABLE = False

try:
    from services.market_events import MarketEventsScraper
    EVENTS_AVAILABLE = True
except ImportError:
    EVENTS_AVAILABLE = False

try:
    from services.economic_calendar import EconomicCalendarService
    CALENDAR_AVAILABLE = True
except ImportError:
    CALENDAR_AVAILABLE = False
|
|
|
|
| |
# Page chrome: st.set_page_config must be the first Streamlit call on a page.
st.set_page_config(
    page_title="News Dashboard - Financial Platform",
    page_icon="π°",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Inject the shared dark-theme CSS used across the app's pages.
st.markdown(DARK_THEME_CSS, unsafe_allow_html=True)
|
|
| |
# Lazily construct one scraper/service instance per session and cache it in
# st.session_state so instances survive Streamlit reruns. Each guard checks
# the corresponding *_AVAILABLE flag so unavailable backends are skipped.
if 'rss_monitor' not in st.session_state and RSS_AVAILABLE:
    st.session_state.rss_monitor = FinanceNewsScraper()

if 'twitter_monitor' not in st.session_state and TWITTER_AVAILABLE:
    st.session_state.twitter_monitor = TwitterFinanceMonitor()

if 'reddit_monitor' not in st.session_state and REDDIT_AVAILABLE:
    st.session_state.reddit_monitor = RedditFinanceMonitor()

if 'ai_tech_monitor' not in st.session_state and AI_TECH_AVAILABLE:
    st.session_state.ai_tech_monitor = AITechNewsScraper()

if 'prediction_markets_monitor' not in st.session_state and PREDICTIONS_AVAILABLE:
    st.session_state.prediction_markets_monitor = PredictionMarketsScraper()

if 'sectoral_news_monitor' not in st.session_state and SECTORAL_AVAILABLE:
    st.session_state.sectoral_news_monitor = SectoralNewsScraper()

if 'market_events_monitor' not in st.session_state and EVENTS_AVAILABLE:
    st.session_state.market_events_monitor = MarketEventsScraper()

if 'economic_calendar_service' not in st.session_state and CALENDAR_AVAILABLE:
    st.session_state.economic_calendar_service = EconomicCalendarService()

# Module-level handles; .get() yields None for backends that never loaded,
# which the fetch_* helpers below treat as "source disabled".
rss_monitor = st.session_state.get('rss_monitor')
twitter_monitor = st.session_state.get('twitter_monitor')
reddit_monitor = st.session_state.get('reddit_monitor')
ai_tech_monitor = st.session_state.get('ai_tech_monitor')
prediction_markets_monitor = st.session_state.get('prediction_markets_monitor')
sectoral_news_monitor = st.session_state.get('sectoral_news_monitor')
market_events_monitor = st.session_state.get('market_events_monitor')
economic_calendar_service = st.session_state.get('economic_calendar_service')

# Shared per-session cache (3-minute TTL) fronting all network fetches.
if 'news_cache_manager' not in st.session_state:
    from utils.news_cache import NewsCacheManager
    st.session_state.news_cache_manager = NewsCacheManager(default_ttl=180)

cache_manager = st.session_state.news_cache_manager
|
|
| |
# Page title and subtitle.
st.markdown("# π€ Live Financial News & AI Dashboard")
st.markdown("AI-powered market insights with sentiment analysis and trading recommendations. Real-time macro, markets & geopolitical intelligence")

st.markdown("---")
|
|
| |
# ---------------------------------------------------------------------------
# Sidebar: filter controls, refresh settings, cache statistics, and the
# static list of monitored sources.
# ---------------------------------------------------------------------------
with st.sidebar:
    st.markdown("## βοΈ News Filters")

    # The three filters below are applied client-side to cached frames via
    # cache_manager.get_filtered_news (see the `filters` dict further down).
    category_filter = st.selectbox(
        "Category",
        ["all", "macro", "markets", "geopolitical"],
        format_func=lambda x: x.upper() if x != "all" else "ALL CATEGORIES",
        help="Filter by news category"
    )

    sentiment_filter = st.selectbox(
        "Sentiment",
        ["all", "positive", "negative", "neutral"],
        format_func=lambda x: x.upper() if x != "all" else "ALL SENTIMENTS",
        help="Filter by market sentiment"
    )

    impact_filter = st.selectbox(
        "Impact Level",
        ["all", "high", "medium", "low"],
        format_func=lambda x: x.upper() if x != "all" else "ALL IMPACT LEVELS",
        help="Filter by market impact"
    )

    st.markdown("---")

    st.markdown("### π Refresh Settings")

    col1, col2 = st.columns(2)
    with col1:
        # One-shot flag: consumed (reset to False) after the fetch pass.
        if st.button("π Refresh Now", use_container_width=True, type="primary"):
            st.session_state.force_refresh = True
            st.rerun()

    with col2:
        auto_refresh = st.checkbox("Auto-refresh", value=True, help="Auto-refresh every 3 minutes")

    if auto_refresh:
        st.info("β±οΈ Auto-refresh enabled (3 min)")

    st.markdown("---")
    st.markdown("### π Feed Statistics")

    # Aggregate cached-item counts for the metrics below.
    cache_stats = cache_manager.get_statistics()

    # ai_tech may be absent from the stats dict, hence the nested .get().
    total_stories = (
        cache_stats['twitter']['items'] +
        cache_stats['reddit']['items'] +
        cache_stats['rss']['items'] +
        cache_stats.get('ai_tech', {}).get('items', 0)
    )

    st.metric("Total Stories", total_stories)
    st.metric("Cache Status", "β
Active" if total_stories > 0 else "β³ Loading")

    # Cache age readout; TTL is 180s (NewsCacheManager default_ttl above).
    if cache_stats['twitter']['is_valid']:
        age = int(cache_stats['twitter']['age_seconds'])
        st.caption(f"π Cache age: {age}s / 180s")
    else:
        st.caption("π Fetching fresh data...")

    st.markdown("---")
    st.markdown("### βΉοΈ Sources")

    # Monitors expose static SOURCES/SUBREDDITS lists; the remaining counts
    # are hard-coded to mirror the services' coverage described below.
    twitter_sources = len(twitter_monitor.SOURCES) if twitter_monitor else 0
    reddit_sources = len(reddit_monitor.SUBREDDITS) if reddit_monitor else 0
    rss_sources = len(rss_monitor.SOURCES) if rss_monitor else 0
    ai_tech_sources = len(ai_tech_monitor.SOURCES) if ai_tech_monitor else 0
    prediction_sources = 3
    sectoral_sources = 7
    events_sources = 3
    total_sources = twitter_sources + reddit_sources + rss_sources + ai_tech_sources + prediction_sources + sectoral_sources + events_sources

    st.markdown(f"""
    <div style='font-size: 11px; line-height: 1.6;'>

    **Twitter/X Accounts ({twitter_sources})**
    β’ WalterBloomberg β’ FXHedge β’ DeItaone
    β’ Reuters β’ Bloomberg β’ FT β’ WSJ
    β’ CNBC β’ BBC β’ MarketWatch
    β’ The Economist β’ AP β’ AFP

    **Reddit Communities ({reddit_sources})**
    β’ r/wallstreetbets β’ r/stocks β’ r/investing
    β’ r/algotrading β’ r/economics β’ r/geopolitics
    β’ r/options β’ r/SecurityAnalysis

    **RSS + Web Scraping ({rss_sources})**
    β’ CNBC β’ Bloomberg β’ FT β’ WSJ
    β’ BBC β’ Yahoo Finance β’ Google News
    β’ The Economist β’ Fed (2.0x) β’ ECB (2.0x) β’ IMF

    **AI & Tech Sources ({ai_tech_sources})**
    β’ OpenAI β’ Google AI β’ Microsoft AI β’ Meta AI
    β’ DeepMind β’ Anthropic β’ AWS AI β’ NVIDIA
    β’ TechCrunch β’ The Verge β’ VentureBeat
    β’ MIT Tech Review β’ Wired β’ Ars Technica

    **Prediction Markets ({prediction_sources})**
    β’ Polymarket β’ Metaculus β’ CME FedWatch

    **Sectoral Coverage ({sectoral_sources})**
    β’ Finance β’ Tech β’ Energy β’ Healthcare
    β’ Consumer β’ Industrials β’ Real Estate

    **Market Events ({events_sources})**
    β’ Earnings Calendar β’ Economic Indicators
    β’ Central Bank Events (Fed, ECB, BoE, BoJ)

    **Total: {total_sources} Premium Sources**
    </div>
    """, unsafe_allow_html=True)
|
|
|
|
| |
|
|
| |
# One-shot flag set by the sidebar "Refresh Now" button; cleared after fetch.
force_refresh = st.session_state.get('force_refresh', False)

# AI-summary pipeline: ensure persistent storage and the background worker
# exist before we enqueue items below.
init_storage()
start_worker_if_needed()

# Deferred imports: only needed from the fetch phase onward.
import pandas as pd
from concurrent.futures import ThreadPoolExecutor, as_completed

# Default every per-source frame to empty so downstream rendering stays safe
# even when a fetch fails, is disabled, or times out.
twitter_df = pd.DataFrame()
reddit_df = pd.DataFrame()
rss_all_df = pd.DataFrame()
rss_main_df = pd.DataFrame()
ai_tech_df = pd.DataFrame()
predictions_df = pd.DataFrame()
sectoral_news_df = pd.DataFrame()
market_events_df = pd.DataFrame()
economic_calendar_df = pd.DataFrame()
|
|
def fetch_twitter_news():
    """Fetch Twitter/X news through the shared cache manager.

    Returns:
        tuple: (DataFrame with a parsed ``timestamp`` column, error message
        or None). Always returns an empty frame instead of raising.
    """
    # Backend unavailable (import failed) -> nothing to fetch.
    if not twitter_monitor:
        return pd.DataFrame(), None
    try:
        raw_items = cache_manager.get_news(
            source='twitter',
            fetcher_func=twitter_monitor.scrape_twitter_news,
            force_refresh=force_refresh,
            max_tweets=50,
        )
        if not raw_items:
            return pd.DataFrame(), None
        frame = pd.DataFrame(raw_items)
        if frame.empty:
            return pd.DataFrame(), None
        frame['timestamp'] = pd.to_datetime(frame['timestamp'])
        return frame, None
    except Exception as exc:
        # Degrade gracefully: caller surfaces this in the warnings expander.
        return pd.DataFrame(), f"Twitter scraping unavailable: {exc}"
|
|
def fetch_reddit_news():
    """Fetch Reddit news through the shared cache manager.

    Returns:
        tuple: (DataFrame with a parsed ``timestamp`` column, error message
        or None). Always returns an empty frame instead of raising.
    """
    # Backend unavailable (import failed) -> nothing to fetch.
    if not reddit_monitor:
        return pd.DataFrame(), None
    try:
        raw_items = cache_manager.get_news(
            source='reddit',
            fetcher_func=reddit_monitor.scrape_reddit_news,
            force_refresh=force_refresh,
            max_posts=50,
            hours=12,
        )
        if not raw_items:
            return pd.DataFrame(), None
        frame = pd.DataFrame(raw_items)
        if frame.empty:
            return pd.DataFrame(), None
        frame['timestamp'] = pd.to_datetime(frame['timestamp'])
        return frame, None
    except Exception as exc:
        # Degrade gracefully: caller surfaces this in the warnings expander.
        return pd.DataFrame(), f"Reddit scraping unavailable: {exc}"
|
|
def fetch_rss_news():
    """Fetch RSS + web-scraped news through the shared cache manager.

    Returns:
        tuple: (DataFrame with a parsed ``timestamp`` column, error message
        or None). Always returns an empty frame instead of raising.
    """
    # Backend unavailable (import failed) -> nothing to fetch.
    if not rss_monitor:
        return pd.DataFrame(), None
    try:
        raw_items = cache_manager.get_news(
            source='rss',
            fetcher_func=rss_monitor.scrape_news,
            force_refresh=force_refresh,
            max_items=100,
        )
        if not raw_items:
            return pd.DataFrame(), None
        frame = pd.DataFrame(raw_items)
        if frame.empty:
            return pd.DataFrame(), None
        frame['timestamp'] = pd.to_datetime(frame['timestamp'])
        return frame, None
    except Exception as exc:
        # Degrade gracefully: caller surfaces this in the warnings expander.
        return pd.DataFrame(), f"RSS scraping unavailable: {exc}"
|
|
def fetch_ai_tech_news():
    """Fetch AI/Tech news through the shared cache manager.

    Returns:
        tuple: (DataFrame with a parsed ``timestamp`` column, error message
        or None). Always returns an empty frame instead of raising.
    """
    # Backend unavailable (import failed) -> nothing to fetch.
    if not ai_tech_monitor:
        return pd.DataFrame(), None
    try:
        raw_items = cache_manager.get_news(
            source='ai_tech',
            fetcher_func=ai_tech_monitor.scrape_ai_tech_news,
            force_refresh=force_refresh,
            max_items=100,
            hours=48,
        )
        if not raw_items:
            return pd.DataFrame(), None
        frame = pd.DataFrame(raw_items)
        if frame.empty:
            return pd.DataFrame(), None
        frame['timestamp'] = pd.to_datetime(frame['timestamp'])
        return frame, None
    except Exception as exc:
        # Degrade gracefully: caller surfaces this in the warnings expander.
        return pd.DataFrame(), f"AI/Tech news unavailable: {exc}"
|
|
def fetch_prediction_markets():
    """Fetch prediction-market data through the shared cache manager.

    Returns:
        tuple: (DataFrame with a parsed ``timestamp`` column, error message
        or None). Always returns an empty frame instead of raising.
    """
    # Backend unavailable (import failed) -> nothing to fetch.
    if not prediction_markets_monitor:
        return pd.DataFrame(), None
    try:
        raw_items = cache_manager.get_news(
            source='predictions',
            fetcher_func=prediction_markets_monitor.scrape_predictions,
            force_refresh=force_refresh,
            max_items=50,
        )
        if not raw_items:
            return pd.DataFrame(), None
        frame = pd.DataFrame(raw_items)
        if frame.empty:
            return pd.DataFrame(), None
        frame['timestamp'] = pd.to_datetime(frame['timestamp'])
        return frame, None
    except Exception as exc:
        # Degrade gracefully: caller surfaces this in the warnings expander.
        return pd.DataFrame(), f"Prediction markets unavailable: {exc}"
|
|
def fetch_sectoral_news():
    """Fetch sector-specific news through the shared cache manager.

    Returns:
        tuple: (DataFrame with a parsed ``timestamp`` column, error message
        or None). Always returns an empty frame instead of raising.
    """
    # Backend unavailable (import failed) -> nothing to fetch.
    if not sectoral_news_monitor:
        return pd.DataFrame(), None
    try:
        raw_items = cache_manager.get_news(
            source='sectoral_news',
            fetcher_func=sectoral_news_monitor.scrape_sectoral_news,
            force_refresh=force_refresh,
            max_items=50,
            hours=24,
        )
        if not raw_items:
            return pd.DataFrame(), None
        frame = pd.DataFrame(raw_items)
        if frame.empty:
            return pd.DataFrame(), None
        frame['timestamp'] = pd.to_datetime(frame['timestamp'])
        return frame, None
    except Exception as exc:
        # Degrade gracefully: caller surfaces this in the warnings expander.
        return pd.DataFrame(), f"Sectoral news unavailable: {exc}"
|
|
def fetch_market_events():
    """Fetch upcoming market events through the shared cache manager.

    Returns:
        tuple: (DataFrame with a parsed ``timestamp`` column, error message
        or None). Always returns an empty frame instead of raising.
    """
    # Backend unavailable (import failed) -> nothing to fetch.
    if not market_events_monitor:
        return pd.DataFrame(), None
    try:
        raw_items = cache_manager.get_news(
            source='market_events',
            fetcher_func=market_events_monitor.scrape_market_events,
            force_refresh=force_refresh,
            max_items=50,
            days_ahead=14,
        )
        if not raw_items:
            return pd.DataFrame(), None
        frame = pd.DataFrame(raw_items)
        if frame.empty:
            return pd.DataFrame(), None
        frame['timestamp'] = pd.to_datetime(frame['timestamp'])
        return frame, None
    except Exception as exc:
        # Degrade gracefully: caller surfaces this in the warnings expander.
        return pd.DataFrame(), f"Market events unavailable: {exc}"
|
|
def fetch_economic_calendar():
    """Fetch upcoming economic-calendar events through the cache manager.

    Returns:
        tuple: (DataFrame with a parsed ``timestamp`` column, error message
        or None). Always returns an empty frame instead of raising.
    """
    # Backend unavailable (import failed) -> nothing to fetch.
    if not economic_calendar_service:
        return pd.DataFrame(), None
    try:
        raw_items = cache_manager.get_news(
            source='economic_calendar',
            fetcher_func=economic_calendar_service.get_upcoming_events,
            force_refresh=force_refresh,
            days_ahead=7,
            min_importance='medium',
        )
        if not raw_items:
            return pd.DataFrame(), None
        frame = pd.DataFrame(raw_items)
        if frame.empty:
            return pd.DataFrame(), None
        frame['timestamp'] = pd.to_datetime(frame['timestamp'])
        return frame, None
    except Exception as exc:
        # Degrade gracefully: caller surfaces this in the warnings expander.
        return pd.DataFrame(), f"Economic calendar unavailable: {exc}"
|
|
| |
| |
| status_placeholder = st.empty() |
|
|
| |
| with st.spinner("Loading news from 8 sources..."): |
| with ThreadPoolExecutor(max_workers=8) as executor: |
| |
| futures_map = { |
| executor.submit(fetch_twitter_news): 'twitter', |
| executor.submit(fetch_reddit_news): 'reddit', |
| executor.submit(fetch_rss_news): 'rss', |
| executor.submit(fetch_ai_tech_news): 'ai_tech', |
| executor.submit(fetch_prediction_markets): 'predictions', |
| executor.submit(fetch_sectoral_news): 'sectoral_news', |
| executor.submit(fetch_market_events): 'market_events', |
| executor.submit(fetch_economic_calendar): 'economic_calendar' |
| } |
|
|
| |
| fetch_errors = [] |
| completed_sources = [] |
|
|
| |
| try: |
| for future in as_completed(futures_map, timeout=90): |
| source_name = futures_map[future] |
|
|
| try: |
| result_df, error = future.result() |
|
|
| |
| completed_sources.append(source_name) |
| status_placeholder.info(f"π Loaded {len(completed_sources)}/8 sources ({', '.join(completed_sources)})") |
|
|
| if source_name == 'twitter': |
| twitter_df = result_df |
| if error: |
| fetch_errors.append(error) |
| elif source_name == 'reddit': |
| reddit_df = result_df |
| if error: |
| fetch_errors.append(error) |
| elif source_name == 'rss': |
| rss_all_df = result_df |
| if error: |
| fetch_errors.append(error) |
| |
| if not rss_all_df.empty and 'from_web' in rss_all_df.columns: |
| rss_main_df = rss_all_df[rss_all_df['from_web'] == True].copy() |
| elif source_name == 'ai_tech': |
| ai_tech_df = result_df |
| if error: |
| fetch_errors.append(error) |
| elif source_name == 'predictions': |
| predictions_df = result_df |
| if error: |
| fetch_errors.append(error) |
| elif source_name == 'sectoral_news': |
| sectoral_news_df = result_df |
| if error: |
| fetch_errors.append(error) |
| elif source_name == 'market_events': |
| market_events_df = result_df |
| if error: |
| fetch_errors.append(error) |
| elif source_name == 'economic_calendar': |
| economic_calendar_df = result_df |
| if error: |
| fetch_errors.append(error) |
|
|
| except Exception as e: |
| fetch_errors.append(f"Error fetching {source_name} news: {e}") |
| completed_sources.append(f"{source_name} (error)") |
| status_placeholder.warning(f"β οΈ {source_name} failed, continuing with other sources...") |
|
|
| except TimeoutError: |
| |
| fetch_errors.append("β±οΈ Some sources timed out after 90 seconds - displaying available results") |
| status_placeholder.warning(f"β οΈ {len(completed_sources)}/8 sources loaded (some timed out)") |
|
|
| |
| all_sources = set(futures_map.values()) |
| incomplete_sources = all_sources - set(completed_sources) |
| for source in incomplete_sources: |
| fetch_errors.append(f"{source} timed out - skipped") |
| completed_sources.append(f"{source} (timeout)") |
|
|
| |
| status_placeholder.success(f"β
Loaded {len(completed_sources)}/8 sources successfully") |
|
|
| |
# Summarize fetch outcomes for debugging. `logging` is already imported at
# the top of the file (the redundant mid-file re-import was removed); use
# lazy %-style arguments so formatting only happens when the level is on.
logger = logging.getLogger(__name__)
logger.info(
    "News Fetch Results: Twitter=%d, Reddit=%d, RSS=%d, AI/Tech=%d, "
    "Predictions=%d, Sectoral=%d, Events=%d, Calendar=%d",
    len(twitter_df), len(reddit_df), len(rss_all_df), len(ai_tech_df),
    len(predictions_df), len(sectoral_news_df), len(market_events_df),
    len(economic_calendar_df),
)
logger.info(
    "Availability: Predictions=%s, Sectoral=%s, Events=%s, Calendar=%s",
    PREDICTIONS_AVAILABLE, SECTORAL_AVAILABLE, EVENTS_AVAILABLE, CALENDAR_AVAILABLE,
)
if fetch_errors:
    for err in fetch_errors:
        logger.warning("Fetch error: %s", err)
|
|
| |
# Frames whose rows are eligible for background AI summarization.
ai_summary_dfs = [
    twitter_df,
    reddit_df,
    rss_all_df,
    ai_tech_df,
    sectoral_news_df,
    market_events_df,
    economic_calendar_df,
    predictions_df,
]

# Flatten every non-empty frame into a single list of record dicts.
all_items = [
    record
    for frame in ai_summary_dfs
    if not frame.empty
    for record in frame.to_dict("records")
]

if all_items:
    enqueue_items(all_items)

# Consume the one-shot refresh flag so the next rerun serves cached data.
if force_refresh:
    st.session_state.force_refresh = False
|
|
| |
# Sidebar selections bundled for cache_manager's filtered-view helper.
filters = {
    'category': category_filter,
    'sentiment': sentiment_filter,
    'impact': impact_filter
}

# Apply filters per source; empty frames pass through untouched.
twitter_filtered = cache_manager.get_filtered_news(twitter_df, filters, 'twitter') if not twitter_df.empty else twitter_df
reddit_filtered = cache_manager.get_filtered_news(reddit_df, filters, 'reddit') if not reddit_df.empty else reddit_df
rss_main_filtered = cache_manager.get_filtered_news(rss_main_df, filters, 'rss_main') if not rss_main_df.empty else rss_main_df
rss_all_filtered = cache_manager.get_filtered_news(rss_all_df, filters, 'rss_all') if not rss_all_df.empty else rss_all_df

# Combined social feed (column 1), newest first.
twitter_reddit_df = pd.concat([twitter_filtered, reddit_filtered], ignore_index=True) if not twitter_filtered.empty or not reddit_filtered.empty else pd.DataFrame()
if not twitter_reddit_df.empty:
    twitter_reddit_df = twitter_reddit_df.sort_values('timestamp', ascending=False)

# Union of all filtered news, used for breaking-news scoring below.
all_news_df = pd.concat([twitter_filtered, reddit_filtered, rss_all_filtered], ignore_index=True) if not twitter_filtered.empty or not reddit_filtered.empty or not rss_all_filtered.empty else pd.DataFrame()
|
|
| |
# Breaking-news banner: score all filtered items and show the single top
# story when its score clears the 40/100 threshold.
if not all_news_df.empty:
    scorer = get_breaking_news_scorer()

    all_news_list = all_news_df.to_dict('records')

    # Only the highest-scoring item is considered for the banner.
    breaking_news_items = scorer.get_breaking_news(all_news_list, top_n=1)

    if breaking_news_items and breaking_news_items[0]['breaking_score'] >= 40.0:
        breaking_df = pd.DataFrame([breaking_news_items[0]])
        display_breaking_news_banner(breaking_df)
    else:
        # Below threshold: show the current top score instead of a banner.
        if breaking_news_items:
            top_score = breaking_news_items[0]['breaking_score']
            st.info(f"π Monitoring financial markets - highest impact score: {top_score:.1f}/100 (threshold: 40)")
        else:
            st.info("π Monitoring financial markets - no news items available for scoring")
else:
    # No news loaded yet (first run or all fetches failed).
    st.info("π Loading financial news - breaking news banner will appear when data is available")

st.markdown("---")
|
|
| |
# Economic calendar strip, shown only when events were fetched.
if not economic_calendar_df.empty:
    display_economic_calendar_widget(economic_calendar_df)
    st.markdown("---")
|
|
| |
|
|
# Main news grid: four equal columns (social / headlines / RSS / AI-tech).
col1, col2, col3, col4 = st.columns(4)

with col1:
    # Column 1: combined Twitter/X + Reddit feed. Three states: data,
    # data-but-filtered-out, and still-loading placeholder.
    if not twitter_reddit_df.empty:
        display_scrollable_news_section(
            twitter_reddit_df,
            section_title="Twitter/X & Reddit News",
            section_icon="π",
            section_subtitle="Real-time news from premium accounts & communities (last 12h)",
            max_items=100,
            height="700px"
        )
    elif not twitter_df.empty or not reddit_df.empty:
        st.markdown("""
        <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 30px; text-align: center;">
            <div style="font-size: 48px; margin-bottom: 16px;">π</div>
            <div style="color: #D1D4DC; font-size: 16px; font-weight: 600; margin-bottom: 8px;">No matches found</div>
            <div style="color: #787B86; font-size: 13px;">Try adjusting your filters to see Twitter/X & Reddit news</div>
        </div>
        """, unsafe_allow_html=True)
    else:
        st.markdown("""
        <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 30px; text-align: center;">
            <div style="font-size: 48px; margin-bottom: 16px; animation: pulse 2s ease-in-out infinite;">β³</div>
            <div style="color: #D1D4DC; font-size: 16px; font-weight: 600; margin-bottom: 8px;">Loading Twitter/X & Reddit News</div>
            <div style="color: #787B86; font-size: 13px;">Fetching real-time news from premium sources...</div>
            <div style="color: #787B86; font-size: 12px; margin-top: 8px; opacity: 0.7;">This may take 30-60 seconds on first load</div>
        </div>
        <style>
        @keyframes pulse {
            0%, 100% { opacity: 1; transform: scale(1); }
            50% { opacity: 0.6; transform: scale(1.1); }
        }
        </style>
        """, unsafe_allow_html=True)
|
|
with col2:
    # Column 2: "Top Headlines" — RSS items scraped from outlets' main pages.
    if not rss_main_filtered.empty:
        display_scrollable_news_section(
            rss_main_filtered,
            section_title="Top Headlines",
            section_icon="π₯",
            section_subtitle="Latest from main pages",
            max_items=50,
            height="700px"
        )
    elif not rss_main_df.empty:
        st.markdown("""
        <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 30px; text-align: center;">
            <div style="font-size: 48px; margin-bottom: 16px;">π</div>
            <div style="color: #D1D4DC; font-size: 16px; font-weight: 600; margin-bottom: 8px;">No matches found</div>
            <div style="color: #787B86; font-size: 13px;">Try adjusting your filters to see top headlines</div>
        </div>
        """, unsafe_allow_html=True)
    else:
        st.markdown("""
        <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 30px; text-align: center;">
            <div style="font-size: 48px; margin-bottom: 16px; animation: pulse 2s ease-in-out infinite;">β³</div>
            <div style="color: #D1D4DC; font-size: 16px; font-weight: 600; margin-bottom: 8px;">Loading Top Headlines</div>
            <div style="color: #787B86; font-size: 13px;">Fetching latest news from major outlets...</div>
            <div style="color: #787B86; font-size: 12px; margin-top: 8px; opacity: 0.7;">Web scraping main pages</div>
        </div>
        <style>
        @keyframes pulse {
            0%, 100% { opacity: 1; transform: scale(1); }
            50% { opacity: 0.6; transform: scale(1.1); }
        }
        </style>
        """, unsafe_allow_html=True)
|
|
with col3:
    # Column 3: full aggregated RSS feed (all sources, filtered).
    if not rss_all_filtered.empty:
        display_scrollable_news_section(
            rss_all_filtered,
            section_title="RSS Feed",
            section_icon="π°",
            section_subtitle="Aggregated from all sources",
            max_items=100,
            height="700px"
        )
    elif not rss_all_df.empty:
        st.markdown("""
        <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 30px; text-align: center;">
            <div style="font-size: 48px; margin-bottom: 16px;">π</div>
            <div style="color: #D1D4DC; font-size: 16px; font-weight: 600; margin-bottom: 8px;">No matches found</div>
            <div style="color: #787B86; font-size: 13px;">Try adjusting your filters to see RSS feed news</div>
        </div>
        """, unsafe_allow_html=True)
    else:
        st.markdown("""
        <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 30px; text-align: center;">
            <div style="font-size: 48px; margin-bottom: 16px; animation: pulse 2s ease-in-out infinite;">β³</div>
            <div style="color: #D1D4DC; font-size: 16px; font-weight: 600; margin-bottom: 8px;">Loading RSS Feed</div>
            <div style="color: #787B86; font-size: 13px;">Aggregating news from all RSS sources...</div>
            <div style="color: #787B86; font-size: 12px; margin-top: 8px; opacity: 0.7;">Bloomberg, Reuters, FT, WSJ & more</div>
        </div>
        <style>
        @keyframes pulse {
            0%, 100% { opacity: 1; transform: scale(1); }
            50% { opacity: 0.6; transform: scale(1.1); }
        }
        </style>
        """, unsafe_allow_html=True)
|
|
with col4:
    # Column 4: AI & Tech news (unfiltered frame; no filter variant exists).
    if not ai_tech_df.empty:
        display_scrollable_news_section(
            ai_tech_df,
            section_title="AI & Tech News",
            section_icon="π€",
            section_subtitle="Latest from tech giants & AI research",
            max_items=100,
            height="700px"
        )
    else:
        # Distinguish a recorded fetch failure from a still-loading state.
        ai_tech_error = next((err for err in fetch_errors if 'ai_tech' in err.lower() or 'AI/Tech' in err), None) if 'fetch_errors' in locals() else None

        if ai_tech_error:
            st.markdown(f"""
            <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 30px; text-align: center;">
                <div style="font-size: 48px; margin-bottom: 16px;">β οΈ</div>
                <div style="color: #D1D4DC; font-size: 16px; font-weight: 600; margin-bottom: 8px;">AI & Tech News Unavailable</div>
                <div style="color: #787B86; font-size: 13px;">{ai_tech_error}</div>
            </div>
            """, unsafe_allow_html=True)
        else:
            st.markdown("""
            <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 30px; text-align: center;">
                <div style="font-size: 48px; margin-bottom: 16px; animation: pulse 2s ease-in-out infinite;">β³</div>
                <div style="color: #D1D4DC; font-size: 16px; font-weight: 600; margin-bottom: 8px;">Loading AI & Tech News</div>
                <div style="color: #787B86; font-size: 13px;">Aggregating from tech blogs & research...</div>
                <div style="color: #787B86; font-size: 12px; margin-top: 8px; opacity: 0.7;">OpenAI, Google AI, Microsoft, Meta & more</div>
                <div style="color: #FF9500; font-size: 12px; margin-top: 12px;">If this persists, check the "Source Fetch Warnings" section below</div>
            </div>
            <style>
            @keyframes pulse {
                0%, 100% { opacity: 1; transform: scale(1); }
                50% { opacity: 0.6; transform: scale(1.1); }
            }
            </style>
            """, unsafe_allow_html=True)
|
|
| |
# Second row: prediction markets, sectoral news, and market events.
st.markdown("---")
st.markdown("## π Market Intelligence - Predictions, Sectors & Events")

col5, col6, col7 = st.columns(3)

with col5:
    # Prediction markets column (data or loading placeholder).
    if not predictions_df.empty:
        display_scrollable_news_section(
            predictions_df,
            section_title="Prediction Markets",
            section_icon="π²",
            section_subtitle="Polymarket, Metaculus & CME FedWatch",
            max_items=50,
            height="600px"
        )
    else:
        st.markdown("""
        <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 30px; text-align: center;">
            <div style="font-size: 48px; margin-bottom: 16px; animation: pulse 2s ease-in-out infinite;">β³</div>
            <div style="color: #D1D4DC; font-size: 16px; font-weight: 600; margin-bottom: 8px;">Loading Prediction Markets</div>
            <div style="color: #787B86; font-size: 13px;">Fetching market forecasts...</div>
        </div>
        <style>
        @keyframes pulse {
            0%, 100% { opacity: 1; transform: scale(1); }
            50% { opacity: 0.6; transform: scale(1.1); }
        }
        </style>
        """, unsafe_allow_html=True)

with col6:
    # Sectoral news column (data or loading placeholder).
    if not sectoral_news_df.empty:
        display_scrollable_news_section(
            sectoral_news_df,
            section_title="Sectoral News",
            section_icon="π",
            section_subtitle="7 sectors: Finance, Tech, Energy & more",
            max_items=50,
            height="600px"
        )
    else:
        st.markdown("""
        <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 30px; text-align: center;">
            <div style="font-size: 48px; margin-bottom: 16px; animation: pulse 2s ease-in-out infinite;">β³</div>
            <div style="color: #D1D4DC; font-size: 16px; font-weight: 600; margin-bottom: 8px;">Loading Sectoral News</div>
            <div style="color: #787B86; font-size: 13px;">Aggregating sector-specific news...</div>
        </div>
        <style>
        @keyframes pulse {
            0%, 100% { opacity: 1; transform: scale(1); }
            50% { opacity: 0.6; transform: scale(1.1); }
        }
        </style>
        """, unsafe_allow_html=True)

with col7:
    # Market events column (data or loading placeholder).
    if not market_events_df.empty:
        display_scrollable_news_section(
            market_events_df,
            section_title="Market Events",
            section_icon="π",
            section_subtitle="Earnings, indicators & central banks",
            max_items=50,
            height="600px"
        )
    else:
        st.markdown("""
        <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 30px; text-align: center;">
            <div style="font-size: 48px; margin-bottom: 16px; animation: pulse 2s ease-in-out infinite;">β³</div>
            <div style="color: #D1D4DC; font-size: 16px; font-weight: 600; margin-bottom: 8px;">Loading Market Events</div>
            <div style="color: #787B86; font-size: 13px;">Fetching earnings & economic indicators...</div>
        </div>
        <style>
        @keyframes pulse {
            0%, 100% { opacity: 1; transform: scale(1); }
            50% { opacity: 0.6; transform: scale(1.1); }
        }
        </style>
        """, unsafe_allow_html=True)
|
|
| |
# Surface non-fatal per-source fetch problems in a collapsed expander.
# NOTE: at module level locals() is globals(), so this guard simply checks
# that the fetch phase defined fetch_errors.
if 'fetch_errors' in locals() and fetch_errors:
    with st.expander("β οΈ Source Fetch Warnings", expanded=False):
        for error in fetch_errors:
            st.caption(f"β’ {error}")

# AI-summary coverage: count rows whose `summary_ai` column is non-blank
# across every eligible frame.
total_items = sum(len(df) for df in ai_summary_dfs if not df.empty)
ai_summarized = 0
for df in ai_summary_dfs:
    if df.empty or "summary_ai" not in df.columns:
        continue
    ai_summarized += df["summary_ai"].fillna("").astype(str).str.strip().ne("").sum()

# Percentage shown in the AI Summary card below (0.0 when nothing loaded).
ai_summary_pct = (ai_summarized / total_items * 100) if total_items else 0.0
|
|
st.markdown("---")
@st.fragment(run_every=60)
def render_ai_summary_section():
    """Render the AI Summary panel; re-runs every 60s as a Streamlit fragment.

    Reads persisted summaries and worker status from the ai_summary_store.
    Closes over `ai_summarized`, `total_items` and `ai_summary_pct`, which
    are computed during the full-script run (not per fragment rerun).
    """
    summaries = fetch_summaries(limit=50)
    status = get_status()
    last_update_text = status.get("last_update") or "N/A"
    buffer_remaining = status.get("buffer_remaining_seconds")
    buffer_text = "N/A"
    if buffer_remaining is not None:
        buffer_text = f"{int(buffer_remaining)}s"

    st.markdown("## π€ AI Summary")
    # Status card: coverage, last update, buffer and cache counters.
    st.markdown(
        f"""
        <div style="background: linear-gradient(135deg, #1E222D 0%, #131722 100%); border: 1px solid #2A2E39; border-radius: 8px; padding: 20px; margin-bottom: 12px;">
            <div style="color: #E0E3EB; font-size: 16px; font-weight: 600; margin-bottom: 6px;">Current AI Summarizations</div>
            <div style="color: #D1D4DC; font-size: 14px; line-height: 1.6;">
                {ai_summarized} / {total_items} items summarized
                <span style="color: #787B86; font-size: 12px; margin-left: 8px;">({ai_summary_pct:.1f}% coverage)</span>
            </div>
            <div style="color: #787B86; font-size: 12px; margin-top: 6px;">Last update: {last_update_text}</div>
            <div style="color: #787B86; font-size: 12px;">Buffer: {status.get("buffer_size", 0)} items, next flush in {buffer_text}</div>
            <div style="color: #787B86; font-size: 12px;">Cache: {status.get("total_summaries", 0)} summaries, batch max ~{status.get("batch_max_chars", 0)} chars</div>
        </div>
        """,
        unsafe_allow_html=True,
    )

    # One card per stored summary.
    if summaries:
        for item in summaries:
            source = item.get("source", "")
            summary = item.get("summary", "")
            title = item.get("title", "")
            st.markdown(
                f"""
                <div style="background: #131722; border: 1px solid #2A2E39; border-radius: 6px; padding: 10px; margin-bottom: 8px;">
                    <div style="color: #E0E3EB; font-size: 13px; font-weight: 600;">{source} β {title}</div>
                    <div style="color: #D1D4DC; font-size: 13px; margin-top: 4px;">{summary}</div>
                </div>
                """,
                unsafe_allow_html=True,
            )
    else:
        st.info("AI summaries will appear after the 2-minute buffering window completes.")

render_ai_summary_section()
|
|
| |
# Naive auto-refresh: sleep for the cache TTL (3 min) then force a rerun.
# NOTE(review): time.sleep blocks this session's script run for the whole
# interval; the fragment above still updates every 60s, but everything after
# this point only renders once per cycle — confirm this is intended.
if auto_refresh:
    import time
    time.sleep(180)
    st.rerun()
|
|
| |
# Static footer: usage guidance. Only reached when auto-refresh is off
# (otherwise the sleep/rerun above restarts the script first).
st.markdown("---")
st.markdown("""
### π‘ How to Use This Dashboard

**For Traders:**
- Monitor breaking news in real-time for market-moving events
- Filter by category to focus on macro, markets, or geopolitical news
- Use sentiment analysis to gauge market mood
- High-impact news items require immediate attention

**Tips:**
- Enable auto-refresh for continuous monitoring during trading hours
- Focus on "HIGH IMPACT" news for potential volatility
- Breaking news (π΄) indicates urgent market-moving information
- Check engagement metrics (likes + retweets) for news importance

**Data Source:** Dual-mode scraping - RSS feeds + direct web page parsing from Reuters, Bloomberg, FT, WSJ, CNBC, Google News, Yahoo Finance, Fed, ECB and more
**Update Frequency:** 3-minute cache for low-latency delivery
**No Authentication Required:** Public sources - works out of the box
""")
|
|