"""
api_server.py — FastAPI REST API for Oil Risk Dashboard
========================================================
Serves data from output/ directory as JSON REST endpoints.
Run: python api_server.py
"""
import os, json
import pandas as pd
import numpy as np
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional
from config import BASE_DIR, OUTPUT_DIR, OUTPUT_FILES, PRICE_COLS, INDUSTRIES
os.chdir(BASE_DIR)
app = FastAPI(title="Oil Risk Intelligence API", version="2.0")
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
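# NOTE: CORS is wide open (any origin / method / header), which suits a public demo API;
# restrict allow_origins if the API is ever deployed with credentialed access.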
# ── Helpers ──
def _load_json(path, default=None):
try:
with open(path, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception:
return default if default is not None else {}
def _load_results(benchmark: str) -> pd.DataFrame:
path = os.path.join(OUTPUT_DIR, f'v2_results_{benchmark}.csv')
if not os.path.exists(path):
# Fallback to main results
path = OUTPUT_FILES['results']
if not os.path.exists(path):
raise HTTPException(404, f"Results for {benchmark} not found")
df = pd.read_csv(path)
df['test_date'] = pd.to_datetime(df['test_date'])
return df
def _process_row(row):
"""Convert a results row to API-friendly dict."""
d = {
'date': row['test_date'].strftime('%Y-%m'),
'risk_level': row.get('risk_level', 'Medium'),
'risk_bias': row.get('risk_bias', 'Balanced'),
'pred_vol': round(row.get('pred_vol', 0) * 100, 2),
'top_factor': row.get('top_factor', 'Unknown'),
'regime_match': row.get('regime_match', 'Unknown'),
'regime_similarity': round(row.get('regime_similarity', 0), 4),
'regime_type': row.get('regime_type', 'normal'),
}
# Quantile predictions
    for k in ['pred_q10_1m', 'pred_q50_1m', 'pred_q90_1m',
              'qr_q10_1m', 'qr_q50_1m', 'qr_q90_1m',
              'lgb_q10_1m', 'lgb_q50_1m', 'lgb_q90_1m',
              'pred_q10_3m', 'pred_q50_3m', 'pred_q90_3m',
              'qr_q10_3m', 'qr_q50_3m', 'qr_q90_3m',  # needed so the 3m fallback below can fire
              'cqr_q10_1m', 'cqr_q50_1m', 'cqr_q90_1m']:
if k in row.index and pd.notna(row.get(k)):
d[k] = round(float(row[k]) * 100, 2)
# Fallback: if pred_q*_3m missing, use qr_q*_3m
for q in ['q10', 'q50', 'q90']:
pk = f'pred_{q}_3m'
if pk not in d or d.get(pk) is None:
qk = f'qr_{q}_3m'
if qk in d:
d[pk] = d[qk]
# Actuals
if pd.notna(row.get('actual_ret_1m')):
d['actual_ret_1m'] = round(float(row['actual_ret_1m']) * 100, 2)
if pd.notna(row.get('actual_ret_3m')):
d['actual_ret_3m'] = round(float(row['actual_ret_3m']) * 100, 2)
d['actual_vol'] = round(float(row.get('actual_vol', 0)) * 100, 2) if pd.notna(row.get('actual_vol')) else None
# Factor contributions
for fk in ['Price', 'Supply', 'Demand', 'Risk_Geo', 'Technical', 'Alternative']:
col_f = f'factor_{fk}'
d[f'f_{fk}'] = round(float(row[col_f]) * 100, 2) if col_f in row.index and pd.notna(row.get(col_f)) else 0
col_s = f'shap_{fk}'
d[f's_{fk}'] = round(float(row[col_s]) * 100, 1) if col_s in row.index and pd.notna(row.get(col_s)) else 0
# Industry
for ind in INDUSTRIES:
d[f'{ind}_r'] = row.get(f'{ind}_risk', 'Low')
d[f'{ind}_a'] = row.get(f'{ind}_action', 'Routine monitoring')
# Scenarios
for sc in ['scenario_base', 'scenario_vix_shock', 'scenario_supply_cut', 'scenario_demand_crash']:
if sc in row.index and pd.notna(row[sc]):
d[sc.replace('scenario_', '')] = round(float(row[sc]) * 100, 2)
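    # Illustrative shape of the returned dict (example values only, not real output):
    #   {'date': '2024-05', 'risk_level': 'High', 'pred_q10_1m': -8.2, 'pred_q90_1m': 6.4,
    #    'f_Supply': 21.3, 's_Supply': 1.2, ..., 'base': -1.5}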
return d
def _compute_eval(df: pd.DataFrame) -> dict:
"""Compute evaluation metrics from results DataFrame."""
mask = df['actual_ret_1m'].notna()
r = df[mask].copy()
n = len(r)
if n == 0:
return {'n': 0}
ar = r['actual_ret_1m'].values
pv = r['pred_vol'].values
av = r['actual_vol'].values
q10 = r['pred_q10_1m'].values
q90 = r['pred_q90_1m'].values
cov_1m = float(((ar >= q10) & (ar <= q90)).mean())
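    # Winkler/interval score for the central 80% interval (alpha = 0.2):
    #   IS = (q90 - q10) + (2/alpha) * max(q10 - y, 0) + (2/alpha) * max(y - q90, 0)
    # i.e. interval width plus a 10x penalty on the amount by which the actual return falls outside.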
wis_1m = float(((q90 - q10) + (2 / 0.2) * np.maximum(q10 - ar, 0) + (2 / 0.2) * np.maximum(ar - q90, 0)).mean())
naive_wis = float(((np.quantile(ar, 0.90) - np.quantile(ar, 0.10)) + (2 / 0.2) * np.maximum(np.quantile(ar, 0.10) - ar, 0) + (2 / 0.2) * np.maximum(ar - np.quantile(ar, 0.90), 0)).mean())
vm = np.nanmedian(av)
hi = av > vm
cov_hi = float(((ar[hi] >= q10[hi]) & (ar[hi] <= q90[hi])).mean()) if hi.sum() > 0 else 0
vol_rmse = float(np.sqrt(np.nanmean((av - pv) ** 2)))
vol_corr = float(np.corrcoef(av[~np.isnan(av)], pv[~np.isnan(av)])[0, 1]) if n > 2 else 0
m3m = r['actual_ret_3m'].notna()
cov_3m = float(((r.loc[m3m, 'actual_ret_3m'].values >= r.loc[m3m, 'pred_q10_3m'].values) & (r.loc[m3m, 'actual_ret_3m'].values <= r.loc[m3m, 'pred_q90_3m'].values)).mean()) if m3m.sum() > 10 else 0
lgb_cov = lgb_wis = 0
if 'lgb_q10_1m' in r.columns:
lgb_cov = float(((ar >= r['lgb_q10_1m'].values) & (ar <= r['lgb_q90_1m'].values)).mean())
lgb_wis = float(((r['lgb_q90_1m'].values - r['lgb_q10_1m'].values) + (2 / 0.2) * np.maximum(r['lgb_q10_1m'].values - ar, 0) + (2 / 0.2) * np.maximum(ar - r['lgb_q90_1m'].values, 0)).mean())
cqr_cov = cqr_wis = 0
if 'cqr_q10_1m' in r.columns:
cq10 = r['cqr_q10_1m'].values
cq90 = r['cqr_q90_1m'].values
cqr_cov = float(((ar >= cq10) & (ar <= cq90)).mean())
cqr_wis = float(((cq90 - cq10) + (2 / 0.2) * np.maximum(cq10 - ar, 0) + (2 / 0.2) * np.maximum(ar - cq90, 0)).mean())
return {
'cov_1m': round(cov_1m * 100, 1), 'wis_1m': round(wis_1m, 4),
'naive_wis': round(naive_wis, 4), 'cov_hi': round(cov_hi * 100, 1),
'cov_3m': round(cov_3m * 100, 1), 'vol_rmse': round(vol_rmse, 4),
'vol_corr': round(vol_corr, 3), 'n': n,
'lgb_cov': round(lgb_cov * 100, 1), 'lgb_wis': round(lgb_wis, 4),
'cqr_cov': round(cqr_cov * 100, 1), 'cqr_wis': round(cqr_wis, 4),
}
# ── API Endpoints ──
@app.get("/api/benchmarks")
def get_benchmarks():
"""List available benchmarks."""
available = []
for bm in PRICE_COLS:
path = os.path.join(OUTPUT_DIR, f'v2_results_{bm}.csv')
if os.path.exists(path):
available.append(bm)
return available or ['WTI']
@app.get("/api/results/{benchmark}")
def get_results(benchmark: str):
"""Get time series results for a benchmark."""
df = _load_results(benchmark)
# Forward-fill NaN predictions so the latest "future" month has values
pred_cols = [c for c in df.columns if any(c.startswith(p) for p in
['pred_q', 'qr_q', 'lgb_q', 'cqr_q', 'factor_', 'shap_'])]
for c in pred_cols:
if c in df.columns:
df[c] = df[c].ffill()
return [_process_row(row) for _, row in df.iterrows()]
@app.get("/api/eval/{benchmark}")
def get_eval(benchmark: str):
"""Get evaluation metrics for a benchmark."""
df = _load_results(benchmark)
return _compute_eval(df)
@app.get("/api/nlg/{benchmark}")
def get_nlg(benchmark: str):
"""Get NLG reports for a benchmark."""
path = os.path.join(OUTPUT_DIR, f'v2_nlg_{benchmark}.json')
if not os.path.exists(path):
path = OUTPUT_FILES['nlg']
return _load_json(path)
@app.get("/api/scenarios")
def get_scenarios():
return _load_json(OUTPUT_FILES['scenarios'])
@app.get("/api/regime")
def get_regime():
return _load_json(OUTPUT_FILES['regime'])
@app.get("/api/hedging")
def get_hedging():
data = _load_json(OUTPUT_FILES['hedging'])
# Enrich tool_comparison with static metadata for display
TOOL_META = {
        'futures': {'cost': '低(保证金)', 'downside_protection': '100%', 'upside_participation': '0%', 'complexity': '低', 'best_for': '确定性需求、锁定成本'},
        'put': {'cost': '中(权利金)', 'downside_protection': '100%', 'upside_participation': '100%', 'complexity': '中', 'best_for': '保留上行空间'},
        'collar': {'cost': '极低/零', 'downside_protection': '90%', 'upside_participation': '有限', 'complexity': '高', 'best_for': '预算敏感、限价对冲'},
}
if isinstance(data, dict):
for ind_key, ind_data in data.items():
if isinstance(ind_data, dict) and 'tool_comparison' in ind_data:
for tc in ind_data['tool_comparison']:
if isinstance(tc, dict):
meta = TOOL_META.get(tc.get('tool', ''), {})
for mk, mv in meta.items():
if mk not in tc:
tc[mk] = mv
return data
@app.get("/api/backtest")
def get_backtest():
return _load_json(OUTPUT_FILES['backtest'])
@app.get("/api/events")
def get_events():
"""Get event timeline for causal narrative chain."""
return _load_json(os.path.join(OUTPUT_DIR, 'event_timeline.json'), [])
@app.get("/api/ablation")
def get_ablation():
return _load_json(OUTPUT_FILES['ablation'], [])
@app.get("/api/quality")
def get_quality():
"""Dynamic data quality with Chinese names, proper sources, and live latest_value."""
# Feature metadata: name_zh, source, source_detail, frequency, factor_group, lag
META = {
        'WTI_spot': {'zh': 'WTI原油现货价', 'src': 'FRED', 'src_detail': 'FRED DCOILWTICO', 'freq': 'daily→monthly', 'group': 'Price', 'lag': 1},
        'Brent_spot': {'zh': 'Brent原油现货价', 'src': 'FRED', 'src_detail': 'FRED DCOILBRENTEU', 'freq': 'daily→monthly', 'group': 'Price', 'lag': 1},
        'natgas_spot_henry': {'zh': '天然气现货(Henry Hub)', 'src': 'FRED', 'src_detail': 'FRED DHHNGSP', 'freq': 'daily→monthly', 'group': 'Price', 'lag': 1},
        'iron_ore_spot': {'zh': '铁矿石现货价', 'src': 'World Bank', 'src_detail': 'Pink Sheet (铁矿石CFR天津)', 'freq': 'monthly', 'group': 'Price', 'lag': 30},
        'gold_spot': {'zh': '黄金现货价', 'src': 'FRED', 'src_detail': 'FRED GOLDAMGBD228NLBM', 'freq': 'daily→monthly', 'group': 'Price', 'lag': 1},
        'pmi_us_mfg': {'zh': '美国制造业PMI', 'src': 'FRED', 'src_detail': 'FRED MANEMP/ISM', 'freq': 'monthly', 'group': 'Demand', 'lag': 5},
        'ipi_us': {'zh': '美国工业生产指数', 'src': 'FRED', 'src_detail': 'FRED INDPRO', 'freq': 'monthly', 'group': 'Demand', 'lag': 15},
        'nonfarm_us': {'zh': '美国非农就业人数', 'src': 'FRED', 'src_detail': 'FRED PAYEMS', 'freq': 'monthly', 'group': 'Demand', 'lag': 5},
        'usd_index': {'zh': '美元指数(DXY)', 'src': 'FRED', 'src_detail': 'FRED DTWEXBGS', 'freq': 'daily→monthly', 'group': 'Demand', 'lag': 1},
        'cpi_us': {'zh': '美国CPI(同比)', 'src': 'FRED', 'src_detail': 'FRED CPIAUCSL', 'freq': 'monthly', 'group': 'Demand', 'lag': 12},
        'fed_funds_rate': {'zh': '联邦基金利率', 'src': 'FRED', 'src_detail': 'FRED FEDFUNDS', 'freq': 'monthly', 'group': 'Demand', 'lag': 1},
        'yield_spread_10y2y': {'zh': '美债利差(10Y-2Y)', 'src': 'FRED', 'src_detail': 'FRED T10Y2Y', 'freq': 'daily→monthly', 'group': 'Demand', 'lag': 1},
        'vix': {'zh': 'VIX波动率指数', 'src': 'FRED', 'src_detail': 'FRED VIXCLS', 'freq': 'daily→monthly', 'group': 'Risk', 'lag': 1},
        'gpr_index': {'zh': '地缘政治风险指数(GPR)', 'src': 'GPR', 'src_detail': 'Caldara & Iacoviello', 'freq': 'monthly', 'group': 'Risk', 'lag': 30},
        'us_oil_inventory_total': {'zh': '美国原油商业库存', 'src': 'EIA', 'src_detail': 'EIA WCESTUS1', 'freq': 'weekly→monthly', 'group': 'Supply', 'lag': 5},
        'us_crude_production': {'zh': '美国原油产量', 'src': 'EIA', 'src_detail': 'EIA MCRFPUS2', 'freq': 'monthly', 'group': 'Supply', 'lag': 60},
        'rig_count_us_new': {'zh': '美国石油钻井数', 'src': 'Baker Hughes', 'src_detail': 'Baker Hughes Rig Count', 'freq': 'weekly→monthly', 'group': 'Supply', 'lag': 3},
        'supply_saudi': {'zh': '沙特原油产量', 'src': 'OPEC', 'src_detail': 'OPEC MOMR', 'freq': 'monthly', 'group': 'Supply', 'lag': 15},
        # Derived features (computed from source features)
        'vix_lag1': {'zh': 'VIX滞后1期', 'src': '派生计算', 'src_detail': 'VIX t-1', 'freq': 'monthly', 'group': 'Risk_Geo', 'lag': 0},
        'vix_lag2': {'zh': 'VIX滞后2期', 'src': '派生计算', 'src_detail': 'VIX t-2', 'freq': 'monthly', 'group': 'Risk_Geo', 'lag': 0},
        'geo_shock_count': {'zh': '地缘冲击事件数', 'src': '派生计算', 'src_detail': 'GPR超阈值计数', 'freq': 'monthly', 'group': 'Risk_Geo', 'lag': 0},
        'geo_active_events': {'zh': '活跃地缘事件数', 'src': '派生计算', 'src_detail': '事件时间线活跃计数', 'freq': 'monthly', 'group': 'Risk_Geo', 'lag': 0},
        'mom1m_lag1': {'zh': '油价动量(1M滞后)', 'src': '派生计算', 'src_detail': 'WTI月收益率 t-1', 'freq': 'monthly', 'group': 'Technical', 'lag': 0},
        'hist_vol_12m': {'zh': '12月历史波动率', 'src': '派生计算', 'src_detail': 'WTI 12M滚动std', 'freq': 'monthly', 'group': 'Technical', 'lag': 0},
        'rsi12m': {'zh': '12月RSI指标', 'src': '派生计算', 'src_detail': 'WTI 12M RSI', 'freq': 'monthly', 'group': 'Technical', 'lag': 0},
}
# Try to read actual panel data for live values
    panel = None
    for pp in ['output/panel_monthly_live.csv', 'output/panel_monthly.csv']:
        if os.path.exists(pp):
            try:
                panel = pd.read_csv(pp, index_col=0, parse_dates=True)
                break  # stop at the first panel that loads successfully
            except Exception:
                panel = None  # fall through and try the next candidate
# Also try the static quality report as fallback
static = _load_json(OUTPUT_FILES.get('quality', ''), {})
result = {}
from config import FEATURES
all_feats = list(META.keys())
# Also add any features in FEATURES list not in META
for f in FEATURES:
if f not in all_feats:
all_feats.append(f)
for feat in all_feats:
meta = META.get(feat, {})
sq = static.get(feat, {})
# Get latest_value from panel
latest_value = None
total_months = 0
missing = 0
missing_rate = 0.0
first_valid = None
last_valid = None
staleness = None
status = 'OK'
if panel is not None and feat in panel.columns:
series = panel[feat].dropna()
total_months = len(panel)
missing = int(panel[feat].isna().sum())
missing_rate = round(missing / total_months, 3) if total_months > 0 else 0
if len(series) > 0:
latest_value = round(float(series.iloc[-1]), 4)
first_valid = str(series.index[0])[:10]
last_valid = str(series.index[-1])[:10]
staleness = (pd.Timestamp.now() - series.index[-1]).days
else:
# Fall back to static data
latest_value = sq.get('latest_value')
total_months = sq.get('total_months', 0)
missing = sq.get('missing', 0)
missing_rate = sq.get('missing_rate', 0)
first_valid = sq.get('first_valid')
last_valid = sq.get('last_valid')
staleness = sq.get('staleness_days')
# Determine status
if missing_rate > 0.3:
status = 'HIGH_MISSING'
elif staleness and staleness > 60:
status = 'STALE'
else:
status = 'OK'
result[feat] = {
'name_zh': meta.get('zh', feat),
'source': meta.get('src', sq.get('source', 'CSV')),
'source_detail': meta.get('src_detail', ''),
'factor_group': meta.get('group', sq.get('factor_group', 'Other')),
'frequency': meta.get('freq', sq.get('frequency', 'monthly')),
'release_lag_days': meta.get('lag', sq.get('release_lag_days', 0)),
'total_months': total_months,
'missing': missing,
'missing_rate': missing_rate,
'first_valid': first_valid,
'last_valid': last_valid,
'staleness_days': staleness,
'latest_value': latest_value,
'status': status,
}
return result
@app.get("/api/lineage")
def get_lineage():
lineage = _load_json(OUTPUT_FILES.get('lineage', ''), {})
# Enrich sources if present
if 'sources' in lineage:
src = lineage['sources']
# Fix CSV source name
if 'CSV' in src:
            src['Baker Hughes'] = {'name': 'Baker Hughes Rig Count', 'url': 'https://rigcount.bakerhughes.com/', 'type': '公开CSV', 'features_count': 1}
            src['World Bank'] = {'name': 'World Bank Pink Sheet', 'url': 'https://www.worldbank.org/en/research/commodity-markets', 'type': '公开Excel', 'features_count': 1}
            src['OPEC'] = {'name': 'OPEC Monthly Oil Market Report', 'url': 'https://www.opec.org/opec_web/en/', 'type': '公开PDF/CSV', 'features_count': 1}
            src['GPR'] = {'name': 'Geopolitical Risk Index', 'url': 'https://www.matteoiacoviello.com/gpr.htm', 'type': '公开CSV', 'features_count': 1}
del src['CSV']
return lineage
@app.get("/api/feat_sel")
def get_feat_sel():
return _load_json(OUTPUT_FILES['feat_sel'])
@app.get("/api/causal")
def get_causal():
return _load_json(os.path.join(OUTPUT_DIR, 'causal_analysis.json'), {})
# โ”€โ”€ AI Agent Chat โ”€โ”€
class ChatRequest(BaseModel):
message: str
session_id: Optional[str] = None
_sessions = {}
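# In-memory chat history keyed by session_id. This is per-process state: it is lost on
# restart and not shared across workers, which is acceptable for a single-worker demo.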
@app.post("/api/chat")
async def chat(req: ChatRequest):
"""AI Agent chat endpoint."""
try:
from agent.chat import chat_with_agent
session_id = req.session_id or 'default'
history = _sessions.get(session_id, [])
reply, history = chat_with_agent(req.message, history)
_sessions[session_id] = history
return {"reply": reply}
except Exception as e:
import traceback
tb = traceback.format_exc()
print(f"[AGENT ERROR] {type(e).__name__}: {e}\n{tb}")
        error_msg = f"⚠️ Agent 调用出错 ({type(e).__name__}): {str(e)}"
        if "timeout" in str(e).lower() or "connect" in str(e).lower():
            error_msg += "\n🔄 LLM 服务暂时不可用，请稍后重试。"
return {"reply": error_msg}
# ═══════════════════════════════════════════════════════════
# Live Oil Prices — AKShare real-time daily data
# ═══════════════════════════════════════════════════════════
import time as _time
_price_cache = {"data": None, "ts": 0}
_news_cache = {"data": None, "ts": 0}
CACHE_TTL = 3600 # 1 hour cache
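# Simple per-process TTL caches: external data is re-fetched at most once per hour per worker.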
def _fetch_live_prices():
"""Fetch latest oil prices from AKShare (daily frequency)."""
now = _time.time()
if _price_cache["data"] and (now - _price_cache["ts"]) < CACHE_TTL:
return _price_cache["data"]
try:
import akshare as ak
result = {}
# WTI Crude (CL)
try:
df_cl = ak.futures_foreign_hist(symbol='CL')
if len(df_cl) >= 2:
cur = df_cl.iloc[-1]
prev = df_cl.iloc[-2]
price = float(cur['close'])
change = round(price - float(prev['close']), 2)
pct = round(change / float(prev['close']) * 100, 2)
result['wti'] = {
'price': price, 'change': change, 'pct': pct,
'date': str(cur['date'])[:10]
}
except Exception as e:
print(f"[LIVE] WTI fetch failed: {e}")
        # Brent Crude — estimate from WTI + typical spread (~$3-5)
if 'wti' in result:
wti_p = result['wti']['price']
# Use historical Brent-WTI spread
brent_spread = 3.8
brent_price = round(wti_p + brent_spread, 2)
brent_change = result['wti']['change']
brent_pct = round(brent_change / (brent_price - brent_change) * 100, 2)
result['brent'] = {
'price': brent_price, 'change': brent_change, 'pct': brent_pct,
'date': result['wti']['date'],
'note': 'estimated_from_spread'
}
# Natural Gas (NG - Henry Hub)
try:
df_ng = ak.futures_foreign_hist(symbol='NG')
if len(df_ng) >= 2:
cur = df_ng.iloc[-1]
prev = df_ng.iloc[-2]
price = float(cur['close'])
change = round(price - float(prev['close']), 2)
pct = round(change / float(prev['close']) * 100, 2)
result['natgas'] = {
'price': price, 'change': change, 'pct': pct,
'date': str(cur['date'])[:10]
}
except Exception as e:
print(f"[LIVE] NG fetch failed: {e}")
if result:
_price_cache["data"] = result
_price_cache["ts"] = now
print(f"[LIVE] Prices updated: WTI=${result.get('wti',{}).get('price','?')}, "
f"Brent=${result.get('brent',{}).get('price','?')}, "
f"NG=${result.get('natgas',{}).get('price','?')}")
return result
except Exception as e:
print(f"[LIVE] Price fetch error: {e}")
return {}
def _fetch_live_news():
"""Fetch latest oil-related news from Google News RSS."""
now = _time.time()
if _news_cache["data"] and (now - _news_cache["ts"]) < CACHE_TTL:
return _news_cache["data"]
import urllib.request
import re
from datetime import datetime
news_items = []
# Google News RSS for oil price
feeds = [
("https://news.google.com/rss/search?q=oil+price+crude+OPEC&hl=en-US&gl=US&ceid=US:en", "en"),
("https://news.google.com/rss/search?q=ๆฒนไปท+ๅŽŸๆฒน+OPEC&hl=zh-CN&gl=CN&ceid=CN:zh-Hans", "zh"),
]
for feed_url, lang in feeds:
try:
req = urllib.request.Request(feed_url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req, timeout=8) as resp:
data = resp.read().decode('utf-8', errors='replace')
# Parse XML items
items = re.findall(r'<item>(.*?)</item>', data, re.DOTALL)
for item_xml in items[:8]:
title = re.search(r'<title>(.*?)</title>', item_xml)
source = re.search(r'<source[^>]*>(.*?)</source>', item_xml)
pub_date = re.search(r'<pubDate>(.*?)</pubDate>', item_xml)
if title:
title_text = title.group(1).strip()
# Clean HTML entities
title_text = title_text.replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>').replace('&#39;', "'").replace('&quot;', '"')
src = source.group(1).strip() if source else 'News'
# Parse date
date_str = ''
if pub_date:
                        try:
                            # RSS pubDate looks like "Mon, 02 Jan 2006 15:04:05 GMT";
                            # the first 25 characters drop the timezone suffix before parsing.
                            dt = datetime.strptime(pub_date.group(1).strip()[:25],
                                                   '%a, %d %b %Y %H:%M:%S')
                            date_str = dt.strftime('%m-%d %H:%M')
                        except Exception:
                            date_str = pub_date.group(1).strip()[:16]
# Auto-tag based on keywords
tag = _auto_tag(title_text)
news_items.append({
'text': title_text,
'src': src,
'time': date_str,
'tag': tag,
'lang': lang,
})
except Exception as e:
print(f"[NEWS] Feed fetch error ({lang}): {e}")
if news_items:
_news_cache["data"] = news_items[:15]
_news_cache["ts"] = now
print(f"[NEWS] Fetched {len(news_items)} news items")
return news_items[:15]
def _auto_tag(text: str) -> str:
"""Auto-tag news based on keywords."""
t = text.lower()
    if any(w in t for w in ['opec', 'supply', 'production', 'output', 'barrel',
                            '产量', '减产', '增产', '供给', '库存', 'inventory']):
        return '供给'
    if any(w in t for w in ['demand', 'consumption', 'growth', 'recession',
                            '需求', '消费', '增长', '衰退', 'gdp']):
        return '需求'
    if any(w in t for w in ['war', 'sanction', 'iran', 'russia', 'conflict', 'military',
                            '制裁', '冲突', '地缘', '战争', 'tariff', '关税', 'trump']):
        return '地缘'
    if any(w in t for w in ['fed', 'rate', 'inflation', 'dollar', 'central bank',
                            '利率', '通胀', '美联储', '央行', '美元']):
        return '宏观'
    if any(w in t for w in ['renewable', 'ev', 'solar', 'wind', 'transition', 'climate',
                            '新能源', '碳', '气候', '转型']):
        return '政策'
    return '市场'
@app.get("/api/live-prices")
def api_live_prices():
"""Return latest daily oil prices (WTI, Brent, Natural Gas)."""
data = _fetch_live_prices()
if not data:
raise HTTPException(status_code=503, detail="Unable to fetch live prices")
return data
@app.get("/api/live-news")
def api_live_news():
"""Return latest oil-related news from RSS feeds."""
data = _fetch_live_news()
return {"items": data}
# ═══════════════════════════════════════════════════════════
# Static file serving (for Docker / HuggingFace deployment)
# ═══════════════════════════════════════════════════════════
_frontend_dist = os.path.join(BASE_DIR, 'frontend', 'dist')
if os.path.isdir(_frontend_dist):
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
# Serve static assets (JS/CSS/images)
app.mount("/assets", StaticFiles(directory=os.path.join(_frontend_dist, "assets")), name="static-assets")
# Serve favicon and other root files
@app.get("/favicon.svg")
async def favicon():
return FileResponse(os.path.join(_frontend_dist, "favicon.svg"))
# Catch-all: serve index.html for SPA routing
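    # Registered after the /api routes above, so those keep precedence (routes match in registration order).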
@app.get("/{full_path:path}")
async def serve_spa(full_path: str):
file_path = os.path.join(_frontend_dist, full_path)
if os.path.isfile(file_path):
return FileResponse(file_path)
return FileResponse(os.path.join(_frontend_dist, "index.html"))
if __name__ == '__main__':
import uvicorn
port = int(os.environ.get("PORT", 8765))
print("=" * 60)
print(f"ๆฒนๅˆƒๆœ‰ไฝ™ OilVerse API โ€” http://localhost:{port}")
print("=" * 60)
uvicorn.run(app, host="0.0.0.0", port=port)