JamieYuu committed
Commit a18310b · verified · 1 parent: 2453fe8

Add playground inference script

Files changed (1)
  1. hf_playground_inference.py +466 -0
hf_playground_inference.py ADDED
@@ -0,0 +1,466 @@
import argparse
import json
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import joblib
import numpy as np
import pandas as pd
import torch
from transformers import AutoModel, AutoTokenizer


DEFAULT_ARTIFACTS_DIR = Path("outputs_compare_models")
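
# NOTE: column order matters; the numeric scaler loaded in PlaygroundPredictor
# was presumably fit on exactly these columns, in this order, at training time.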
NUMERIC_FEATURE_NAMES = [
    "btc_open_lag1",
    "btc_high_lag1",
    "btc_low_lag1",
    "btc_close_lag1",
    "btc_volume_lag1",
    "fng_value_lag1",
    "btc_return_lag1",
    "btc_volatility_lag1",
    "btc_volume_change_vs_7d_lag1",
]


def pick_device() -> torch.device:
    if torch.cuda.is_available():
        return torch.device("cuda")
    if getattr(torch.backends, "mps", None) and torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


def resolve_default_model_name(artifacts_dir: Path, fallback: str) -> str:
    metrics_path = artifacts_dir / "metrics_xgb_cls_vs_numeric.json"
    if not metrics_path.exists():
        return fallback

    try:
        with metrics_path.open("r", encoding="utf-8") as f:
            metrics = json.load(f)
        model_name = metrics.get("text_model")
        if isinstance(model_name, str) and model_name.strip():
            return model_name.strip()
    except Exception:
        pass

    return fallback


def infer_required_text_dim(artifacts_dir: Path) -> Optional[int]:
    model_path = artifacts_dir / "xgb_model.joblib"
    scaler_path = artifacts_dir / "numeric_scaler.joblib"
    encoder_path = artifacts_dir / "fng_onehot_encoder.joblib"
    if not model_path.exists() or not scaler_path.exists() or not encoder_path.exists():
        return None

    try:
        xgb_model = joblib.load(model_path)
        scaler = joblib.load(scaler_path)
        encoder = joblib.load(encoder_path)
        total_dim = getattr(xgb_model, "n_features_in_", None)
        num_dim = int(getattr(scaler, "n_features_in_", 0))
        cat_dim = int(sum(len(c) for c in getattr(encoder, "categories_", [])))
        if total_dim is None:
            return None
        text_dim = int(total_dim) - num_dim - cat_dim
        if text_dim <= 0:
            return None
        return text_dim
    except Exception:
        return None
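

# Text-embedding width identifies the encoder: 768 is the hidden size of
# BERT-base models such as ProsusAI/finbert, while boltuix/bert-lite emits
# 256-dim vectors.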
def pick_model_from_required_dim(
    required_text_dim: Optional[int],
    metrics_model_name: str,
) -> str:
    if required_text_dim == 768:
        return "ProsusAI/finbert"
    if required_text_dim == 256:
        return "boltuix/bert-lite"
    return metrics_model_name


class PlaygroundPredictor:
    def __init__(
        self,
        artifacts_dir: Path,
        model_name: str,
        tokenizer_path: Optional[str],
        max_length: int,
        batch_size: int,
    ) -> None:
        self.artifacts_dir = artifacts_dir
        self.max_length = max_length
        self.batch_size = batch_size
        self.device = pick_device()

        model_path = artifacts_dir / "xgb_model.joblib"
        scaler_path = artifacts_dir / "numeric_scaler.joblib"
        encoder_path = artifacts_dir / "fng_onehot_encoder.joblib"

        if not model_path.exists():
            raise FileNotFoundError(f"Missing model artifact: {model_path}")
        if not scaler_path.exists():
            raise FileNotFoundError(f"Missing scaler artifact: {scaler_path}")
        if not encoder_path.exists():
            raise FileNotFoundError(f"Missing encoder artifact: {encoder_path}")

        self.xgb_model = joblib.load(model_path)
        self.scaler = joblib.load(scaler_path)
        self.encoder = joblib.load(encoder_path)

        tokenizer_source: str
        if tokenizer_path and Path(tokenizer_path).exists():
            tokenizer_source = tokenizer_path
        elif (artifacts_dir / "tokenizer_config.json").exists() or (
            artifacts_dir / "tokenizer.json"
        ).exists():
            tokenizer_source = str(artifacts_dir)
        else:
            tokenizer_source = model_name

        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_source)
        self.text_model = AutoModel.from_pretrained(model_name).to(self.device)
        self.text_model.eval()

        self.expected_feature_count = getattr(self.xgb_model, "n_features_in_", None)

    @staticmethod
    def _safe_float(value: object, default: float) -> float:
        if value is None:
            return float(default)
        try:
            if isinstance(value, str) and not value.strip():
                return float(default)
            return float(value)
        except Exception:
            return float(default)

    def _normalize_row(self, row: Dict[str, object]) -> Dict[str, object]:
        # Allow either user-friendly now-values or explicit lagged values.
        btc_price_now = self._safe_float(row.get("btc_price_now"), 0.0)
        btc_open = self._safe_float(row.get("btc_open_lag1"), btc_price_now)
        btc_high = self._safe_float(row.get("btc_high_lag1"), btc_open)
        btc_low = self._safe_float(row.get("btc_low_lag1"), btc_open)
        btc_close = self._safe_float(row.get("btc_close_lag1"), btc_open)
        btc_volume = self._safe_float(row.get("btc_volume_lag1"), 1.0)
        fng_value = self._safe_float(
            row.get("fng_value_lag1", row.get("fng_value")),
            50.0,
        )

        # If optional engineered fields are missing, derive conservative defaults.
        btc_return = self._safe_float(
            row.get("btc_return_lag1"),
            (btc_close / btc_open - 1.0) if btc_open != 0 else 0.0,
        )
        btc_volatility = self._safe_float(
            row.get("btc_volatility_lag1"),
            ((btc_high - btc_low) / btc_open) if btc_open != 0 else 0.0,
        )
        volume_7d_avg = self._safe_float(row.get("btc_volume_7d_avg_lag1"), btc_volume)
        btc_volume_change = self._safe_float(
            row.get("btc_volume_change_vs_7d_lag1"),
            (btc_volume / volume_7d_avg - 1.0) if volume_7d_avg != 0 else 0.0,
        )

        fng_cls = (
            str(
                row.get(
                    "fng_classification_lag1",
                    row.get("fng_classification", "Neutral"),
                )
            )
            .strip()
            .title()
        )

        text = str(row.get("text", "")).strip()
        if not text:
            raise ValueError("Each row must include non-empty 'text'.")

        return {
            "text": text,
            "btc_open_lag1": btc_open,
            "btc_high_lag1": btc_high,
            "btc_low_lag1": btc_low,
            "btc_close_lag1": btc_close,
            "btc_volume_lag1": btc_volume,
            "fng_value_lag1": fng_value,
            "fng_classification_lag1": fng_cls,
            "btc_return_lag1": btc_return,
            "btc_volatility_lag1": btc_volatility,
            "btc_volume_change_vs_7d_lag1": btc_volume_change,
        }
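
    # The [CLS] vector (position 0 of last_hidden_state) is used as the sentence
    # embedding below, presumably matching how the training features were built.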
    def _embed_texts(self, texts: List[str]) -> np.ndarray:
        embs: List[np.ndarray] = []
        with torch.no_grad():
            for i in range(0, len(texts), self.batch_size):
                batch_texts = texts[i : i + self.batch_size]
                enc = self.tokenizer(
                    batch_texts,
                    truncation=True,
                    padding=True,
                    max_length=self.max_length,
                    return_tensors="pt",
                )
                input_ids = enc["input_ids"].to(self.device)
                attention_mask = enc["attention_mask"].to(self.device)
                outputs = self.text_model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                )
                cls_vec = outputs.last_hidden_state[:, 0, :].cpu().numpy()
                embs.append(cls_vec)
        return np.vstack(embs).astype(np.float32)

    def _build_numeric_features(self, rows: List[Dict[str, object]]) -> np.ndarray:
        df = pd.DataFrame(rows)
        x_num = self.scaler.transform(df[NUMERIC_FEATURE_NAMES])
        x_cat = self.encoder.transform(df[["fng_classification_lag1"]])
        return np.hstack([x_num, x_cat]).astype(np.float32)

    def predict_rows(self, raw_rows: List[Dict[str, object]]) -> pd.DataFrame:
        normalized = [self._normalize_row(row) for row in raw_rows]
        texts = [r["text"] for r in normalized]

        x_text = self._embed_texts(texts)
        x_num = self._build_numeric_features(normalized)
        x_all = np.hstack([x_text, x_num]).astype(np.float32)

        if self.expected_feature_count is not None and x_all.shape[1] != int(
            self.expected_feature_count
        ):
            raise ValueError(
                "Feature mismatch: model expects "
                f"{self.expected_feature_count} columns but got {x_all.shape[1]}. "
                "Check model_name/tokenizer/artifacts consistency."
            )

        proba_up = self.xgb_model.predict_proba(x_all)[:, 1]
        pred = (proba_up >= 0.5).astype(int)
        confidence = np.maximum(proba_up, 1.0 - proba_up)
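        # Map P(up) from [0, 1] onto a signed score in [-1, 1]:
        # -1 fully bearish, 0 neutral, +1 fully bullish.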
        signed_score = 2.0 * proba_up - 1.0

        out = pd.DataFrame(normalized)
        out["pred_class"] = pred
        out["sentiment"] = np.where(pred == 1, "Bullish", "Bearish")
        out["score"] = signed_score
        out["prob_up"] = proba_up
        out["confidence"] = confidence
        return out


def load_rows_from_input_file(path: Path) -> List[Dict[str, object]]:
    if not path.exists():
        raise FileNotFoundError(f"Input file not found: {path}")

    suffix = path.suffix.lower()
    if suffix == ".csv":
        df = pd.read_csv(path)
    elif suffix in {".parquet", ".pq"}:
        df = pd.read_parquet(path)
    elif suffix == ".json":
        df = pd.read_json(path)
    else:
        raise ValueError("Supported input formats: .csv, .parquet, .pq, .json")

    return df.to_dict(orient="records")


def save_predictions(df: pd.DataFrame, output_path: Path) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    suffix = output_path.suffix.lower()
    if suffix == ".csv":
        df.to_csv(output_path, index=False)
    elif suffix in {".parquet", ".pq"}:
        df.to_parquet(output_path, index=False)
    elif suffix == ".json":
        df.to_json(output_path, orient="records", force_ascii=False, indent=2)
    else:
        raise ValueError("Supported output formats: .csv, .parquet, .pq, .json")


def build_arg_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(
        description=(
            "Playground inference for crypto-news sentiment using text + BTC + FNG. "
            "Supports single input, dataset batch, and optional Gradio UI."
        )
    )

    parser.add_argument("--artifacts_dir", type=str, default=str(DEFAULT_ARTIFACTS_DIR))
    parser.add_argument("--model_name", type=str, default="auto")
    parser.add_argument("--tokenizer_path", type=str, default="")
    parser.add_argument("--max_length", type=int, default=96)
    parser.add_argument("--batch_size", type=int, default=32)

    sub = parser.add_subparsers(dest="mode", required=True)

    single = sub.add_parser("single", help="Predict one news row.")
    single.add_argument("--text", type=str, required=True)
    single.add_argument("--btc_price_now", type=float, default=0.0)
    single.add_argument("--fng_value", type=float, required=True)
    single.add_argument("--fng_classification", type=str, required=True)
    single.add_argument("--btc_open_lag1", type=float)
    single.add_argument("--btc_high_lag1", type=float)
    single.add_argument("--btc_low_lag1", type=float)
    single.add_argument("--btc_close_lag1", type=float)
    single.add_argument("--btc_volume_lag1", type=float)
    single.add_argument("--btc_return_lag1", type=float)
    single.add_argument("--btc_volatility_lag1", type=float)
    single.add_argument("--btc_volume_change_vs_7d_lag1", type=float)

    batch = sub.add_parser("batch", help="Predict a full dataset file.")
    batch.add_argument("--input_path", type=str, required=True)
    batch.add_argument("--output_path", type=str, required=True)

    ui = sub.add_parser("ui", help="Launch Gradio playground UI.")
    ui.add_argument("--host", type=str, default="0.0.0.0")
    ui.add_argument("--port", type=int, default=7860)
    ui.add_argument("--share", action="store_true")

    return parser
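

# Example invocations (file names here are hypothetical; global flags such as
# --model_name go before the subcommand):
#   python hf_playground_inference.py single --text "ETF inflows surge" \
#       --fng_value 55 --fng_classification Greed
#   python hf_playground_inference.py batch --input_path news.csv --output_path preds.csv
#   python hf_playground_inference.py ui --port 7860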
def run_single(args: argparse.Namespace, predictor: PlaygroundPredictor) -> None:
    raw_row = {
        "text": args.text,
        "btc_price_now": args.btc_price_now,
        "fng_value": args.fng_value,
        "fng_classification": args.fng_classification,
        "btc_open_lag1": args.btc_open_lag1,
        "btc_high_lag1": args.btc_high_lag1,
        "btc_low_lag1": args.btc_low_lag1,
        "btc_close_lag1": args.btc_close_lag1,
        "btc_volume_lag1": args.btc_volume_lag1,
        "btc_return_lag1": args.btc_return_lag1,
        "btc_volatility_lag1": args.btc_volatility_lag1,
        "btc_volume_change_vs_7d_lag1": args.btc_volume_change_vs_7d_lag1,
    }
    pred_df = predictor.predict_rows([raw_row])
    print(pred_df.to_json(orient="records", indent=2))


def run_batch(args: argparse.Namespace, predictor: PlaygroundPredictor) -> None:
    rows = load_rows_from_input_file(Path(args.input_path))
    pred_df = predictor.predict_rows(rows)
    save_predictions(pred_df, Path(args.output_path))
    print(f"Predictions saved: {args.output_path}")
    print(f"Rows processed: {len(pred_df)}")


def create_gradio_app(predictor: PlaygroundPredictor):
    try:
        import gradio as gr  # type: ignore[import-not-found]
    except ImportError as exc:
        raise ImportError(
            "gradio is not installed. Run: pip install gradio"
        ) from exc

    def predict_one(
        text: str,
        btc_price_now: float,
        fng_value: float,
        fng_classification: str,
    ) -> Tuple[str, float, float, float]:
        rows = [
            {
                "text": text,
                "btc_price_now": btc_price_now,
                "fng_value": fng_value,
                "fng_classification": fng_classification,
            }
        ]
        out = predictor.predict_rows(rows).iloc[0]
        return (
            str(out["sentiment"]),
            float(out["score"]),
            float(out["confidence"]),
            float(out["prob_up"]),
        )

    with gr.Blocks(title="Crypto News Sentiment Playground") as demo:
        gr.Markdown("# Crypto News Sentiment Playground")
        gr.Markdown(
            "Enter a news snippet plus market context to get class, score, and confidence."
        )

        with gr.Row():
            text = gr.Textbox(
                label="News Text",
                lines=6,
                placeholder="Paste a crypto news piece here...",
            )

        with gr.Row():
            btc_price_now = gr.Number(label="BTC Price Now", value=70000)
            fng_value = gr.Number(label="FNG Index", value=50)
            fng_classification = gr.Dropdown(
                choices=["Extreme Fear", "Fear", "Neutral", "Greed"],
                value="Neutral",
                label="FNG Classification",
            )

        run_btn = gr.Button("Generate Sentiment")

        sentiment = gr.Textbox(label="Sentiment")
        score = gr.Number(label="Score (-1 to +1)")
        confidence = gr.Number(label="Confidence (0 to 1)")
        prob_up = gr.Number(label="Probability of Up Move")

        run_btn.click(
            fn=predict_one,
            inputs=[text, btc_price_now, fng_value, fng_classification],
            outputs=[sentiment, score, confidence, prob_up],
        )

    return demo


def run_ui(args: argparse.Namespace, predictor: PlaygroundPredictor) -> None:
    app = create_gradio_app(predictor)
    app.launch(server_name=args.host, server_port=args.port, share=args.share)


def main() -> None:
    parser = build_arg_parser()
    args = parser.parse_args()

    artifacts_dir = Path(args.artifacts_dir)
    model_name = args.model_name
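    # "auto" prefers the model name recorded in the training metrics, but lets
    # the XGBoost feature width override it when the two disagree.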
    if model_name == "auto":
        metrics_model = resolve_default_model_name(
            artifacts_dir=artifacts_dir,
            fallback="boltuix/bert-lite",
        )
        required_text_dim = infer_required_text_dim(artifacts_dir)
        model_name = pick_model_from_required_dim(required_text_dim, metrics_model)

    tokenizer_path = args.tokenizer_path.strip() or None

    predictor = PlaygroundPredictor(
        artifacts_dir=artifacts_dir,
        model_name=model_name,
        tokenizer_path=tokenizer_path,
        max_length=args.max_length,
        batch_size=args.batch_size,
    )

    if args.mode == "single":
        run_single(args, predictor)
    elif args.mode == "batch":
        run_batch(args, predictor)
    elif args.mode == "ui":
        run_ui(args, predictor)
    else:
        raise ValueError(f"Unsupported mode: {args.mode}")


if __name__ == "__main__":
    main()
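
For programmatic use outside the CLI, here is a minimal sketch. It assumes the outputs_compare_models artifacts exist and that the script is importable from the working directory; the sample row values are purely illustrative:

from pathlib import Path

from hf_playground_inference import PlaygroundPredictor

# Build a predictor against the committed artifacts; model_name could instead be
# "ProsusAI/finbert" if the XGBoost model was trained on 768-dim embeddings.
predictor = PlaygroundPredictor(
    artifacts_dir=Path("outputs_compare_models"),
    model_name="boltuix/bert-lite",
    tokenizer_path=None,
    max_length=96,
    batch_size=32,
)

# One user-friendly row; _normalize_row() derives the lagged features.
rows = [
    {
        "text": "Bitcoin ETF inflows hit a weekly record.",
        "btc_price_now": 70000.0,
        "fng_value": 55,
        "fng_classification": "Greed",
    }
]
pred = predictor.predict_rows(rows)
print(pred[["sentiment", "score", "prob_up", "confidence"]])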