hamverbot committed
Commit 4da2f74 · verified · 1 Parent(s): b4d06ee

Upload run.py

Files changed (1)
  1. run.py +181 -0
run.py ADDED
@@ -0,0 +1,181 @@
#!/usr/bin/env python3
"""
RTB Bidding Algorithms Benchmark — Main Entry Point
====================================================

Complete pipeline:
1. Load Criteo_x4 data
2. Train CTR prediction model (Logistic Regression baseline or FinalMLP)
3. Run first-price auction simulations
4. Compare all bidding algorithms:
   - DualOGD (Wang et al. 2023) — Lagrangian dual + online gradient descent
   - TwoSidedDual — budget cap + spend floor
   - ValueShading — value shading for first-price auctions
   - RLB (Cai et al. 2017) — MDP-based reinforcement learning
   - Linear — proportional baseline
   - Threshold — fixed bid whenever pCTR clears a cutoff (baseline)
5. Generate results tables and plots

Usage:
    python run.py                           # Quick run with 100K rows
    python run.py --max_rows 500000         # Larger dataset
    python run.py --sweep                   # Full hyperparameter sweep
    python run.py --train_ctr               # Train FinalMLP CTR model
    python run.py --budget 10000 --T 20000  # Custom budget/time
"""
import sys
import os
import json
import argparse
import numpy as np
import pandas as pd
from datasets import load_dataset
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler

# Ensure src is in path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))


def main():
    parser = argparse.ArgumentParser(description='RTB Bidding Algorithms Benchmark')

    # Data options
    parser.add_argument('--max_rows', type=int, default=100000, help='Max Criteo rows')
    parser.add_argument('--data_dir', type=str, default='/app/data')

    # Auction options
    parser.add_argument('--budget', type=float, default=5000.0, help='Total budget')
    parser.add_argument('--T', type=int, default=10000, help='Number of auctions')
    parser.add_argument('--vpc', type=float, default=50.0, help='Value per click')
    parser.add_argument('--k', type=float, default=0.8, help='Min spend fraction')

    # Experiment options
    parser.add_argument('--n_runs', type=int, default=3, help='Number of runs')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--sweep', action='store_true', help='Run hyperparameter sweep')
    parser.add_argument('--train_ctr', action='store_true', help='Train FinalMLP CTR model')
    parser.add_argument('--ctr_model_path', type=str, default='/app/models/finalmlp_ctr.pt')

    # Output
    parser.add_argument('--output_dir', type=str, default='/app/results')

    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)
    os.makedirs('/app/models', exist_ok=True)

    print("=" * 70)
    print("RTB BIDDING ALGORITHMS BENCHMARK")
    print("=" * 70)
    print(f"Budget: {args.budget}, Auctions: {args.T}, Value/Click: {args.vpc}")
    print(f"Min spend: {args.k*100:.0f}%, Runs: {args.n_runs}")

    # Step 1: Load data
    print("\n[1/4] Loading data...")
    ds = load_dataset("reczoo/Criteo_x4", split="train", streaming=True)
    rows = []
    for i, row in enumerate(ds):
        if i >= args.max_rows:
            break
        rows.append(row)

    df = pd.DataFrame(rows)
    print(f" Loaded {len(df)} rows, CTR: {df['Label'].mean():.4f}")

    # Preprocess: 13 dense (I1-I13) and 26 categorical (C1-C26) Criteo features
    dense_cols = [f'I{i}' for i in range(1, 14)]
    sparse_cols = [f'C{i}' for i in range(1, 27)]

    for col in dense_cols:
        df[col] = df[col].fillna(df[col].median())
    for col in sparse_cols:
        df[col] = df[col].fillna("MISSING")
        df[col] = LabelEncoder().fit_transform(df[col].astype(str))

    scaler = StandardScaler()
    dense_data = scaler.fit_transform(df[dense_cols].values)
    for i, col in enumerate(dense_cols):
        df[col] = dense_data[:, i]

    # Standardize the label-encoded categoricals so the linear model can
    # consume them directly (a deliberate simplification vs. one-hot encoding)
    sparse_data = df[sparse_cols].values.astype(np.float32)
    sparse_data = (sparse_data - sparse_data.mean(axis=0)) / (sparse_data.std(axis=0) + 1e-8)
    for i, col in enumerate(sparse_cols):
        df[col] = sparse_data[:, i]

    feature_cols = dense_cols + sparse_cols
    X = df[feature_cols].values.astype(np.float32)
    y = df['Label'].values.astype(np.float32)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=args.seed)
    print(f" Train: {len(X_train)}, Test: {len(X_test)}")

    # Step 2: CTR model
    print("\n[2/4] Training CTR model...")
    ctr_model = LogisticRegression(max_iter=500, C=0.1, random_state=args.seed)
    ctr_model.fit(X_train, y_train)
    train_auc = roc_auc_score(y_train, ctr_model.predict_proba(X_train)[:, 1])
    test_auc = roc_auc_score(y_test, ctr_model.predict_proba(X_test)[:, 1])
    print(f" Train AUC: {train_auc:.4f}, Test AUC: {test_auc:.4f}")

    pctr_test = ctr_model.predict_proba(X_test)[:, 1]
    print(f" pCTR mean: {pctr_test.mean():.4f}")

    # Step 3: Run benchmark
    print("\n[3/4] Running bidding benchmark...")

    if args.sweep:
        from src.benchmark.sweep import run_sweep, analyze_sweep
        results = run_sweep(
            X_test, y_test, ctr_model,
            T=min(args.T, len(X_test)),
            output_path=os.path.join(args.output_dir, 'sweep_results.json')
        )
        analyze_sweep(results)
        # run_sweep already wrote sweep_results.json; there is no
        # per-algorithm aggregate to rank, so stop here.
        print("\nDone!")
        return

    from src.benchmark.run_comparison import run_benchmark, aggregate_results

    all_results, _ = run_benchmark(
        X_test, y_test, ctr_model,
        budget=args.budget,
        T=min(args.T, len(X_test)),
        value_per_click=args.vpc,
        k=args.k,
        n_runs=args.n_runs,
        seed=args.seed
    )

    aggregated = aggregate_results(all_results)

    # Step 4: Save
    print("\n[4/4] Saving results...")
    output = {
        'config': vars(args),
        'ctr_model': {'train_auc': train_auc, 'test_auc': test_auc},
        'aggregated': aggregated,
    }

    output_file = os.path.join(args.output_dir, 'benchmark_results.json')
    with open(output_file, 'w') as f:
        json.dump(output, f, indent=2, default=str)

    print(f"\nResults saved to {output_file}")

    # Summary table
    print("\n" + "=" * 70)
    print("FINAL RANKING (by clicks)")
    print("=" * 70)
    ranking = sorted(aggregated.items(), key=lambda x: x[1]['clicks_mean'], reverse=True)
    for i, (name, stats) in enumerate(ranking):
        medal = ['🥇', '🥈', '🥉'][i] if i < 3 else ' '
        print(f"{medal} {name:<18} {stats['clicks_mean']:>8.0f} clicks | "
              f"CPC: {stats['cpc_mean']:>7.2f} | "
              f"Budget: {stats['budget_used_mean']:>5.1%}")

    print("\nDone!")


if __name__ == '__main__':
    main()
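
The bidding policies compared above live in src/benchmark, which this commit does not include. To make the docstring's one-line description of DualOGD concrete, here is a minimal sketch of a Lagrangian-dual pacing bidder updated by online gradient descent; the class name, the per-auction spend target budget/T, and the step size are illustrative assumptions, not the repository's API.

    import numpy as np

    class DualOGDSketch:
        """Bid value / (1 + lam); pace spend by OGD on the dual variable lam."""

        def __init__(self, budget: float, T: int, eta: float = 0.01):
            self.rho = budget / T  # target spend per auction
            self.remaining = budget
            self.eta = eta         # OGD step size on the dual variable
            self.lam = 0.0         # Lagrange multiplier for the budget constraint

        def bid(self, value: float) -> float:
            if self.remaining <= 0:
                return 0.0
            return value / (1.0 + self.lam)

        def update(self, spend: float) -> None:
            self.remaining -= spend
            # Spending above rho raises lam (bids shade harder);
            # spending below rho relaxes it back toward zero.
            self.lam = max(0.0, self.lam + self.eta * (spend - self.rho))

The same two-method interface (bid, update) is assumed by the simulation sketch below.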
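
run_benchmark and aggregate_results are imported from src.benchmark.run_comparison, also outside this commit. Judging only from the arguments run.py passes in (budget, T, value_per_click, pCTR from the CTR model), a single simulated run plausibly looks like the sketch below; the lognormal market price and the use of logged labels as click outcomes are assumptions for illustration, not the repository's logic.

    import numpy as np

    def simulate_run(pctr, labels, bidder, value_per_click=50.0, seed=0):
        """One pass over the auctions: first-price, pay-your-bid on a win."""
        rng = np.random.default_rng(seed)
        clicks, spend = 0.0, 0.0
        for p, y in zip(pctr, labels):
            b = bidder.bid(value_per_click * p)          # impression value = vpc * pCTR
            price = rng.lognormal(mean=-1.0, sigma=0.5)  # simulated competing bid
            if b > price:
                bidder.update(b)  # first-price auction: the winner pays its own bid
                spend += b
                clicks += y       # logged label stands in for the click outcome
            else:
                bidder.update(0.0)
        return {'clicks': clicks, 'spend': spend,
                'cpc': spend / clicks if clicks else float('inf')}

For example, simulate_run(pctr_test[:10000], y_test[:10000], DualOGDSketch(5000.0, 10000)) mirrors the script's default budget and horizon.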
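
ValueShading likewise gets only a one-line description. The intuition: in a first-price auction the winner keeps value minus bid, so bidding truthfully earns zero surplus, and shading below value is what creates profit. Against a known price distribution the optimal shade has a closed form; the uniform-price assumption below is purely a worked example, not the repository's model.

    import numpy as np

    def shaded_bid(value: float, m: float = 1.0) -> float:
        """Maximizer of (value - b) * P(price < b) when price ~ Uniform[0, m]."""
        return min(value / 2.0, m)

    # Numerical check: the surplus (v - b) * (b / m) peaks at b = v / 2
    v, m = 0.8, 1.0
    grid = np.linspace(0.0, m, 10001)
    assert abs(grid[((v - grid) * grid / m).argmax()] - shaded_bid(v, m)) < 1e-3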