#!/usr/bin/env python3
"""
RTB Bidding Algorithms Benchmark β€” Main Entry Point
====================================================
Complete pipeline:
1. Load Criteo_x4 data
2. Train CTR prediction model (Logistic Regression baseline or FinalMLP)
3. Run first-price auction simulations
4. Compare all bidding algorithms:
- DualOGD (Wang et al. 2023) β€” Lagrangian dual + online gradient descent
- TwoSidedDual β€” Budget cap + spend floor
- ValueShading β€” Value shading for first-price
- RLB (Cai et al. 2017) β€” MDP-based reinforcement learning
- Linear β€” Proportional baseline
- Threshold β€” Fixed-if-pCTR baseline
5. Generate results tables and plots
Usage:
python run.py # Quick run with 100K rows
python run.py --max_rows 500000 # Larger dataset
python run.py --sweep # Full hyperparameter sweep
python run.py --train_ctr # Train FinalMLP CTR model
python run.py --budget 10000 --T 20000 # Custom budget/time
"""
import sys
import os
import json
import argparse
import numpy as np
import pandas as pd
from datasets import load_dataset
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
# Ensure src is in path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
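
# --- Illustrative bid-rule sketches ------------------------------------------
# Toy versions of the simplest strategies listed in the docstring, assuming a
# scalar pCTR per auction. These are NOT the benchmark implementations (those
# live in src.benchmark); they only document the core bid rules.
def _linear_bid(pctr, value_per_click, alpha=1.0):
    """Linear baseline: bid proportional to expected click value."""
    return alpha * value_per_click * pctr

def _threshold_bid(pctr, fixed_bid, tau=0.5):
    """Threshold baseline: place a fixed bid only when pCTR clears tau."""
    return fixed_bid if pctr >= tau else 0.0

def _dual_pacing_step(lam, spend, budget, T, eta=0.01):
    """One online-gradient step on the Lagrangian budget multiplier, as in
    dual-based pacing: lambda <- max(0, lambda + eta * (spend - budget / T)).
    A dual bidder then shades its value by 1 / (1 + lambda)."""
    return max(0.0, lam + eta * (spend - budget / T))
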
def main():
    parser = argparse.ArgumentParser(description='RTB Bidding Algorithms Benchmark')
    # Data options
    parser.add_argument('--max_rows', type=int, default=100000, help='Max Criteo rows')
    parser.add_argument('--data_dir', type=str, default='/app/data')
    # Auction options
    parser.add_argument('--budget', type=float, default=5000.0, help='Total budget')
    parser.add_argument('--T', type=int, default=10000, help='Number of auctions')
    parser.add_argument('--vpc', type=float, default=50.0, help='Value per click')
    parser.add_argument('--k', type=float, default=0.8, help='Min spend fraction')
    # Experiment options
    parser.add_argument('--n_runs', type=int, default=3, help='Number of runs')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--sweep', action='store_true', help='Run hyperparameter sweep')
    parser.add_argument('--train_ctr', action='store_true', help='Train FinalMLP CTR model')
    parser.add_argument('--ctr_model_path', type=str, default='/app/models/finalmlp_ctr.pt')
    # Output
    parser.add_argument('--output_dir', type=str, default='/app/results')
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    os.makedirs('/app/models', exist_ok=True)
    print("=" * 70)
    print("RTB BIDDING ALGORITHMS BENCHMARK")
    print("=" * 70)
    print(f"Budget: {args.budget}, Auctions: {args.T}, Value/Click: {args.vpc}")
    print(f"Min spend: {args.k*100:.0f}%, Runs: {args.n_runs}")
    # Step 1: Load data
    print("\n[1/4] Loading data...")
    ds = load_dataset("reczoo/Criteo_x4", split="train", streaming=True)
    rows = []
    for i, row in enumerate(ds):
        if i >= args.max_rows:
            break
        rows.append(row)
    df = pd.DataFrame(rows)
    print(f" Loaded {len(df)} rows, CTR: {df['Label'].mean():.4f}")
    # Preprocess
    dense_cols = [f'I{i}' for i in range(1, 14)]
    sparse_cols = [f'C{i}' for i in range(1, 27)]
    for col in dense_cols:
        df[col] = df[col].fillna(df[col].median())
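    # NOTE: label-encoding the categorical IDs and z-scoring them below treats
    # arbitrary category codes as continuous features – a crude stand-in for
    # learned embeddings that keeps the logistic-regression baseline cheap.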
    for col in sparse_cols:
        df[col] = df[col].fillna("MISSING")
        df[col] = LabelEncoder().fit_transform(df[col].astype(str))
    scaler = StandardScaler()
    dense_data = scaler.fit_transform(df[dense_cols].values)
    for i, col in enumerate(dense_cols):
        df[col] = dense_data[:, i]
    sparse_data = df[sparse_cols].values.astype(np.float32)
    sparse_data = (sparse_data - sparse_data.mean(axis=0)) / (sparse_data.std(axis=0) + 1e-8)
    for i, col in enumerate(sparse_cols):
        df[col] = sparse_data[:, i]
    feature_cols = dense_cols + sparse_cols
    X = df[feature_cols].values.astype(np.float32)
    y = df['Label'].values.astype(np.float32)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=args.seed)
    print(f" Train: {len(X_train)}, Test: {len(X_test)}")
    # Step 2: CTR model
    print("\n[2/4] Training CTR model...")
    ctr_model = LogisticRegression(max_iter=500, C=0.1, random_state=args.seed)
    ctr_model.fit(X_train, y_train)
    train_auc = roc_auc_score(y_train, ctr_model.predict_proba(X_train)[:, 1])
    test_auc = roc_auc_score(y_test, ctr_model.predict_proba(X_test)[:, 1])
    print(f" Train AUC: {train_auc:.4f}, Test AUC: {test_auc:.4f}")
    pctr_test = ctr_model.predict_proba(X_test)[:, 1]
    print(f" pCTR mean: {pctr_test.mean():.4f}")
    # Step 3: Run benchmark
    print("\n[3/4] Running bidding benchmark...")
    if args.sweep:
        from src.benchmark.sweep import run_sweep, analyze_sweep
        results = run_sweep(
            X_test, y_test, ctr_model,
            T=min(args.T, len(X_test)),
            output_path=os.path.join(args.output_dir, 'sweep_results.json')
        )
        analyze_sweep(results)
        # Sweep results are already written to sweep_results.json above; the
        # single-configuration summary below does not apply, so stop here.
        return
    from src.benchmark.run_comparison import run_benchmark, aggregate_results
    all_results, _ = run_benchmark(
        X_test, y_test, ctr_model,
        budget=args.budget,
        T=min(args.T, len(X_test)),
        value_per_click=args.vpc,
        k=args.k,
        n_runs=args.n_runs,
        seed=args.seed
    )
    aggregated = aggregate_results(all_results)
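    # aggregated maps algorithm name -> cross-run summary stats
    # (clicks_mean, cpc_mean, budget_used_mean, ...) used by the ranking below.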
    # Step 4: Save
    print("\n[4/4] Saving results...")
    output = {
        'config': vars(args),
        'ctr_model': {'train_auc': train_auc, 'test_auc': test_auc},
        'aggregated': aggregated,
    }
    output_file = os.path.join(args.output_dir, 'benchmark_results.json')
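    # default=str stringifies anything json cannot serialize natively
    # (e.g. numpy scalars in the aggregated stats).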
    with open(output_file, 'w') as f:
        json.dump(output, f, indent=2, default=str)
    print(f"\nResults saved to {output_file}")
    # Summary table
    print("\n" + "=" * 70)
    print("FINAL RANKING (by clicks)")
    print("=" * 70)
    ranking = sorted(aggregated.items(), key=lambda x: x[1]['clicks_mean'], reverse=True)
    for i, (name, stats) in enumerate(ranking):
        medal = ['🥇', '🥈', '🥉'][i] if i < 3 else ' '
        print(f"{medal} {name:<18} {stats['clicks_mean']:>8.0f} clicks | "
              f"CPC: {stats['cpc_mean']:>7.2f} | "
              f"Budget: {stats['budget_used_mean']:>5.1%}")
    print("\nDone!")
if __name__ == '__main__':
    main()