# Property_Inference / sample_submission.py
# (Hugging Face page header residue, kept as comments so the file parses:
#  maitri01's picture / "Update sample_submission.py" / commit 3914dca verified)
"""
Sample submission script for Task 23: Property Inference.
Auxiliary dataset: https://huggingface.co/datasets/SprintML/Property_Inference
Download auxiliary.npz from there and place it in the same directory as this script.
Steps:
1. Query the predict API for each model using auxiliary probe data
2. Compute a confidence score per model (higher = more likely World A)
3. Save predictions to submission.csv
4. Submit submission.csv to the hackathon platform
Submission format (submission.csv):
model_id,score
0,0.73
1,0.41
...
Where score is a float in [0, 1]:
- Higher score = more likely World A (70% male training data)
- Lower score = more likely World B (50% male training data)
- 0.5 = uncertain (this is the default for missing model_ids)
All 200 model_ids should be included. Missing ones default to 0.5.
API response format:
{
"model_id": 0,
"batch_size": 100,
"labels": [[4, 7], [2, 9], ...], # top-2 predicted classes per record
"probs": [[0.52, 0.21], [0.44, 0.18], ...] # top-2 softmax probabilities per record
}
API rate limits:
- Per model: 2 minute cooldown after a successful query
- Failed requests: 2 minute cooldown
- Max batch size: 500 records per request
"""
import csv
import json
import os
import numpy as np
import requests
# ── Configuration ──────────────────────────────────────────────────────────────
BASE_URL = "http://35.192.205.84:80"
API_KEY = "YOUR_API_KEY_HERE"
TASK_ID = "23-property-inference"

# Paths (relative to this script)
HERE = os.path.dirname(os.path.abspath(__file__))

# List of model ids to query. Use a context manager so the file handle is
# closed (the original `json.load(open(...))` never closed it).
with open(os.path.join(HERE, "model_ids.json")) as _model_ids_file:
    MODEL_IDS = json.load(_model_ids_file)

# Auxiliary probe data downloaded from the SprintML/Property_Inference dataset.
AUXILIARY_NPZ = np.load(os.path.join(HERE, "auxiliary.npz"))
PROBE_FEATURES = AUXILIARY_NPZ["features"].tolist()  # shape (10000, 10), already normalized

OUTPUT_CSV = "submission.csv"
HEADERS = {"X-API-Key": API_KEY, "Content-Type": "application/json"}
# ── Query API ──────────────────────────────────────────────────────────────────
def query_model(model_id: int, features: list) -> dict:
    """
    Query the predict API for one model.

    Args:
        model_id: id of the shadow model to query.
        features: list of probe records (each a list of floats); the API
            accepts at most 500 records per request.

    Returns:
        dict with:
            labels: list[list[int]]   -- top-2 predicted classes per record
            probs:  list[list[float]] -- top-2 softmax probabilities per record

    Raises:
        requests.exceptions.HTTPError: on non-2xx responses (e.g. 429 while
            the per-model 2-minute cooldown has not elapsed).
    """
    resp = requests.post(
        # TASK_ID == "23-property-inference"; reuse the constant instead of the
        # hardcoded literal so the path lives in one place (the submit call in
        # main already does this).
        f"{BASE_URL}/{TASK_ID}/predict",
        headers=HEADERS,
        json={"model_id": model_id, "features": features},
        timeout=30,
    )
    resp.raise_for_status()
    data = resp.json()
    return {"labels": data["labels"], "probs": data["probs"]}
# ── Score computation (replace with your own method) ───────────────────────────
def compute_score(labels: list, probs: list) -> float:
    """
    Produce a confidence score in [0, 1] that the queried model belongs to
    World A (higher = more likely World A, i.e. 70% male training data).

    Args:
        labels: top-2 predicted class indices per record (ignored by this
            placeholder baseline).
        probs: top-2 softmax probabilities per record (ignored by this
            placeholder baseline).

    Returns:
        A float clamped to [0.001, 0.999]. The baseline draws it uniformly
        at random -- replace this with a real inference method.
    """
    # Placeholder baseline: uniform random draw, clamped away from the exact
    # endpoints 0 and 1.
    raw = np.random.uniform(0.0, 1.0)
    return float(min(max(raw, 0.001), 0.999))
# ── Main ───────────────────────────────────────────────────────────────────────
def _collect_scores(probe_batch: list) -> dict:
    """Query every model id and return {model_id: score}.

    Models whose request fails with a 429 (rate limit) or a transient network
    error are skipped; they fall back to the 0.5 default when the CSV is
    written. Non-429 HTTP errors still propagate, since they indicate a
    misconfiguration (bad API key, wrong URL) rather than a transient fault.
    """
    predictions = {}
    for model_id in MODEL_IDS:
        print(f"Querying model {model_id}...")
        try:
            result = query_model(model_id, probe_batch)
        except requests.exceptions.HTTPError as e:
            if e.response is not None and e.response.status_code == 429:
                print(f"  Rate limited on model {model_id} — skipping (will default to 0.5)")
                continue
            raise
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
            # A transient network failure mid-run should not discard all the
            # results already gathered (each model costs a 2-minute cooldown).
            print(f"  Request failed for model {model_id} ({e}) — skipping (will default to 0.5)")
            continue
        score = compute_score(result["labels"], result["probs"])
        predictions[model_id] = score
        print(f"  score={score:.4f}")
    return predictions


def _write_csv(predictions: dict) -> None:
    """Write OUTPUT_CSV with one row per model id; ids missing from
    *predictions* default to 0.5 (uncertain)."""
    with open(OUTPUT_CSV, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["model_id", "score"])
        for model_id in MODEL_IDS:
            score = predictions.get(model_id, 0.5)
            writer.writerow([model_id, round(score, 6)])


def _submit() -> None:
    """Upload OUTPUT_CSV to the hackathon platform and print the response."""
    with open(OUTPUT_CSV, "rb") as f:
        resp = requests.post(
            f"{BASE_URL}/submit/{TASK_ID}",
            headers={"X-API-Key": API_KEY},
            files={"file": (OUTPUT_CSV, f, "text/csv")},
            timeout=120,
        )
    print("Response:", resp.json())


def main():
    """Query all models, write submission.csv, and submit it."""
    # Use the first 100 probe records (the API caps batches at 500).
    probe_batch = PROBE_FEATURES[:100]
    predictions = _collect_scores(probe_batch)
    _write_csv(predictions)
    print(f"\nSaved {len(predictions)} predictions to {OUTPUT_CSV}")
    print(f"Missing models (defaulted to 0.5): {len(MODEL_IDS) - len(predictions)}")
    print("\nSubmitting...")
    _submit()
# Entry point: run the full query/score/submit pipeline only when executed
# directly as a script (not when imported as a module).
if __name__ == "__main__":
    main()