maitri01 commited on
Commit
3914dca
·
verified · 1 Parent(s): 24fc92e
1 Parent(s): 24fc92e

Update sample_submission.py

Browse files
Files changed (1) hide show
  1. sample_submission.py +31 -13
sample_submission.py CHANGED
@@ -20,14 +20,21 @@ Where score is a float in [0, 1]:
20
  - Higher score = more likely World A (70% male training data)
21
  - Lower score = more likely World B (50% male training data)
22
  - 0.5 = uncertain (this is the default for missing model_ids)
23
- - Do not submit scores of exactly 0 or 1
24
 
25
  All 200 model_ids should be included. Missing ones default to 0.5.
26
 
 
 
 
 
 
 
 
 
27
  API rate limits:
28
- - Per model: 15 minute cooldown after a successful query
29
  - Failed requests: 2 minute cooldown
30
- - Max batch size: 100 records per request
31
  """
32
 
33
  import csv
@@ -53,8 +60,14 @@ OUTPUT_CSV = "submission.csv"
53
  HEADERS = {"X-API-Key": API_KEY, "Content-Type": "application/json"}
54
 
55
  # ── Query API ──────────────────────────────────────────────────────────────────
56
- def query_model(model_id: int, features: list) -> list:
57
- """Query the predict API and return predicted labels."""
 
 
 
 
 
 
58
  resp = requests.post(
59
  f"{BASE_URL}/23-property-inference/predict",
60
  headers=HEADERS,
@@ -62,16 +75,20 @@ def query_model(model_id: int, features: list) -> list:
62
  timeout=30,
63
  )
64
  resp.raise_for_status()
65
- return resp.json()["labels"]
 
66
 
67
 
68
  # ── Score computation (replace with your own method) ───────────────────────────
69
- def compute_score(labels_a: list, labels_b: list = None) -> float:
70
  """
71
  Compute a confidence score in [0, 1] that a model belongs to World A.
72
 
73
- This baseline compares label distributions across two queries — you should
74
- replace this with your own property inference method.
 
 
 
75
 
76
  Returns:
77
  float in [0, 1] — higher means more likely World A
@@ -85,14 +102,14 @@ def compute_score(labels_a: list, labels_b: list = None) -> float:
85
  def main():
86
  predictions = {}
87
 
88
- # Use first 100 probe records (max batch size)
89
  probe_batch = PROBE_FEATURES[:100]
90
 
91
  for model_id in MODEL_IDS:
92
  print(f"Querying model {model_id}...")
93
  try:
94
- labels = query_model(model_id, probe_batch)
95
- score = compute_score(labels)
96
  except requests.exceptions.HTTPError as e:
97
  if e.response.status_code == 429:
98
  print(f" Rate limited on model {model_id} — skipping (will default to 0.5)")
@@ -100,6 +117,7 @@ def main():
100
  else:
101
  raise
102
  predictions[model_id] = score
 
103
 
104
  # Write CSV
105
  with open(OUTPUT_CSV, "w", newline="") as f:
@@ -125,4 +143,4 @@ def main():
125
 
126
 
127
  if __name__ == "__main__":
128
- main()
 
20
  - Higher score = more likely World A (70% male training data)
21
  - Lower score = more likely World B (50% male training data)
22
  - 0.5 = uncertain (this is the default for missing model_ids)
 
23
 
24
  All 200 model_ids should be included. Missing ones default to 0.5.
25
 
26
+ API response format:
27
+ {
28
+ "model_id": 0,
29
+ "batch_size": 100,
30
+ "labels": [[4, 7], [2, 9], ...], # top-2 predicted classes per record
31
+ "probs": [[0.52, 0.21], [0.44, 0.18], ...] # top-2 softmax probabilities per record
32
+ }
33
+
34
  API rate limits:
35
+ - Per model: 2 minute cooldown after a successful query
36
  - Failed requests: 2 minute cooldown
37
+ - Max batch size: 500 records per request
38
  """
39
 
40
  import csv
 
60
  HEADERS = {"X-API-Key": API_KEY, "Content-Type": "application/json"}
61
 
62
  # ── Query API ──────────────────────────────────────────────────────────────────
63
+ def query_model(model_id: int, features: list) -> dict:
64
+ """
65
+ Query the predict API for one model.
66
+
67
+ Returns a dict with:
68
+ labels: list[list[int]] — top-2 predicted classes per record
69
+ probs: list[list[float]] — top-2 softmax probabilities per record
70
+ """
71
  resp = requests.post(
72
  f"{BASE_URL}/23-property-inference/predict",
73
  headers=HEADERS,
 
75
  timeout=30,
76
  )
77
  resp.raise_for_status()
78
+ data = resp.json()
79
+ return {"labels": data["labels"], "probs": data["probs"]}
80
 
81
 
82
  # ── Score computation (replace with your own method) ───────────────────────────
83
+ def compute_score(labels: list, probs: list) -> float:
84
  """
85
  Compute a confidence score in [0, 1] that a model belongs to World A.
86
 
87
+ You have access to:
88
+ labels: list[list[int]] — top-2 predicted class indices per record
89
+ probs: list[list[float]] — top-2 softmax probabilities per record
90
+
91
+ This baseline returns a random score — replace with your actual method.
92
 
93
  Returns:
94
  float in [0, 1] — higher means more likely World A
 
102
  def main():
103
  predictions = {}
104
 
105
+ # Use first 100 probe records (max batch size is 500)
106
  probe_batch = PROBE_FEATURES[:100]
107
 
108
  for model_id in MODEL_IDS:
109
  print(f"Querying model {model_id}...")
110
  try:
111
+ result = query_model(model_id, probe_batch)
112
+ score = compute_score(result["labels"], result["probs"])
113
  except requests.exceptions.HTTPError as e:
114
  if e.response.status_code == 429:
115
  print(f" Rate limited on model {model_id} — skipping (will default to 0.5)")
 
117
  else:
118
  raise
119
  predictions[model_id] = score
120
+ print(f" score={score:.4f}")
121
 
122
  # Write CSV
123
  with open(OUTPUT_CSV, "w", newline="") as f:
 
143
 
144
 
145
  if __name__ == "__main__":
146
+ main()