IvoHoese adyk07 committed on
Commit
5224096
·
1 Parent(s): cf89964

Upload 4 files (#5)

Browse files

- Upload 4 files (925a10bf4b342895ffd5e48401b205344f27451c)


Co-authored-by: Aditya Kumar <adyk07@users.noreply.huggingface.co>

Files changed (4) hide show
  1. auxiliary.npz +3 -0
  2. model_ids.json +1 -0
  3. preprocessing.json +24 -0
  4. sample_submission.py +128 -0
auxiliary.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0326887f8592d02b364f1609ee11e48fc344c830f3181844e275f37477aea27
3
+ size 480514
model_ids.json ADDED
@@ -0,0 +1 @@
 
 
1
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199]
preprocessing.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "num_features": 10,
3
+ "num_classes": 100,
4
+ "col_sex_code": 0,
5
+ "col_total_charges": 8,
6
+ "sex_code_encoding": {
7
+ "0.0": "Male",
8
+ "1.0": "Female"
9
+ },
10
+ "target": "PRINC_SURG_PROC_CODE",
11
+ "note": "10 features (THCIC_ID excluded). Normalized by column max. Labels are procedure codes 0-99.",
12
+ "columns": [
13
+ "SEX_CODE",
14
+ "TYPE_OF_ADMISSION",
15
+ "SOURCE_OF_ADMISSION",
16
+ "LENGTH_OF_STAY",
17
+ "PAT_AGE",
18
+ "PAT_STATUS",
19
+ "RACE",
20
+ "ETHNICITY",
21
+ "TOTAL_CHARGES",
22
+ "ADMITTING_DIAGNOSIS"
23
+ ]
24
+ }
sample_submission.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Sample submission script for Task 23: Property Inference.
3
+
4
+ Auxiliary dataset: https://huggingface.co/datasets/SprintML/Property_Inference
5
+ Download auxiliary.npz from there and place it in the same directory as this script.
6
+
7
+ Steps:
8
+ 1. Query the predict API for each model using auxiliary probe data
9
+ 2. Compute a confidence score per model (higher = more likely World A)
10
+ 3. Save predictions to submission.csv
11
+ 4. Submit submission.csv to the hackathon platform
12
+
13
+ Submission format (submission.csv):
14
+ model_id,score
15
+ 0,0.73
16
+ 1,0.41
17
+ ...
18
+
19
+ Where score is a float in [0, 1]:
20
+ - Higher score = more likely World A (70% male training data)
21
+ - Lower score = more likely World B (50% male training data)
22
+ - 0.5 = uncertain (this is the default for missing model_ids)
23
+ - Do not submit scores of exactly 0 or 1
24
+
25
+ All 200 model_ids should be included. Missing ones default to 0.5.
26
+
27
+ API rate limits:
28
+ - Per model: 15 minute cooldown after a successful query
29
+ - Failed requests: 2 minute cooldown
30
+ - Max batch size: 100 records per request
31
+ """
32
+
33
+ import csv
34
+ import json
35
+ import os
36
+
37
+ import numpy as np
38
+ import requests
39
+
40
+ # ── Configuration ──────────────────────────────────────────────────────────────
41
+ BASE_URL = "http://35.192.205.84:80"
42
+ API_KEY = "YOUR_API_KEY_HERE"
43
+ TASK_ID = "23-property-inference"
44
+
45
+ # Paths (relative to this script)
46
+ HERE = os.path.dirname(os.path.abspath(__file__))
47
+ MODEL_IDS = json.load(open(os.path.join(HERE, "model_ids.json")))
48
+ AUXILIARY_NPZ = np.load(os.path.join(HERE, "auxiliary.npz"))
49
+
50
+ PROBE_FEATURES = AUXILIARY_NPZ["features"].tolist() # shape (10000, 10), already normalized
51
+ OUTPUT_CSV = "submission.csv"
52
+
53
+ HEADERS = {"X-API-Key": API_KEY, "Content-Type": "application/json"}
54
+
55
+ # ── Query API ──────────────────────────────────────────────────────────────────
56
+ def query_model(model_id: int, features: list) -> list:
57
+ """Query the predict API and return predicted labels."""
58
+ resp = requests.post(
59
+ f"{BASE_URL}/23-property-inference/predict",
60
+ headers=HEADERS,
61
+ json={"model_id": model_id, "features": features},
62
+ timeout=30,
63
+ )
64
+ resp.raise_for_status()
65
+ return resp.json()["labels"]
66
+
67
+
68
+ # ── Score computation (replace with your own method) ───────────────────────────
69
+ def compute_score(labels_a: list, labels_b: list = None) -> float:
70
+ """
71
+ Compute a confidence score in [0, 1] that a model belongs to World A.
72
+
73
+ This baseline compares label distributions across two queries β€” you should
74
+ replace this with your own property inference method.
75
+
76
+ Returns:
77
+ float in [0, 1] β€” higher means more likely World A
78
+ """
79
+ # Placeholder: random score. Replace with your actual method.
80
+ score = float(np.random.uniform(0, 1))
81
+ return float(np.clip(score, 0.001, 0.999))
82
+
83
+
84
+ # ── Main ───────────────────────────────────────────────────────────────────────
85
+ def main():
86
+ predictions = {}
87
+
88
+ # Use first 100 probe records (max batch size)
89
+ probe_batch = PROBE_FEATURES[:100]
90
+
91
+ for model_id in MODEL_IDS:
92
+ print(f"Querying model {model_id}...")
93
+ try:
94
+ labels = query_model(model_id, probe_batch)
95
+ score = compute_score(labels)
96
+ except requests.exceptions.HTTPError as e:
97
+ if e.response.status_code == 429:
98
+ print(f" Rate limited on model {model_id} β€” skipping (will default to 0.5)")
99
+ continue
100
+ else:
101
+ raise
102
+ predictions[model_id] = score
103
+
104
+ # Write CSV
105
+ with open(OUTPUT_CSV, "w", newline="") as f:
106
+ writer = csv.writer(f)
107
+ writer.writerow(["model_id", "score"])
108
+ for model_id in MODEL_IDS:
109
+ score = predictions.get(model_id, 0.5)
110
+ writer.writerow([model_id, round(score, 6)])
111
+
112
+ print(f"\nSaved {len(predictions)} predictions to {OUTPUT_CSV}")
113
+ print(f"Missing models (defaulted to 0.5): {len(MODEL_IDS) - len(predictions)}")
114
+
115
+ # Submit
116
+ print("\nSubmitting...")
117
+ with open(OUTPUT_CSV, "rb") as f:
118
+ resp = requests.post(
119
+ f"{BASE_URL}/submit/{TASK_ID}",
120
+ headers={"X-API-Key": API_KEY},
121
+ files={"file": (OUTPUT_CSV, f, "text/csv")},
122
+ timeout=120,
123
+ )
124
+ print("Response:", resp.json())
125
+
126
+
127
+ if __name__ == "__main__":
128
+ main()