maitri01 commited on
Commit
0481b33
·
verified ·
1 Parent(s): 92917b5

Update task_template.py

Browse files
Files changed (1) hide show
  1. task_template.py +135 -135
task_template.py CHANGED
@@ -1,135 +1,135 @@
1
- import csv
2
- import random
3
- import zipfile
4
- import requests
5
- from pathlib import Path
6
- from collections import Counter
7
-
8
- import torch
9
- from torch.utils.data import DataLoader, Dataset
10
- from torchvision import transforms, models, datasets
11
- from PIL import Image
12
-
13
-
14
- # ----------------------------
15
- # CONFIG
16
- # ----------------------------
17
- ZIP_FILE = "Dataset.zip" # Path to dataset zip
18
- DATASET_DIR = Path("dataset") # Unzipped folder
19
- SUBMISSION_FILE = "submission.csv"
20
- LABELS = ["var16", "var20", "var24", "var30", "rarb", "rarl", "rarxl", "rarxxl", "outlier"] # Donot change this
21
-
22
- # Leaderboard submission
23
- SERVER_URL = "http://34.122.51.94:80"
24
- API_KEY = None # teams insert their assigned token here
25
- TASK_ID = "15-model-tracer"
26
-
27
- # ----------------------------
28
- # UNZIP DATASET
29
- # ----------------------------
30
- if not DATASET_DIR.exists():
31
- print("Unzipping dataset...")
32
- with zipfile.ZipFile(ZIP_FILE, "r") as zip_ref:
33
- zip_ref.extractall(DATASET_DIR)
34
- else:
35
- print("Dataset already extracted.")
36
-
37
-
38
- # ----------------------------
39
- # TRANSFORMS
40
- # ----------------------------
41
- transform = transforms.Compose([
42
- transforms.Resize((224, 224)),
43
- transforms.ToTensor(),
44
- ])
45
-
46
-
47
- # ----------------------------s
48
- # DATASETS & DATALOADERS
49
- # ----------------------------
50
- print("Loading datasets...")
51
-
52
- train_dataset = datasets.ImageFolder(root=DATASET_DIR / "train", transform=transform)
53
- val_dataset = datasets.ImageFolder(root=DATASET_DIR / "val", transform=transform)
54
-
55
- # Custom dataset for unlabeled test images
56
- class TestDataset(Dataset):
57
- def __init__(self, root, transform=None):
58
- self.root = Path(root)
59
- self.files = sorted(list(self.root.glob("*.*"))) # all files
60
- self.transform = transform
61
-
62
- def __len__(self):
63
- return len(self.files)
64
-
65
- def __getitem__(self, idx):
66
- img_path = self.files[idx]
67
- image = Image.open(img_path).convert("RGB")
68
- if self.transform:
69
- image = self.transform(image)
70
- return {"image": image, "image_name": img_path.name}
71
-
72
- test_dataset = TestDataset(DATASET_DIR / "test", transform=transform)
73
-
74
- train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)
75
- val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False, num_workers=4)
76
- test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4)
77
-
78
- # Print classes and per-class counts for train/val
79
- def _print_class_stats(name: str, ds):
80
- counts = Counter(getattr(ds, "targets", []))
81
- print(f"{name} classes: {ds.classes}")
82
- for cls, idx in ds.class_to_idx.items():
83
- print(f" {cls}: {counts.get(idx, 0)}")
84
-
85
- _print_class_stats("Train", train_dataset)
86
- _print_class_stats("Val", val_dataset)
87
-
88
- print(f"Train size: {len(train_dataset)} | Val size: {len(val_dataset)} | Test size: {len(test_dataset)}")
89
-
90
-
91
- # ----------------------------
92
- # EXAMPLE MODEL (ResNet18)
93
- # ----------------------------
94
- print("Building dummy model...")
95
- model = models.resnet18(weights=None, num_classes=len(LABELS)) # untrained
96
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
97
- model = model.to(device)
98
-
99
-
100
- # ----------------------------
101
- # DUMMY INFERENCE ON TEST / DUMMY SUBMISSION
102
- # ----------------------------
103
- print("Generating random predictions for submission...")
104
- preds = []
105
- for batch in test_loader:
106
- for fname in batch["image_name"]:
107
- label = random.choice(LABELS) # random baseline
108
- preds.append([fname, label])
109
-
110
- # ----------------------------
111
- # SAVE SUBMISSION
112
- # ----------------------------
113
- with open(SUBMISSION_FILE, "w", newline="", encoding="utf-8") as f:
114
- writer = csv.writer(f)
115
- writer.writerow(["image_name", "label"])
116
- writer.writerows(preds)
117
-
118
- print(f"Saved submission file to {SUBMISSION_FILE}")
119
- print("Format: image_name,label")
120
-
121
-
122
- # ----------------------------
123
- # SUBMIT TO LEADERBOARD SERVER
124
- # ----------------------------
125
- if API_KEY is None:
126
- print("No TOKEN provided. Please set your team TOKEN in this script to submit.")
127
- else:
128
- print("Submitting to leaderboard server...")
129
-
130
- response = requests.post(
131
- f"{SERVER_URL}/submit/{TASK_ID}",
132
- files={"file": open(SUBMISSION_FILE, "rb")},
133
- headers={"X-API-Key": API_KEY},
134
- )
135
- print("Server response:", response.json())
 
1
+ import csv
2
+ import random
3
+ import zipfile
4
+ import requests
5
+ from pathlib import Path
6
+ from collections import Counter
7
+
8
+ import torch
9
+ from torch.utils.data import DataLoader, Dataset
10
+ from torchvision import transforms, models, datasets
11
+ from PIL import Image
12
+
13
+
14
+ # ----------------------------
15
+ # CONFIG
16
+ # ----------------------------
17
+ ZIP_FILE = "Dataset.zip" # Path to dataset zip
18
+ DATASET_DIR = Path("dataset") # Unzipped folder
19
+ SUBMISSION_FILE = "submission.csv"
20
+ LABELS = ["var16", "var20", "var24", "var30", "rarb", "rarl", "rarxl", "rarxxl", "outlier"] # Donot change this
21
+
22
+ # Leaderboard submission
23
+ SERVER_URL = "http://35.192.205.84:80"
24
+ API_KEY = None # teams insert their assigned token here
25
+ TASK_ID = "15-model-tracer"
26
+
27
+ # ----------------------------
28
+ # UNZIP DATASET
29
+ # ----------------------------
30
+ if not DATASET_DIR.exists():
31
+ print("Unzipping dataset...")
32
+ with zipfile.ZipFile(ZIP_FILE, "r") as zip_ref:
33
+ zip_ref.extractall(DATASET_DIR)
34
+ else:
35
+ print("Dataset already extracted.")
36
+
37
+
38
+ # ----------------------------
39
+ # TRANSFORMS
40
+ # ----------------------------
41
+ transform = transforms.Compose([
42
+ transforms.Resize((224, 224)),
43
+ transforms.ToTensor(),
44
+ ])
45
+
46
+
47
+ # ----------------------------s
48
+ # DATASETS & DATALOADERS
49
+ # ----------------------------
50
+ print("Loading datasets...")
51
+
52
+ train_dataset = datasets.ImageFolder(root=DATASET_DIR / "train", transform=transform)
53
+ val_dataset = datasets.ImageFolder(root=DATASET_DIR / "val", transform=transform)
54
+
55
+ # Custom dataset for unlabeled test images
56
+ class TestDataset(Dataset):
57
+ def __init__(self, root, transform=None):
58
+ self.root = Path(root)
59
+ self.files = sorted(list(self.root.glob("*.*"))) # all files
60
+ self.transform = transform
61
+
62
+ def __len__(self):
63
+ return len(self.files)
64
+
65
+ def __getitem__(self, idx):
66
+ img_path = self.files[idx]
67
+ image = Image.open(img_path).convert("RGB")
68
+ if self.transform:
69
+ image = self.transform(image)
70
+ return {"image": image, "image_name": img_path.name}
71
+
72
+ test_dataset = TestDataset(DATASET_DIR / "test", transform=transform)
73
+
74
+ train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)
75
+ val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False, num_workers=4)
76
+ test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4)
77
+
78
+ # Print classes and per-class counts for train/val
79
+ def _print_class_stats(name: str, ds):
80
+ counts = Counter(getattr(ds, "targets", []))
81
+ print(f"{name} classes: {ds.classes}")
82
+ for cls, idx in ds.class_to_idx.items():
83
+ print(f" {cls}: {counts.get(idx, 0)}")
84
+
85
+ _print_class_stats("Train", train_dataset)
86
+ _print_class_stats("Val", val_dataset)
87
+
88
+ print(f"Train size: {len(train_dataset)} | Val size: {len(val_dataset)} | Test size: {len(test_dataset)}")
89
+
90
+
91
+ # ----------------------------
92
+ # EXAMPLE MODEL (ResNet18)
93
+ # ----------------------------
94
+ print("Building dummy model...")
95
+ model = models.resnet18(weights=None, num_classes=len(LABELS)) # untrained
96
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
97
+ model = model.to(device)
98
+
99
+
100
+ # ----------------------------
101
+ # DUMMY INFERENCE ON TEST / DUMMY SUBMISSION
102
+ # ----------------------------
103
+ print("Generating random predictions for submission...")
104
+ preds = []
105
+ for batch in test_loader:
106
+ for fname in batch["image_name"]:
107
+ label = random.choice(LABELS) # random baseline
108
+ preds.append([fname, label])
109
+
110
+ # ----------------------------
111
+ # SAVE SUBMISSION
112
+ # ----------------------------
113
+ with open(SUBMISSION_FILE, "w", newline="", encoding="utf-8") as f:
114
+ writer = csv.writer(f)
115
+ writer.writerow(["image_name", "label"])
116
+ writer.writerows(preds)
117
+
118
+ print(f"Saved submission file to {SUBMISSION_FILE}")
119
+ print("Format: image_name,label")
120
+
121
+
122
+ # ----------------------------
123
+ # SUBMIT TO LEADERBOARD SERVER
124
+ # ----------------------------
125
+ if API_KEY is None:
126
+ print("No TOKEN provided. Please set your team TOKEN in this script to submit.")
127
+ else:
128
+ print("Submitting to leaderboard server...")
129
+
130
+ response = requests.post(
131
+ f"{SERVER_URL}/submit/{TASK_ID}",
132
+ files={"file": open(SUBMISSION_FILE, "rb")},
133
+ headers={"X-API-Key": API_KEY},
134
+ )
135
+ print("Server response:", response.json())