| label (string, lengths 6–189) | latent (sequence, lengths 5–5) |
|---|---|
a plane with a wooden handle and a wooden handle
| [
[
[
[
0.8046875,
0.50390625,
-2.6875
],
[
-2.515625,
-4.0625,
-1.34375
],
[
-4.6875,
-0.2177734375,
-1.078125
]
],
[
[
0.3046875,
-0.0966796875,
0.3984375
],
... |
a collection of comic books on a table
| [
[
[
[
0.390625,
0.0703125,
1.453125
],
[
-3.125,
-1.5234375,
-0.0205078125
],
[
-1.046875,
2.34375,
1.4296875
]
],
[
[
-4.3125,
-2.21875,
-1.875
],
[
... |
a brown and white bird standing on gravel
| [
[
[
[
-0.375,
1.125,
-0.12890625
],
[
1.328125,
1.9375,
1.84375
],
[
0.765625,
-0.703125,
4
]
],
[
[
-1.1953125,
-2.609375,
1.8359375
],
[
-0.4414... |
a green plant with a green stem
| [
[
[
[
2.5625,
4.0625,
-4.5625
],
[
-2.359375,
-1.078125,
-2.21875
],
[
-3.84375,
3.59375,
-0.359375
]
],
[
[
-2.359375,
-1.71875,
6.0625
],
[
-0.9... |
a sting ray swimming in the ocean
| [
[
[
[
-0.2109375,
-0.55859375,
-0.2490234375
],
[
-0.48828125,
0.6171875,
-0.62890625
],
[
-0.39453125,
-1.0234375,
-1.1875
]
],
[
[
-0.39453125,
-0.0272216796875,
-0... |
a person is under water
| [
[
[
[
-3.8125,
-0.2734375,
-3.3125
],
[
-0.1904296875,
-0.40625,
-0.890625
],
[
-0.83203125,
-4.28125,
-0.83984375
]
],
[
[
-2.171875,
-3.828125,
-0.30078125
],... |
a woman wearing a brown sweater
| [
[
[
[
0.275390625,
0.314453125,
1.4296875
],
[
-3.03125,
-0.00750732421875,
-2.828125
],
[
-1.4609375,
-0.54296875,
-2.4375
]
],
[
[
1.8671875,
0.33984375,
-2.796875
... |
a black grand piano in a living room
| [
[
[
[
-2.640625,
-1.5078125,
-1.9765625
],
[
-1.78125,
0.283203125,
-4.75
],
[
-1.46875,
0.34765625,
-4.875
]
],
[
[
-4.65625,
-2.28125,
2.46875
],
[
... |
a young boy sitting on a balance beam
| [
[
[
[
1.96875,
-3.78125,
-1.6328125
],
[
1.0625,
0.08447265625,
-0.375
],
[
-0.1162109375,
-0.53125,
0.01312255859375
]
],
[
[
0.76171875,
-1.4609375,
-0.1552734375
... |
a silver apple with four syringes sticking out of it
| [
[
[
[
3.765625,
1.125,
2.140625
],
[
2.171875,
5.28125,
-2.828125
],
[
3.75,
2.234375,
-2.4375
]
],
[
[
3.265625,
2.15625,
4
],
[
6.65625,
... |
a trilobite fossil on a rock with a black background
| [
[
[
[
-2.796875,
0.734375,
1.953125
],
[
0.5234375,
-0.63671875,
-2.71875
],
[
1.046875,
1.53125,
-1.6875
]
],
[
[
3.265625,
-1.2734375,
4.1875
],
[
... |
a man with a hat on
| [
[
[
[
0.40234375,
-0.34375,
0.73828125
],
[
2.6875,
-0.87890625,
1.046875
],
[
-0.251953125,
0.50390625,
0.0244140625
]
],
[
[
2.984375,
-3.0625,
1.5859375
],
... |
a person sitting on a chair with their feet on the ground
| [
[
[
[
0.12255859375,
2.359375,
-1.421875
],
[
2.9375,
-1.421875,
-0.984375
],
[
3.703125,
2.3125,
1.640625
]
],
[
[
-0.9453125,
2.78125,
-1.1875
],
[
... |
End of preview. Expand in Data Studio
class ImageNet96Dataset(torch.utils.data.Dataset):
    """Batched iterator over (caption, latent) rows of a HuggingFace dataset.

    Iterating the instance yields ``(labels, latents, label_embs,
    label_atnmasks)`` per batch: raw caption strings, one randomly chosen
    latent augmentation per item cast/moved for training, and the frozen
    text encoder's embeddings plus attention mask for the captions.

    NOTE(review): ``seed``, ``dtype`` and ``device`` are free names that must
    be defined at module level elsewhere in this file — confirm they exist.
    """

    def __init__(
        self, hf_ds, text_enc, tokenizer, bs, ddp, col_label="label", col_latent="latent"
    ):
        """
        Args:
            hf_ds: map-style HuggingFace dataset of caption/latent rows.
            text_enc: text encoder module exposing ``.device`` and returning
                an object with ``.last_hidden_state``.
            tokenizer: tokenizer matching ``text_enc``; padding side is
                forced to "right" so masks cover the trailing pad tokens.
            bs: batch size.
            ddp: if True, shard sampling across DDP ranks.
            col_label: column holding the caption string.
            col_latent: column holding the nested-list latents.
        """
        self.hf_ds = hf_ds
        self.col_label, self.col_latent = col_label, col_latent
        self.text_enc, self.tokenizer = text_enc, tokenizer
        self.tokenizer.padding_side = "right"
        self.prompt_len = 50  # fixed caption length in tokens
        if ddp:
            self.sampler = DistributedSampler(hf_ds, shuffle=True, seed=seed)
        else:
            # BUGFIX: the original passed generator=torch.manual_seed(seed),
            # which reseeds the *global* RNG as a side effect and hands that
            # global generator to the sampler. A private Generator keeps the
            # sampler deterministic without touching global random state.
            self.sampler = RandomSampler(
                hf_ds, generator=torch.Generator().manual_seed(seed)
            )
        self.dataloader = DataLoader(
            hf_ds,
            sampler=self.sampler,
            collate_fn=self.collate,
            batch_size=bs,
            num_workers=4,
            prefetch_factor=2,
        )

    def collate(self, items):
        """Collate raw rows into ``(labels, latents)``.

        Each row's latent is nested lists shaped ``[num_aug, ...]``
        (e.g. ``[5, 32, 3, 3]``); one augmentation is picked uniformly at
        random per item, so the returned latents drop the aug axis.
        """
        labels = [item[self.col_label] for item in items]
        # torch.tensor() replaces the legacy torch.Tensor() constructor;
        # dtype pinned to float32 to preserve the old constructor's output.
        latents = torch.tensor(
            [item[self.col_latent] for item in items], dtype=torch.float32
        )
        # Generalized from a rigid 5-way shape unpack: works for any number
        # of trailing latent dimensions, not just [B, num_aug, 32, 3, 3].
        B, num_aug = latents.shape[0], latents.shape[1]
        aug_idx = torch.randint(0, num_aug, (B,))  # one random aug per item
        latents = latents[torch.arange(B), aug_idx]
        return labels, latents

    def __iter__(self):
        """Yield ``(labels, latents, label_embs, label_atnmasks)`` batches."""
        for labels, latents in self.dataloader:
            label_embs, label_atnmasks = self.encode_prompts(labels)
            # One .to() call moves and casts in a single step instead of the
            # original's two sequential conversions.
            latents = latents.to(device=device, dtype=dtype)
            yield labels, latents, label_embs, label_atnmasks

    def encode_prompts(self, prompts):
        """Tokenize ``prompts`` and run the text encoder without gradients.

        Returns:
            ``(last_hidden_state, attention_mask)`` — hidden states shaped
            ``[B, prompt_len, hidden]`` and the matching padding mask.
        """
        prompts_tok = self.tokenizer(
            prompts,
            padding="max_length",
            truncation=True,
            max_length=self.prompt_len,
            return_tensors="pt",
        )
        with torch.no_grad():
            prompts_encoded = self.text_enc(**prompts_tok.to(self.text_enc.device))
        return prompts_encoded.last_hidden_state, prompts_tok.attention_mask

    def __len__(self):
        # Number of batches per epoch (DataLoader length), not row count.
        return len(self.dataloader)
- Downloads last month
- 16