"""
Convert a parquet-format HuggingFace dataset back to raw image files.
Input: a local directory of .parquet shards OR an HF repo ID
Output: GSD/{split}/image/<image_id>.jpg, {mask,reflections,edge}/<image_id>.png + metadata.jsonl
"""
import argparse
import json
from pathlib import Path
from datasets import load_dataset, load_from_disk
# Which auxiliary annotation layers each split ships with:
# split name -> (has_reflections, has_edge)
SPLITS = {
    "train": (True, True),    # glass masks plus reflection and edge maps
    "extra": (False, True),   # edge maps only, no reflection annotations
    "test": (False, False),   # images and masks only
}
def save_split(ds, split: str, has_reflections: bool, has_edge: bool, out_dir: Path):
split_dir = out_dir / split
img_dir = split_dir / "image"; img_dir.mkdir(parents=True, exist_ok=True)
mask_dir = split_dir / "mask"; mask_dir.mkdir(parents=True, exist_ok=True)
refl_dir = split_dir / "reflections" if has_reflections else None
edge_dir = split_dir / "edge" if has_edge else None
if refl_dir: refl_dir.mkdir(parents=True, exist_ok=True)
if edge_dir: edge_dir.mkdir(parents=True, exist_ok=True)
rows = []
for i, sample in enumerate(ds):
stem = sample.get("image_id") or f"{i:06d}"
img_path = img_dir / f"{stem}.jpg"
mask_path = mask_dir / f"{stem}.png"
sample["image"].save(img_path)
sample["mask"].save(mask_path)
refl_rel = edge_rel = ""
if has_reflections and sample.get("reflections") is not None:
p = refl_dir / f"{stem}.png"
sample["reflections"].save(p)
refl_rel = f"reflections/{p.name}"
if has_edge and sample.get("edge") is not None:
p = edge_dir / f"{stem}.png"
sample["edge"].save(p)
edge_rel = f"edge/{p.name}"
rows.append({
"file_name": f"image/{img_path.name}",
"mask": f"mask/{mask_path.name}",
"reflections": refl_rel,
"edge": edge_rel,
})
if (i + 1) % 100 == 0:
print(f" {i + 1}/{len(ds)}")
# (split_dir / "metadata.jsonl").write_text(
# "\n".join(json.dumps(r) for r in rows)
# )
print(f" saved {len(rows)} samples -> {split_dir}")
def main():
    """Parse CLI arguments and convert each requested split to raw files."""
    parser = argparse.ArgumentParser(description="Convert parquet dataset to raw images")
    source = parser.add_mutually_exclusive_group(required=True)
    source.add_argument("--repo", help="HuggingFace repo ID, e.g. garrying/GSD")
    source.add_argument("--local", help="Path to a directory saved with save_to_disk()")
    parser.add_argument("--out", default="GSD", help="Output root directory (default: ./GSD)")
    parser.add_argument(
        "--splits",
        nargs="+",
        choices=list(SPLITS),
        default=list(SPLITS),
        help="Which splits to convert (default: all)",
    )
    args = parser.parse_args()

    root = Path(args.out)
    root.mkdir(parents=True, exist_ok=True)
    for name in args.splits:
        refl_flag, edge_flag = SPLITS[name]
        print(f"\nLoading {name}...")
        # Either stream the split from the Hub or read a save_to_disk() copy.
        dataset = (
            load_dataset(args.repo, split=name)
            if args.repo
            else load_from_disk(str(Path(args.local) / name))
        )
        print(f" {len(dataset)} samples — saving images...")
        save_split(dataset, name, refl_flag, edge_flag, root)
    print(f"\nDone! Raw files in: {root.resolve()}")


if __name__ == "__main__":
    main()