import argparse
import json
from pathlib import Path

import pandas as pd

from utils import fuse_csv, fuse_tif
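
# Note: fuse_csv and fuse_tif are expected to take
# (source_file, schema, source_name, current_dict) and return the updated
# sample dict; this is inferred from the call sites below, not from utils.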

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint", type=str, required=True,
                        help="Checkpoint config")
    parser.add_argument("--last-only", action="store_true",
                        help="Only do the last dataset (for debugging)")
    parser.add_argument("--recover_unfinished", action="store_true",
                        help="Recover from samples_unfinished.json and skip finished datasets")
    args = parser.parse_args()
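
    # Example invocations (the script filename "fuse.py" is an assumption;
    # the flags are the ones defined above):
    #   python src/fuse_esdac/fuse.py --checkpoint my_run --last-only
    #   python src/fuse_esdac/fuse.py --checkpoint my_run --recover_unfinished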

    # -------------------------
    # Load checkpoint config
    # -------------------------
    with open(f"./src/fuse_esdac/configs/{args.checkpoint}.json", "r") as f:
        cfg = json.load(f)

    input_json = Path(f"./src/fuse_esdac/outputs/{cfg.get('input')}/samples.json")
    datasets = cfg.get("datasets", [])
    if args.last_only and datasets:
        datasets = datasets[-1:]
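
    # Illustrative checkpoint config shape, inferred from the keys read above
    # (the field values are hypothetical):
    # {
    #   "input": "previous_checkpoint",
    #   "datasets": ["dataset_a", "dataset_b"]
    # }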

    # -------------------------
    # Prepare output dir (needed for unfinished save/recovery)
    # -------------------------
    out_dir = Path(f"./src/fuse_esdac/outputs/{args.checkpoint}")
    out_dir.mkdir(parents=True, exist_ok=True)

    out_json = out_dir / "samples.json"
    out_var_csv = out_dir / "stat_variable.csv"
    unfinished_json = out_dir / "samples_unfinished.json"
    finished_datasets_json = out_dir / "datasets_finished.json"

    # -------------------------
    # Load previous fused dict (normal) OR recover unfinished
    # -------------------------
    if args.recover_unfinished:
        if not unfinished_json.exists():
            raise FileNotFoundError(f"--recover_unfinished requires {unfinished_json} to exist")
        with open(unfinished_json, "r") as f:
            big_output_dict = json.load(f)
        if finished_datasets_json.exists():
            with open(finished_datasets_json, "r") as f:
                finished_datasets = set(json.load(f))
        else:
            finished_datasets = set()
        # Skip finished ones (preserve order)
        datasets = [d for d in datasets if d not in finished_datasets]
    else:
        finished_datasets = set()
        if input_json.exists():
            with open(input_json, "r") as f:
                big_output_dict = json.load(f)
        else:
            big_output_dict = {}

    # -------------------------
    # Fuse datasets
    # -------------------------
    for dataset in datasets:
        try:
            data_dir = Path(f"./datasets/esdac/{dataset}/processed")
            schema_path = Path(f"./src/esdac/{dataset}/fuse_schema.json")
            with open(schema_path, "r") as f:
                schema = json.load(f)
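
            # Hypothetical fuse_schema.json shape, inferred from the loop
            # below: keys are ".csv" filenames or raster keys tagged with an
            # "@tif" suffix; values are the per-file schemas handed to
            # fuse_csv / fuse_tif.
            # {
            #   "topsoil.csv": {...},
            #   "bulk_density.tif@tif": {...}
            # }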
            for filename in schema.keys():
                if filename.endswith(".csv"):
                    csv_path = data_dir / filename
                    big_output_dict = fuse_csv(
                        source_file=csv_path,
                        schema=schema[filename],
                        source_name=dataset,
                        current_dict=big_output_dict,
                    )
                elif filename.endswith("@tif"):
                    # Strip the 4-character "@tif" tag to recover the file path
                    tif_path = data_dir / filename[:-4]
                    big_output_dict = fuse_tif(
                        source_file=tif_path,
                        schema=schema[filename],
                        source_name=dataset,
                        current_dict=big_output_dict,
                    )

            # Mark dataset finished (for recovery skipping)
            finished_datasets.add(dataset)
            with open(finished_datasets_json, "w") as f:
                json.dump(sorted(finished_datasets), f, indent=2, ensure_ascii=False)
        except Exception:
            # Save partial progress and re-raise
            with open(unfinished_json, "w") as f:
                json.dump(big_output_dict, f, indent=2, ensure_ascii=False)
            raise

    # -------------------------
    # Save outputs
    # -------------------------
    with open(out_json, "w") as f:
        json.dump(big_output_dict, f, indent=2, ensure_ascii=False)

    # -------------------------
    # Variable-level table
    # -------------------------
    table = {}
    for sample_key, sample in big_output_dict.items():
        prefix = sample_key.split("_")[0]
        if prefix not in table:
            table[prefix] = {}
        for domain, section in sample.items():
            if isinstance(section, dict):
                for var, leaf in section.items():
                    if isinstance(leaf, dict):
                        if leaf.get("value") not in (None, "", "null"):
                            key = f"{domain}:{var}"
                            table[prefix][key] = table[prefix].get(key, 0) + 1

    # Count raw samples per prefix (independent of filled variables)
    sample_count = {}
    for sample_key in big_output_dict.keys():
        prefix = sample_key.split("_")[0]
        sample_count[prefix] = sample_count.get(prefix, 0) + 1

    df_var = (
        pd.DataFrame.from_dict(table, orient="index")
        .fillna(0)
        .infer_objects()
        .astype(int)
    )

    # Ensure all prefixes are present even if they had no filled variables
    all_prefixes = sorted(sample_count.keys())
    if not df_var.empty:
        df_var = df_var.reindex(all_prefixes, fill_value=0)
    else:
        df_var = pd.DataFrame(index=all_prefixes)

    # Add true sample counts per row
    df_var["TOTAL_SAMPLES"] = df_var.index.map(lambda p: sample_count.get(p, 0)).astype(int)

    # Add a SUM row with per-variable totals and the total sample count
    totals_no_samples_col = df_var.drop(columns=["TOTAL_SAMPLES"]).sum(axis=0).astype(int)
    df_var.loc["SUM"] = 0  # initialize the row
    df_var.loc["SUM", totals_no_samples_col.index] = totals_no_samples_col
    df_var.loc["SUM", "TOTAL_SAMPLES"] = int(sum(sample_count.values()))
    df_var = df_var.astype(int)
    df_var.to_csv(out_var_csv)