# lucas-mega / src/fuse_esdac/x_01_fuse.py
# Author: Kuangdai
# Initial release of LUCAS-MEGA (commit 9bc98d9)
import argparse
import json
from pathlib import Path
import pandas as pd
from utils import fuse_csv, fuse_tif
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint", type=str, required=True,
help="Checkpoint config")
parser.add_argument("--last-only", action="store_true",
help="Only do the last dataset (for debugging)")
parser.add_argument("--recover_unfinished", action="store_true",
help="Recover from samples_unfinished.json and skip finished datasets")
args = parser.parse_args()
# -------------------------
# Load checkpoint config
# -------------------------
with open(f"./src/fuse_esdac/configs/{args.checkpoint}.json", "r") as f:
cfg = json.load(f)
input_json = Path(f"./src/fuse_esdac/outputs/{cfg.get('input')}/samples.json")
datasets = cfg.get("datasets", [])
if args.last_only and datasets:
datasets = datasets[-1:]
# -------------------------
# Prepare output dir (needed for unfinished save/recovery)
# -------------------------
out_dir = Path(f"./src/fuse_esdac/outputs/{args.checkpoint}")
out_dir.mkdir(parents=True, exist_ok=True)
out_json = out_dir / f"samples.json"
out_var_csv = out_dir / f"stat_variable.csv"
unfinished_json = out_dir / "samples_unfinished.json"
finished_datasets_json = out_dir / "datasets_finished.json"
# -------------------------
# Load previous fused dict (normal) OR recover unfinished
# -------------------------
if args.recover_unfinished:
if not unfinished_json.exists():
raise FileNotFoundError(f"--recover_unfinished requires {unfinished_json} to exist")
with open(unfinished_json, "r") as f:
big_output_dict = json.load(f)
if finished_datasets_json.exists():
with open(finished_datasets_json, "r") as f:
finished_datasets = set(json.load(f))
else:
finished_datasets = set()
# Skip finished ones (preserve order)
datasets = [d for d in datasets if d not in finished_datasets]
else:
finished_datasets = set()
if input_json and Path(input_json).exists():
with open(input_json, "r") as f:
big_output_dict = json.load(f)
else:
big_output_dict = {}
# -------------------------
# Fuse datasets
# -------------------------
for dataset in datasets:
try:
data_dir = Path(f"./datasets/esdac/{dataset}/processed")
schema_path = Path(f"./src/esdac/{dataset}/fuse_schema.json")
with open(schema_path, "r") as f:
schema = json.load(f)
for filename in schema.keys():
if filename.endswith(".csv"):
csv_path = data_dir / filename
big_output_dict = fuse_csv(
source_file=csv_path,
schema=schema[filename],
source_name=dataset,
current_dict=big_output_dict,
)
elif filename.endswith("@tif"):
tif_path = data_dir / filename[:-4]
big_output_dict = fuse_tif(
source_file=tif_path,
schema=schema[filename],
source_name=dataset,
current_dict=big_output_dict,
)
# Mark dataset finished (for recovery skipping)
finished_datasets.add(dataset)
with open(finished_datasets_json, "w") as f:
json.dump(sorted(finished_datasets), f, indent=2, ensure_ascii=False)
except Exception:
# Save partial progress and re-raise
with open(unfinished_json, "w") as f:
json.dump(big_output_dict, f, indent=2, ensure_ascii=False)
raise
# -------------------------
# Save outputs
# -------------------------
with open(out_json, "w") as f:
json.dump(big_output_dict, f, indent=2, ensure_ascii=False)
# -------------------------
# Variable-level table
# -------------------------
table = {}
for sample_key, sample in big_output_dict.items():
prefix = sample_key.split("_")[0]
if prefix not in table:
table[prefix] = {}
for domain, section in sample.items():
if isinstance(section, dict):
for var, leaf in section.items():
if isinstance(leaf, dict):
if leaf.get("value") not in (None, "", "null"):
key = f"{domain}:{var}"
table[prefix][key] = table[prefix].get(key, 0) + 1
# Count raw samples per prefix (independent of filled variables)
sample_count = {}
for sample_key in big_output_dict.keys():
prefix = sample_key.split("_")[0]
sample_count[prefix] = sample_count.get(prefix, 0) + 1
df_var = (
pd.DataFrame.from_dict(table, orient="index")
.fillna(0)
.infer_objects()
.astype(int)
)
# Ensure all prefixes are present even if they had no filled variables
all_prefixes = sorted(sample_count.keys())
if not df_var.empty:
df_var = df_var.reindex(all_prefixes, fill_value=0)
else:
df_var = pd.DataFrame(index=all_prefixes)
# Add true sample counts per row
df_var["TOTAL_SAMPLES"] = df_var.index.map(lambda p: sample_count.get(p, 0)).astype(int)
# Add TOTAL row with correct sums and total sample count
totals_no_samples_col = df_var.drop(columns=["TOTAL_SAMPLES"]).sum(axis=0).astype(int)
df_var.loc["SUM"] = 0 # initialize the row
df_var.loc["SUM", totals_no_samples_col.index] = totals_no_samples_col
df_var.loc["SUM", "TOTAL_SAMPLES"] = int(sum(sample_count.values()))
df_var = df_var.astype(int)
df_var.to_csv(out_var_csv)