# lucas-mega/src/fuse_esdac/x_04_table.py
import json
import math
import os
import pickle
from collections import defaultdict, OrderedDict
import numpy as np
import pandas as pd
from tqdm import tqdm
# ----------------------------- CONFIG -------------------------------- #
INPUT_PKL = "./src/fuse_esdac/outputs/03_photo_gadm/samples_with_photo_gadm.pkl"
OUTPUT_CSV = "./src/fuse_esdac/outputs/04_table/final_table.csv"
OUTPUT_DICT_PKL = "./src/fuse_esdac/outputs/04_table/final_dict.pkl"
OUTPUT_DICT_JSON = "./src/fuse_esdac/outputs/04_table/final_dict.json"
OUTPUT_META = "./src/fuse_esdac/outputs/04_table/column_meta.json"
OUTPUT_META_NAMES = "./src/fuse_esdac/outputs/04_table/column_names.json"
# --------------------------------------------------------------------- #
# Null normalization rules
NULL_STRINGS = {
"nan", "null", "none", "", "na", "n/a", "missing", "unknown", "unknown1"
}
def is_null_value(v):
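    """Return True if v is a recognized null marker (None, NaN, or a null-like string).

    Illustrative doctest-style examples:
        >>> is_null_value(None)
        True
        >>> is_null_value(" N/A ")
        True
        >>> is_null_value(0)
        False
    """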
    # Real null
    if v is None:
        return True
    # Float NaN (pickled pandas data often carries NaN for missing values)
    if isinstance(v, float) and math.isnan(v):
        return True
    # String forms
    if isinstance(v, str):
        s = v.strip().lower()
        if s in NULL_STRINGS:
            return True
    return False
# Sentinel fill values used by source datasets to mark missing data
FILL_VALUES_TO_NULL = {-9000, -90000}
# Features whose header matches these keywords must be non-negative;
# negative values are nulled out
NON_NEGATIVE_FEATURE_KEYWORDS = ("density", "percentage", "content", "stock")
# Features whose header matches these keywords must be strictly positive;
# non-positive values are nulled out
STRICTLY_POSITIVE_FEATURE_KEYWORDS = ("ph_",)
def numeric_or_none(v):
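    """Coerce v to float, or return None if it is not numeric.

    Illustrative doctest-style examples:
        >>> numeric_or_none("3.5")
        3.5
        >>> numeric_or_none("abc") is None
        True
    """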
try:
return float(v)
except (TypeError, ValueError):
return None
def clean_numeric_scalar_value(header, v):
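    """Clean one scalar value for the column named `header`.

    Returns None for recognized nulls, sentinel fill values, and
    sign-rule violations; non-numeric values pass through unchanged.
    The headers below are illustrative placeholders:

        >>> clean_numeric_scalar_value("soil:bulk_density (g/cm3)", -9000) is None
        True
        >>> clean_numeric_scalar_value("soil:bulk_density (g/cm3)", -0.5) is None
        True
        >>> clean_numeric_scalar_value("soil:ph_h2o ()", 6.2)
        6.2
    """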
if is_null_value(v):
return None
x = numeric_or_none(v)
if x is None:
return v
if x in FILL_VALUES_TO_NULL:
return None
    h = header.lower()
    # Null out physically implausible values based on keywords in the header
    if any(k in h for k in NON_NEGATIVE_FEATURE_KEYWORDS) and x < -0.00001:
        return None
    if any(k in h for k in STRICTLY_POSITIVE_FEATURE_KEYWORDS) and x < 0.00001:
        return None
return v
def clean_numeric_vector_value(header, v):
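    """Apply clean_numeric_scalar_value element-wise to a list value.

    Returns None when every element cleans to null; non-list values are
    returned unchanged. Illustrative example (placeholder header):

        >>> clean_numeric_vector_value("soil:clay_content (%)", [12.0, -9000, 35.5])
        [12.0, None, 35.5]
    """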
if is_null_value(v):
return None
if not isinstance(v, list):
return v
cleaned = [
clean_numeric_scalar_value(header, x)
for x in v
]
if all(x is None for x in cleaned):
return None
return cleaned
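# For reference, each pickled sample is assumed to look roughly like the
# sketch below (field names follow the accesses in main(); the theme,
# variable, and unit shown are illustrative placeholders):
#
# {
#     "LAT_LONG": [48.2, 16.4],
#     "GADM_IDS": [...],
#     "GADM_NAMES": [...],
#     "COUNTRY_CODE": "AT",
#     "SAMPLE_DATE": "2018-05-01",
#     "SAMPLE_DEPTH_RANGE_CM": [0, 20],
#     "SAMPLE_SOURCE_DATASET": "LUCAS",
#     "<theme>": {
#         "<variable>": {
#             "value": 1.23,               # scalar or list
#             "unit": "g/cm3",             # optional
#             "source_dataset": "ESDAC",
#             "distance_to_grid (m)": 250.0,
#         },
#     },
# }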
def main():
    with open(INPUT_PKL, "rb") as f:
data = pickle.load(f)
# 1) Discover variables and enforce unit consistency on (theme:var)
var_to_unit = {} # "theme:var" -> unit_str ('' if missing)
for sid, sample in data.items():
# force presence of required non-variable fields (surface schema issues early)
_ = sample["LAT_LONG"]
_ = sample["GADM_IDS"]
_ = sample["GADM_NAMES"]
_ = sample["COUNTRY_CODE"]
for theme, theme_obj in sample.items():
if theme in ("LAT_LONG", "GADM_IDS", "GADM_NAMES", "COUNTRY_CODE",
"SAMPLE_DATE", "SAMPLE_DEPTH_RANGE_CM", "SAMPLE_SOURCE_DATASET"):
continue
# theme_obj is expected to be a dict of variables
for var, var_obj in theme_obj.items():
                # required fields per the schema (value, source_dataset, distance_to_grid; unit optional)
_ = var_obj["value"]
_ = var_obj["source_dataset"]
_ = var_obj["distance_to_grid (m)"]
unit = var_obj.get("unit", "") # empty string if missing
key = f"{theme}:{var}"
                if key not in var_to_unit:
                    var_to_unit[key] = unit
                elif var_to_unit[key] != unit:
                    print(f"WARNING: inconsistent unit for '{key}': "
                          f"using '{var_to_unit[key]}' but encountered '{unit}'")
    # 2) Build a stable mapping from variable keys to final column headers,
    #    appending the unit in parentheses when one exists
def header_for(key):
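        """Map 'theme:var' to its final column header, appending the unit in
        parentheses when present. Illustrative: a key 'soil:bulk_density' with
        unit 'g/cm3' becomes 'soil:bulk_density (g/cm3)'; a unitless key
        stays bare."""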
unit = var_to_unit[key]
return f"{key} ({unit})" if unit else key
base_cols = ["id", "LAT_LONG", "GADM_IDS", "GADM_NAMES", "COUNTRY_CODE",
"SAMPLE_DATE", "SAMPLE_DEPTH_RANGE_CM", "SAMPLE_SOURCE_DATASET"]
var_cols = [header_for(key) for key in var_to_unit.keys()]
all_cols = base_cols + var_cols
# 3) Populate rows and collect meta
meta_sources = defaultdict(set) # header -> set of datasets
meta_distances = defaultdict(list) # header -> list of numeric distances (non-null only)
table = []
for sid, sample in tqdm(data.items(), desc="Building table"):
row = OrderedDict.fromkeys(all_cols, None)
row["id"] = sid
row["LAT_LONG"] = sample["LAT_LONG"]
row["GADM_IDS"] = sample["GADM_IDS"]
row["GADM_NAMES"] = sample["GADM_NAMES"]
row["COUNTRY_CODE"] = sample["COUNTRY_CODE"]
row["SAMPLE_DATE"] = sample.get("SAMPLE_DATE")
row["SAMPLE_DEPTH_RANGE_CM"] = sample.get("SAMPLE_DEPTH_RANGE_CM")
row["SAMPLE_SOURCE_DATASET"] = sample.get("SAMPLE_SOURCE_DATASET")
for theme, theme_obj in sample.items():
if theme in ("LAT_LONG", "GADM_IDS", "GADM_NAMES", "COUNTRY_CODE",
"SAMPLE_DATE", "SAMPLE_DEPTH_RANGE_CM", "SAMPLE_SOURCE_DATASET"):
continue
for var, var_obj in theme_obj.items():
key = f"{theme}:{var}"
header = header_for(key)
v = var_obj["value"]
if is_null_value(v):
v = None
row[header] = v # write to final header (with unit parentheses)
# --- meta: collect sources regardless of v ---
src = var_obj["source_dataset"]
if isinstance(src, str) and src.strip():
meta_sources[header].add(src)
# --- meta: collect non-null distances only ---
dist = var_obj["distance_to_grid (m)"]
if not is_null_value(dist):
try:
meta_distances[header].append(float(dist))
except (TypeError, ValueError):
# Surface bad distance type immediately
raise TypeError(f"distance_to_grid (m) not numeric for '{header}' in sample '{sid}': {dist!r}")
table.append(row)
df = pd.DataFrame(table, columns=all_cols)
# 4) Warn for columns that are entirely null
for col in var_cols:
if df[col].isna().all():
print(f"WARNING: column has all null values -> {col}")
# 5) Build column_meta.json before cleaning
def py_type(x):
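        """Map a Python scalar to a coarse type name, or None for other types.

        bool is tested before int because bool is a subclass of int in Python;
        illustrative: py_type(True) -> 'bool', py_type(3) -> 'int'.
        """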
if isinstance(x, bool):
return "bool"
if isinstance(x, int):
return "int"
if isinstance(x, float):
return "float"
if isinstance(x, str):
return "string"
return None # ignore other types
meta = {}
total_rows = len(df)
for header in var_cols:
# ----- distance stats -----
dists = np.array(meta_distances[header], dtype=float) if meta_distances[header] else np.array([])
if dists.size > 0:
stats = {
"min": float(np.min(dists)),
"max": float(np.max(dists)),
"mean": float(np.mean(dists)),
"std": float(np.std(dists)),
"median": float(np.median(dists)),
}
else:
stats = {
"min": None,
"max": None,
"mean": None,
"std": None,
"median": None,
}
# ----- detect array-valued or scalar -----
col_vals = df[header].tolist()
first_non_null = next((v for v in col_vals if v is not None), None)
if isinstance(first_non_null, list):
is_array = True
flat_vals = []
for v in col_vals:
if isinstance(v, list):
flat_vals.extend(v)
type_counts = {}
for v in flat_vals:
t = py_type(v)
if t:
type_counts[t] = type_counts.get(t, 0) + 1
        else:
            is_array = False
            type_counts = {}
            for v in col_vals:
                # skip NaN so nulls do not inflate the float count
                if isinstance(v, float) and math.isnan(v):
                    continue
                t = py_type(v)
                if t:
                    type_counts[t] = type_counts.get(t, 0) + 1
datatype = max(type_counts.items(), key=lambda x: x[1])[0] if type_counts else "unknown"
null_fraction = float(df[header].isna().sum()) / total_rows
meta[header] = {
"source_datasets": sorted(meta_sources[header]),
"distance_to_grid_stats (m)": stats,
"null_fraction": null_fraction,
"datatype": datatype,
"is_array_valued": is_array,
"description": "",
}
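    # A resulting meta entry is assumed to look like this (values illustrative):
    # "soil:bulk_density (g/cm3)": {
    #     "source_datasets": ["ESDAC"],
    #     "distance_to_grid_stats (m)": {"min": 0.0, "max": 980.0, "mean": 240.5,
    #                                    "std": 180.2, "median": 210.0},
    #     "null_fraction": 0.12,
    #     "datatype": "float",
    #     "is_array_valued": false,
    #     "description": ""
    # }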
# 6) Clean numeric scalar columns only
cleaned_cells = {}
cleaned_columns = defaultdict(int)
for header in var_cols:
if meta[header]["datatype"] not in ("int", "float"):
continue
if not meta[header]["is_array_valued"]:
for row_i, v in df[header].items():
new_v = clean_numeric_scalar_value(header, v)
                # NaN-to-NaN is "no change"; record only real modifications
                if not (pd.isna(new_v) and pd.isna(v)) and new_v != v:
df.at[row_i, header] = new_v
cleaned_cells[(row_i, header)] = new_v
cleaned_columns[header] += 1
else:
for row_i, v in df[header].items():
new_v = clean_numeric_vector_value(header, v)
if new_v != v:
df.at[row_i, header] = new_v
cleaned_cells[(row_i, header)] = new_v
cleaned_columns[header] += 1
if cleaned_columns:
print("Cleaned columns:")
for header, n in sorted(cleaned_columns.items()):
print(f" {header}: {n} cells")
else:
print("Cleaned columns: none")
# 7) Write cleaned values back to dict
for row_i, row in df.iterrows():
sid = row["id"]
sample = data[sid]
for theme, theme_obj in sample.items():
if theme in ("LAT_LONG", "GADM_IDS", "GADM_NAMES", "COUNTRY_CODE",
"SAMPLE_DATE", "SAMPLE_DEPTH_RANGE_CM", "SAMPLE_SOURCE_DATASET"):
continue
for var, var_obj in theme_obj.items():
key = f"{theme}:{var}"
header = header_for(key)
if (row_i, header) in cleaned_cells:
var_obj["value"] = cleaned_cells[(row_i, header)]
# 8) Save outputs
os.makedirs(os.path.dirname(OUTPUT_CSV), exist_ok=True)
    # na_rep="" already serializes NaN/None as empty cells, so no replace() pass is needed
    df.to_csv(OUTPUT_CSV, index=False, encoding="utf-8", na_rep="")
# Write cleaned dict as pickle
with open(OUTPUT_DICT_PKL, "wb") as f:
pickle.dump(data, f)
# Write cleaned dict as json
with open(OUTPUT_DICT_JSON, "w", encoding="utf-8") as f:
json.dump(
data,
f,
indent=2,
ensure_ascii=False
)
    with open(OUTPUT_META, "w", encoding="utf-8") as f:
        json.dump(meta, f, indent=2, ensure_ascii=False)
with open(OUTPUT_META_NAMES, "w", encoding="utf-8") as f:
json.dump(
{"column_names": sorted(meta.keys())},
f,
indent=2,
ensure_ascii=False,
)
print("Done.")
print(f"Rows: {len(df)} Columns: {len(df.columns)}")
print(f"Cleaned cells: {len(cleaned_cells)}")
print(
f"Saved: {OUTPUT_CSV}, "
f"{OUTPUT_DICT_PKL}, {OUTPUT_DICT_JSON}, "
f"{OUTPUT_META}, {OUTPUT_META_NAMES}"
)
if __name__ == "__main__":
main()