"""Build a flat per-sample table and per-column metadata from the merged sample
dictionary produced by the previous pipeline step, clean implausible numeric
values, and save the results as CSV, pickle, and JSON."""
import json
import os
import pickle
from collections import defaultdict, OrderedDict

import numpy as np
import pandas as pd
from tqdm import tqdm

# Input/output paths (the input is a pickled dict of samples)
INPUT_PKL = "./src/fuse_esdac/outputs/03_photo_gadm/samples_with_photo_gadm.pkl"
OUTPUT_CSV = "./src/fuse_esdac/outputs/04_table/final_table.csv"
OUTPUT_DICT_PKL = "./src/fuse_esdac/outputs/04_table/final_dict.pkl"
OUTPUT_DICT_JSON = "./src/fuse_esdac/outputs/04_table/final_dict.json"
OUTPUT_META = "./src/fuse_esdac/outputs/04_table/column_meta.json"
OUTPUT_META_NAMES = "./src/fuse_esdac/outputs/04_table/column_names.json"

# String values (case-insensitive, stripped) that are treated as null
NULL_STRINGS = {
    "nan", "null", "none", "", "na", "n/a", "missing", "unknown", "unknown1"
}


def is_null_value(v):
    """Return True if v should be treated as missing."""
    if v is None:
        return True
    # NaN floats are treated as missing as well
    if isinstance(v, float) and np.isnan(v):
        return True
    if isinstance(v, str):
        s = v.strip().lower()
        if s in NULL_STRINGS:
            return True
    return False

# Sentinel fill values that are treated as missing
FILL_VALUES_TO_NULL = {-9000, -90000}
# Features that cannot be negative: negative values are set to null
NEGATIVE_FEATURE_KEYWORDS = ("density", "percentage", "content", "stock")
# Features that must be strictly positive (e.g. pH): non-positive values are set to null
NON_POSITIVE_FEATURE_KEYWORDS = ("ph_",)


def numeric_or_none(v):
    """Return v as a float, or None if it cannot be interpreted as a number."""
    try:
        return float(v)
    except (TypeError, ValueError):
        return None


def clean_numeric_scalar_value(header, v):
    """Clean one numeric cell; return None for null, sentinel, or implausible values."""
    if is_null_value(v):
        return None

    x = numeric_or_none(v)
    if x is None:
        # Not numeric at all: leave the value untouched
        return v

    if x in FILL_VALUES_TO_NULL:
        return None

    h = header.lower()
    if any(k in h for k in NEGATIVE_FEATURE_KEYWORDS) and x < -0.00001:
        return None
    if any(k in h for k in NON_POSITIVE_FEATURE_KEYWORDS) and x < 0.00001:
        return None

    return v


def clean_numeric_vector_value(header, v):
    """Clean a list-valued cell element-wise; return None if every element is null."""
    if is_null_value(v):
        return None

    if not isinstance(v, list):
        return v

    cleaned = [clean_numeric_scalar_value(header, x) for x in v]

    if all(x is None for x in cleaned):
        return None

    return cleaned
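
# Illustrative behaviour of the cleaners (the headers below are made-up examples,
# not taken from the real data):
#   clean_numeric_scalar_value("CHEMISTRY:ph_cacl2", 0.0)                  -> None (pH must be > 0)
#   clean_numeric_scalar_value("PHYSICS:bulk_density (kg/m3)", -9000)      -> None (sentinel fill value)
#   clean_numeric_vector_value("PHYSICS:clay_content (%)", [-90000, 12.5]) -> [None, 12.5]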


def main():
    with open(INPUT_PKL, "rb") as f:
        data = pickle.load(f)
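
    # Rough shape of `data`, inferred from the accesses below (names other than
    # the fixed base fields are placeholders):
    # {
    #     "<sample_id>": {
    #         "LAT_LONG": ...,
    #         "GADM_IDS": ...,
    #         "GADM_NAMES": ...,
    #         "COUNTRY_CODE": ...,
    #         "SAMPLE_DATE": ...,               # optional
    #         "SAMPLE_DEPTH_RANGE_CM": ...,     # optional
    #         "SAMPLE_SOURCE_DATASET": ...,     # optional
    #         "<THEME>": {
    #             "<variable>": {
    #                 "value": ...,
    #                 "unit": "...",            # optional
    #                 "source_dataset": "...",
    #                 "distance_to_grid (m)": ...,
    #             },
    #         },
    #     },
    # }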

    # First pass: collect the unit for every (theme, variable) pair and check
    # that the required fields are present
    var_to_unit = {}
    for sid, sample in data.items():
        # Required top-level fields; a KeyError here means a malformed sample
        _ = sample["LAT_LONG"]
        _ = sample["GADM_IDS"]
        _ = sample["GADM_NAMES"]
        _ = sample["COUNTRY_CODE"]

        for theme, theme_obj in sample.items():
            if theme in ("LAT_LONG", "GADM_IDS", "GADM_NAMES", "COUNTRY_CODE",
                         "SAMPLE_DATE", "SAMPLE_DEPTH_RANGE_CM", "SAMPLE_SOURCE_DATASET"):
                continue

            for var, var_obj in theme_obj.items():
                # Required per-variable fields
                _ = var_obj["value"]
                _ = var_obj["source_dataset"]
                _ = var_obj["distance_to_grid (m)"]
                unit = var_obj.get("unit", "")

                key = f"{theme}:{var}"
                if key not in var_to_unit:
                    var_to_unit[key] = unit
                elif var_to_unit[key] != unit:
                    print(f"WARNING: inconsistent unit for '{key}': "
                          f"using '{var_to_unit[key]}' but encountered '{unit}'")

    # Column header is "theme:var (unit)" when a unit is known, else "theme:var"
    def header_for(key):
        unit = var_to_unit[key]
        return f"{key} ({unit})" if unit else key

    base_cols = ["id", "LAT_LONG", "GADM_IDS", "GADM_NAMES", "COUNTRY_CODE",
                 "SAMPLE_DATE", "SAMPLE_DEPTH_RANGE_CM", "SAMPLE_SOURCE_DATASET"]
    var_cols = [header_for(key) for key in var_to_unit]
    all_cols = base_cols + var_cols

    # Per-column metadata accumulators
    meta_sources = defaultdict(set)
    meta_distances = defaultdict(list)

    # Second pass: build one row per sample
    table = []
    for sid, sample in tqdm(data.items(), desc="Building table"):
        row = OrderedDict.fromkeys(all_cols, None)

        row["id"] = sid
        row["LAT_LONG"] = sample["LAT_LONG"]
        row["GADM_IDS"] = sample["GADM_IDS"]
        row["GADM_NAMES"] = sample["GADM_NAMES"]
        row["COUNTRY_CODE"] = sample["COUNTRY_CODE"]
        row["SAMPLE_DATE"] = sample.get("SAMPLE_DATE")
        row["SAMPLE_DEPTH_RANGE_CM"] = sample.get("SAMPLE_DEPTH_RANGE_CM")
        row["SAMPLE_SOURCE_DATASET"] = sample.get("SAMPLE_SOURCE_DATASET")

        for theme, theme_obj in sample.items():
            if theme in ("LAT_LONG", "GADM_IDS", "GADM_NAMES", "COUNTRY_CODE",
                         "SAMPLE_DATE", "SAMPLE_DEPTH_RANGE_CM", "SAMPLE_SOURCE_DATASET"):
                continue

            for var, var_obj in theme_obj.items():
                key = f"{theme}:{var}"
                header = header_for(key)

                v = var_obj["value"]
                if is_null_value(v):
                    v = None
                row[header] = v

                # Record which source datasets contributed to this column
                src = var_obj["source_dataset"]
                if isinstance(src, str) and src.strip():
                    meta_sources[header].add(src)

                # Record distance-to-grid values for per-column statistics
                dist = var_obj["distance_to_grid (m)"]
                if not is_null_value(dist):
                    try:
                        meta_distances[header].append(float(dist))
                    except (TypeError, ValueError):
                        raise TypeError(
                            f"distance_to_grid (m) not numeric for '{header}' "
                            f"in sample '{sid}': {dist!r}"
                        )

        table.append(row)

    df = pd.DataFrame(table, columns=all_cols)

    for col in var_cols:
        if df[col].isna().all():
            print(f"WARNING: column has all null values -> {col}")

    def py_type(x):
        """Map a value to a coarse datatype label (None for unsupported types)."""
        # bool is checked before int because bool is a subclass of int
        if isinstance(x, bool):
            return "bool"
        if isinstance(x, int):
            return "int"
        if isinstance(x, float):
            return "float"
        if isinstance(x, str):
            return "string"
        return None

    # Per-column metadata, keyed by column header
    meta = {}
    total_rows = len(df)
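
    # Each entry of `meta` ends up roughly like this (values purely illustrative):
    # "THEME:variable (unit)": {
    #     "source_datasets": ["<dataset name>"],
    #     "distance_to_grid_stats (m)": {"min": ..., "max": ..., "mean": ..., "std": ..., "median": ...},
    #     "null_fraction": 0.12,
    #     "datatype": "float",
    #     "is_array_valued": False,
    #     "description": "",
    # }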

    for header in var_cols:
        # Distance-to-grid statistics (all None when no distances were recorded)
        dists = np.array(meta_distances[header], dtype=float) if meta_distances[header] else np.array([])
        if dists.size > 0:
            stats = {
                "min": float(np.min(dists)),
                "max": float(np.max(dists)),
                "mean": float(np.mean(dists)),
                "std": float(np.std(dists)),
                "median": float(np.median(dists)),
            }
        else:
            stats = {
                "min": None,
                "max": None,
                "mean": None,
                "std": None,
                "median": None,
            }

        # Infer the column datatype from its non-null values
        col_vals = df[header].tolist()
        first_non_null = next((v for v in col_vals if v is not None), None)

        if isinstance(first_non_null, list):
            # Array-valued column: count element types over the flattened lists
            is_array = True
            flat_vals = []
            for v in col_vals:
                if isinstance(v, list):
                    flat_vals.extend(v)
            type_counts = {}
            for v in flat_vals:
                t = py_type(v)
                if t:
                    type_counts[t] = type_counts.get(t, 0) + 1
        else:
            is_array = False
            type_counts = {}
            for v in col_vals:
                t = py_type(v)
                if t:
                    type_counts[t] = type_counts.get(t, 0) + 1

        # Majority type wins; "unknown" when the column has no typed values
        datatype = max(type_counts.items(), key=lambda x: x[1])[0] if type_counts else "unknown"
        null_fraction = float(df[header].isna().sum()) / total_rows

        meta[header] = {
            "source_datasets": sorted(meta_sources[header]),
            "distance_to_grid_stats (m)": stats,
            "null_fraction": null_fraction,
            "datatype": datatype,
            "is_array_valued": is_array,
            "description": "",
        }

    # Cleaning pass over numeric columns (scalar and array-valued)
    cleaned_cells = {}
    cleaned_columns = defaultdict(int)

    for header in var_cols:
        if meta[header]["datatype"] not in ("int", "float"):
            continue
        if not meta[header]["is_array_valued"]:
            for row_i, v in df[header].items():
                new_v = clean_numeric_scalar_value(header, v)
                # Skip when old and new are both null, or when nothing changed
                if not (pd.isna(new_v) and pd.isna(v)) and new_v != v:
                    df.at[row_i, header] = new_v
                    cleaned_cells[(row_i, header)] = new_v
                    cleaned_columns[header] += 1
        else:
            for row_i, v in df[header].items():
                new_v = clean_numeric_vector_value(header, v)
                if new_v != v:
                    df.at[row_i, header] = new_v
                    cleaned_cells[(row_i, header)] = new_v
                    cleaned_columns[header] += 1

    if cleaned_columns:
        print("Cleaned columns:")
        for header, n in sorted(cleaned_columns.items()):
            print(f"  {header}: {n} cells")
    else:
        print("Cleaned columns: none")

    # Write the cleaned values back into the nested sample dict
    for row_i, row in df.iterrows():
        sid = row["id"]
        sample = data[sid]

        for theme, theme_obj in sample.items():
            if theme in ("LAT_LONG", "GADM_IDS", "GADM_NAMES", "COUNTRY_CODE",
                         "SAMPLE_DATE", "SAMPLE_DEPTH_RANGE_CM", "SAMPLE_SOURCE_DATASET"):
                continue

            for var, var_obj in theme_obj.items():
                key = f"{theme}:{var}"
                header = header_for(key)

                if (row_i, header) in cleaned_cells:
                    var_obj["value"] = cleaned_cells[(row_i, header)]

    # Save outputs (all output files share the same directory)
    os.makedirs(os.path.dirname(OUTPUT_CSV), exist_ok=True)

    df_out = df.copy()
    df_out = df_out.replace({np.nan: None})
    df_out.to_csv(OUTPUT_CSV, index=False, encoding="utf-8", na_rep="")

    # Save the cleaned nested dict as pickle and JSON
    with open(OUTPUT_DICT_PKL, "wb") as f:
        pickle.dump(data, f)

    with open(OUTPUT_DICT_JSON, "w", encoding="utf-8") as f:
        json.dump(
            data,
            f,
            indent=2,
            ensure_ascii=False,
        )

    # Save per-column metadata and the plain list of column names
    with open(OUTPUT_META, "w", encoding="utf-8") as f:
        json.dump(meta, f, indent=2, ensure_ascii=False)

    with open(OUTPUT_META_NAMES, "w", encoding="utf-8") as f:
        json.dump(
            {"column_names": sorted(meta.keys())},
            f,
            indent=2,
            ensure_ascii=False,
        )

    print("Done.")
    print(f"Rows: {len(df)}, Columns: {len(df.columns)}")
    print(f"Cleaned cells: {len(cleaned_cells)}")
    print(
        f"Saved: {OUTPUT_CSV}, "
        f"{OUTPUT_DICT_PKL}, {OUTPUT_DICT_JSON}, "
        f"{OUTPUT_META}, {OUTPUT_META_NAMES}"
    )


if __name__ == "__main__":
    main()