# lucas-mega: src/fuse_esdac/utils.py
import builtins
import json
import keyword
import math
import re
from datetime import datetime # noqa
from pathlib import Path
from textwrap import indent
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import rasterio
from pyproj import Geod
from pyproj import Transformer
from rasterio.transform import Affine
from tqdm import tqdm
def _nan_to_none(obj):
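    """Recursively replace float NaN values with None (JSON-friendly)."""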
if isinstance(obj, dict):
return {k: _nan_to_none(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [_nan_to_none(v) for v in obj]
elif isinstance(obj, float) and math.isnan(obj):
return None
else:
return obj
def resolve_code(code: Any,
codebook: Union[str, Dict[str, Any]],
                 top_key: Optional[str] = None,
                 ignore_error_codes: Optional[List[str]] = None) -> Optional[str]:
"""
Resolve a hierarchical code into its labels by prefix-walking the code.
- If code is in ignore_error_codes or empty -> return None silently.
- If code not found in codebook and not ignored -> raise KeyError.
- top_key missing is a hard error (still raises).
"""
# normalize ignore list
ignore_error_codes = [] if ignore_error_codes is None else list(ignore_error_codes)
# accept non-str inputs; only None/empty-string -> None
if code is None or (isinstance(code, str) and not code.strip()):
return None
# normalize code to string; "3.0" -> "3"
code_str = str(code).strip()
try:
f = float(code_str)
if f.is_integer():
code_str = str(int(f))
except Exception:
# keep original string if not a number
pass
# fast path: ignored codes
if code_str in ignore_error_codes:
return None
# Load codebook
if isinstance(codebook, str):
try:
with open("src/fuse_esdac/codebooks/" + codebook + ".json", "r", encoding="utf-8") as fs:
codebook_dict = json.load(fs)
except FileNotFoundError as e:
raise FileNotFoundError(f"Codebook file not found: {codebook}") from e
except json.JSONDecodeError as e:
raise ValueError(f"Invalid JSON in codebook file: {codebook}") from e
elif isinstance(codebook, dict):
codebook_dict = codebook
else:
raise TypeError(f"codebook must be a file path (str) or dict, got {type(codebook).__name__}")
# Pick source dict (top_key remains strict)
src = codebook_dict if top_key is None else codebook_dict.get(top_key)
if src is None:
raise KeyError(f"top_key '{top_key}' not found in codebook")
if not isinstance(src, dict):
raise TypeError(f"Top-level codebook section must be a dict, got {type(src).__name__}")
labels: List[str] = []
level_dict = src
# progressive prefixes
prefixes = [code_str[:i] for i in range(1, len(code_str) + 1)]
for pref in prefixes:
if not isinstance(level_dict, dict):
raise ValueError(f"Invalid structure: expected dict at prefix '{pref}', got {type(level_dict).__name__}")
if pref not in level_dict:
break
node = level_dict[pref]
if isinstance(node, dict):
if "value" not in node:
raise KeyError(f"Missing 'value' key in node for prefix '{pref}'")
if "children" not in node:
raise KeyError(f"Missing 'children' key in node for prefix '{pref}'")
if not isinstance(node["value"], str):
raise TypeError(f"'value' for prefix '{pref}' must be a string, got {type(node['value']).__name__}")
if not isinstance(node["children"], dict):
raise TypeError(f"'children' for prefix '{pref}' must be a dict, got {type(node['children']).__name__}")
labels.append(node["value"])
level_dict = node["children"]
elif isinstance(node, str):
if not node.strip():
raise ValueError(f"Empty string label for prefix '{pref}'")
labels.append(node)
break # leaf node, stop walking
else:
raise TypeError(f"Unexpected node type for prefix '{pref}': {type(node).__name__}")
# post-check: no match -> ignore or raise
if len(labels) == 0:
if code_str in ignore_error_codes:
return None
raise KeyError(f"Code '{code_str}' not found in codebook")
return code_str + ": " + " => ".join(labels)
def as_float(number: Any, keep_str=True, ignore_substr="-999",
             factor=1.0) -> Optional[Union[float, str]]:
    """Convert to float; empty CSV values -> None; unconvertible values -> stripped string (keep_str) or raise."""
    if number is None or (isinstance(number, str) and number.strip() == ""):
        return None
    # only strings can contain the ignore marker
    if ignore_substr is not None and isinstance(number, str) and ignore_substr in number:
        return None
    try:
        return float(number) * factor
    except (ValueError, TypeError) as e:
        if keep_str:
            # conversion failed: keep the original token as a stripped string
            return str(number).strip()
        raise ValueError(f"as_float: cannot convert {number!r}") from e
def as_int_id(number: Any, allow_letters: bool = False) -> str:
"""
Convert to integer-like string.
If allow_letters is True, allow prefix/suffix letters around digits (e.g., 'AA024322BB'),
but reject strings with interleaved letters (e.g., 'A2323B232C').
"""
if number is None:
raise ValueError("as_int_id: input is None")
s = str(number).strip()
if not allow_letters:
try:
return str(int(float(s)))
except (ValueError, TypeError) as e:
raise ValueError(f"as_int_id: cannot convert {number!r}") from e
else:
# Allow only prefix/suffix letters, core must be digits
m = re.fullmatch(r"[A-Za-z]*([0-9]+)[A-Za-z]*", s)
if not m:
raise ValueError(f"as_int_id: invalid alphanumeric format {number!r}")
return m.group(1)
def _sanitize_identifier(name: str, used: set) -> str:
"""Convert arbitrary string to a unique, valid Python identifier."""
# Replace non-identifier chars with underscores and strip leading/trailing underscores
ident = re.sub(r'\W+', '_', str(name)).strip('_')
# Empty or starts with digit -> prefix with underscore
if not ident or ident[0].isdigit():
ident = f"_{ident}" if ident else "_arg"
# Avoid keywords and builtins
if keyword.iskeyword(ident) or ident in dir(builtins):
ident += "_"
# Ensure uniqueness
base = ident
i = 1
while ident in used:
ident = f"{base}_{i}"
i += 1
used.add(ident)
return ident
def make_callable(expr: str, returns_str: bool = False) -> Union[Callable, Tuple[Callable, str]]:
"""
Compile an expression string with placeholders into a callable.
Placeholders of the form 【name】 are replaced by valid Python parameter names,
preserving order and uniqueness. A mapping from safe names to original placeholders
is attached to the returned function as `_arg_map` and `_arg_order`.
Args:
expr: Expression string containing placeholders.
returns_str: If True, return (callable, function_definition_str); otherwise return callable.
Returns:
Callable or (Callable, str): The compiled function, and optionally the source code.
"""
# find placeholders
orig_vars = re.findall(r'【(.*?)】', expr)
orig_vars = list(dict.fromkeys(orig_vars)) # dedupe preserving order
# build safe arg names + mapping
used = set()
safe_args = []
arg_map = {}
for v in orig_vars:
s = _sanitize_identifier(v, used)
safe_args.append(s)
arg_map[s] = v
# replace placeholders in expression with safe arg names
body_expr = expr
for s in safe_args:
orig = arg_map[s]
body_expr = body_expr.replace(f"【{orig}】", s)
# define function text
args_str = ", ".join(safe_args)
func_str = f"def func({args_str}):\n"
func_str += indent(f"return {body_expr}", " ")
# exec in isolated globals that still see current globals
exec_globals = globals().copy()
local_vars = {}
exec(func_str, exec_globals, local_vars)
fn = local_vars["func"]
# attach mapping for callers
setattr(fn, "_arg_map", arg_map) # safe -> original
setattr(fn, "_arg_order", tuple(safe_args)) # ordered safe args
if returns_str:
return fn, func_str
return fn
def _eval_expr(expr: str, row: Dict[str, Any]) -> Any:
"""
Evaluate a schema expression against a CSV row.
- If any referenced CSV value is empty/None -> return None (no noise).
- Otherwise: print detailed error and return None.
"""
if expr is None:
return None
try:
fn, func_src = make_callable(expr, returns_str=True)
kwargs = {}
had_empty_input = False
for safe in getattr(fn, "_arg_order", []):
orig = fn._arg_map[safe]
raw = row.get(orig, None)
# normalize to str or None; treat "" as empty
if raw is None:
had_empty_input = True
kwargs[safe] = None
else:
s = str(raw)
if s == "":
had_empty_input = True
kwargs[safe] = None
else:
kwargs[safe] = s
return fn(**kwargs)
except Exception as e:
if 'had_empty_input' in locals() and had_empty_input:
return None
print("=== EVAL ERROR ===")
print("expr:", expr)
try:
print("compiled:\n", func_src)
except Exception:
pass
try:
print("kwargs:", kwargs)
except Exception:
pass
print("error:", repr(e))
print("==================")
return None
def _labels_to_list(s: Optional[str]) -> List[str]:
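    """Split a comma-separated string into a list of stripped, non-empty labels."""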
if not isinstance(s, str):
return []
parts = [p.strip() for p in s.split(",")]
return [p for p in parts if p]
def fuse_csv(
source_file: Union[str, Path],
schema: Dict[str, Any],
source_name: str,
current_dict: Dict[str, Any],
) -> Dict[str, Any]:
"""
Fuse CSV rows into hierarchical dict.
New convention:
- Thematic sections map var_name -> expression directly.
- Units parsed from "(...)" in var_name.
- SHARED merged into every variable.
- ENTRY_KEY handling:
* If ENTRY_KEY starts with '^', treat as regex to match existing keys (never create new).
* Otherwise, treat as literal key (create new if not present).
"""
df = pd.read_csv(source_file, dtype=str, keep_default_na=False, na_values=[])
out_dict = dict(current_dict) if current_dict else {}
CONTROL_KEYS = {"ENTRY_KEY", "LAT_LONG", "COUNTRY_CODE", "SAMPLE_DATE", "SAMPLE_DEPTH_RANGE_CM", "SHARED"}
thematic_keys = [k for k in schema.keys() if k not in CONTROL_KEYS]
filename_for_bar = Path(source_file).name if isinstance(source_file, (str, Path)) else str(source_file)
for _, row in tqdm(df.iterrows(), total=len(df), desc=filename_for_bar):
row_dict = row.to_dict()
entry_key_expr = schema.get("ENTRY_KEY")
entry_key_val = _eval_expr(entry_key_expr, row_dict)
if entry_key_val in (None, ""):
continue
lat_long = _eval_expr(schema.get("LAT_LONG"), row_dict)
country_code = _eval_expr(schema.get("COUNTRY_CODE"), row_dict)
sample_date = _eval_expr(schema.get("SAMPLE_DATE"), row_dict)
sample_depth_range_cm = _eval_expr(schema.get("SAMPLE_DEPTH_RANGE_CM"), row_dict)
# Shared metadata
shared_evaluated = {sk: _eval_expr(sv, row_dict)
for sk, sv in (schema.get("SHARED", {}) or {}).items()}
shared_evaluated["source_dataset"] = source_name
if "distance_to_grid (m)" not in shared_evaluated:
shared_evaluated["distance_to_grid (m)"] = 0.0
# Step 1. Resolve target entries
        if str(entry_key_val).startswith("^"):  # regex: match existing keys only
            pattern = re.compile(str(entry_key_val))
            targets = [k for k in out_dict.keys() if pattern.search(k)]
            is_new_sample = False  # never create new entries from a regex key
else: # exact match
if lat_long is None:
continue
if lat_long[0] is None or lat_long[1] is None:
continue
if entry_key_val not in out_dict:
out_dict[entry_key_val] = {}
targets = [entry_key_val]
is_new_sample = True
# Step 2. Apply update
for key in targets:
sample = out_dict[key]
if is_new_sample:
assert lat_long is not None
sample["LAT_LONG"] = lat_long
assert country_code is not None
sample["COUNTRY_CODE"] = country_code
assert sample_date is not None
sample["SAMPLE_DATE"] = sample_date
assert sample_depth_range_cm is not None
sample["SAMPLE_DEPTH_RANGE_CM"] = sample_depth_range_cm
assert source_name is not None
sample["SAMPLE_SOURCE_DATASET"] = source_name
for sec_name in thematic_keys:
sec_schema = schema.get(sec_name, {}) or {}
section_data = {}
for var_name, expr in sec_schema.items():
value = _eval_expr(expr, row_dict)
m = re.match(r"^([^\s()]+)\s*\((.+)\)\s*$", var_name)
if m:
clean_name = m.group(1).strip()
unit = m.group(2).strip()
else:
clean_name, unit = var_name.strip(), None
leaf = {"value": value}
if unit:
leaf["unit"] = unit
leaf.update(shared_evaluated)
section_data[clean_name] = leaf
existing_section = sample.get(sec_name, {})
merged_section = dict(existing_section)
for var_name, leaf in section_data.items():
if var_name not in merged_section:
merged_section[var_name] = leaf # only add if missing
sample[sec_name] = merged_section
return out_dict
def fuse_tif(
source_file: Union[str, Path],
schema: Dict[str, Any],
source_name: str,
current_dict: Dict[str, Any],
) -> Dict[str, Any]:
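    """
    Fuse GeoTIFF layers into the hierarchical dict by sampling each raster at
    every entry's LAT_LONG. A schema value may be:
    - "tif_key"                        -> sample <tif_key>.standardized.tif
    - ["tif_key", {code: label, ...}]  -> map integer codes to labels
    - ["tif_key", scale_factor]        -> multiply sampled values by a factor
    - ["key1", "key2", ...]            -> depth-profile list, one value per layer
    CRS_STRING (optional) overrides the raster CRS when the file lacks one.
    """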
out_dict = dict(current_dict) if current_dict else {}
CONTROL_KEYS = {"CRS_STRING"}
thematic_keys = [k for k in schema.keys() if k not in CONTROL_KEYS]
keys, lats, lons = [], [], []
for k, s in out_dict.items():
if "LAT_LONG" in s and s["LAT_LONG"] not in (None, "", "null"):
lat, lon = s["LAT_LONG"]
keys.append(k)
lats.append(lat)
lons.append(lon)
if not keys:
return out_dict
keys = np.array(keys)
lats = np.array(lats, dtype=float)
lons = np.array(lons, dtype=float)
tif_dir = Path(source_file)
geod = Geod(ellps="WGS84")
def _sample_one_entry(tif_entry):
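        """Sample one raster (optionally code-mapped or scaled) at all entry coordinates."""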
value_map = None
scale_factor = 1.0
if isinstance(tif_entry, list):
if len(tif_entry) == 2:
if isinstance(tif_entry[1], dict):
tif_key, value_map = tif_entry
elif isinstance(tif_entry[1], (int, float)):
tif_key, scale_factor = tif_entry
else:
raise ValueError(
f"Error parsing entry {tif_entry}: second item must be a dict (codebook) or a number (scale factor)"
)
elif len(tif_entry) == 1 and isinstance(tif_entry[0], str):
# simple single-item list, same as plain string
tif_key = tif_entry[0]
else:
# multi-entry list → likely a profile list, handle outside
raise ValueError(
f"Error parsing entry {tif_entry}: unexpected list form (did you mean a profile list?)"
)
else:
tif_key = tif_entry
tif_path = tif_dir / f"{tif_key}.standardized.tif"
with rasterio.open(tif_path) as ds:
arr = ds.read(1)
nodata = ds.nodata
inv_affine: Affine = ~ds.transform
if "CRS_STRING" in schema:
crs = rasterio.crs.CRS.from_string(schema["CRS_STRING"])
else:
crs = ds.crs
if crs is None or "LOCAL_CS" in crs.to_wkt():
raise ValueError(f"{tif_key}.standardized.tif: CRS not available. Please provide CRS_STRING")
transformer = Transformer.from_crs("EPSG:4326", crs, always_xy=True)
xs, ys = transformer.transform(lons, lats)
cols, rows = inv_affine * (xs, ys)
rows = np.round(rows).astype(int)
cols = np.round(cols).astype(int)
in_bounds = (rows >= 0) & (rows < ds.height) & (cols >= 0) & (cols < ds.width)
values = np.empty(len(keys), dtype=object)
values[:] = np.nan
dists = np.full(len(keys), np.nan, dtype=float)
if np.any(in_bounds):
r = rows[in_bounds]
c = cols[in_bounds]
raw_vals = arr[r, c]
x_centers, y_centers = ds.transform * (c, r)
lats_geo = lats[in_bounds]
lons_geo = lons[in_bounds]
if crs.is_geographic:
_, _, d_valid = geod.inv(lons_geo, lats_geo, x_centers, y_centers)
else:
xs_valid = xs[in_bounds]
ys_valid = ys[in_bounds]
d_valid = np.sqrt((xs_valid - x_centers) ** 2 + (ys_valid - y_centers) ** 2)
if nodata is not None:
nd_mask = np.isclose(raw_vals, nodata, equal_nan=True)
else:
nd_mask = np.zeros_like(raw_vals, dtype=bool)
out_vals = []
for rv, is_nd in zip(raw_vals, nd_mask):
if is_nd:
out_vals.append(np.nan)
else:
if value_map is not None:
                        try:
                            code_str = str(int(round(float(rv))))
                        except (TypeError, ValueError):
                            # non-numeric raster value: fall back to its raw string form
                            code_str = str(rv)
out_vals.append(code_str + ": " + str(value_map.get(code_str, "Unknown")))
else:
out_vals.append(float(rv) * scale_factor)
values[in_bounds] = out_vals
d_valid = np.array(d_valid, dtype=float)
d_valid[nd_mask] = np.nan
dists[in_bounds] = d_valid
return values, dists, in_bounds
all_vars = []
for sec_name in thematic_keys:
for var_name, tif_entry in schema[sec_name].items():
all_vars.append((sec_name, var_name, tif_entry))
for sec_name, var_name, tif_entry in tqdm(all_vars, desc=f"Processing variables in {tif_dir.name}"):
m = re.match(r"^(.*)\s+\(([^()]*)\)$", var_name)
if m:
clean_name, unit = m.group(1).strip(), m.group(2).strip()
else:
clean_name, unit = var_name.strip(), None
is_profile = (
isinstance(tif_entry, list)
and not (
len(tif_entry) == 2
and isinstance(tif_entry[1], (dict, int, float))
)
)
if is_profile:
per_entry_values = []
per_entry_dists = []
union_in_bounds = np.zeros(len(keys), dtype=bool)
for entry in tif_entry:
vals, dists, in_bounds = _sample_one_entry(entry)
per_entry_values.append(vals)
per_entry_dists.append(dists)
union_in_bounds |= in_bounds
for idx, key in enumerate(keys):
if not union_in_bounds[idx]:
continue
sample = out_dict[key]
section = sample.setdefault(sec_name, {})
if clean_name in section:
continue
value_list = [vals[idx] for vals in per_entry_values]
dist_candidates = []
for d in per_entry_dists:
di = d[idx]
if not (isinstance(di, float) and np.isnan(di)):
dist_candidates.append(float(di))
dist_reduced = (max(dist_candidates) if dist_candidates else None)
leaf = {
"value": value_list,
**({"unit": unit} if unit else {}),
"distance_to_grid (m)": dist_reduced,
"source_dataset": source_name
}
leaf = _nan_to_none(leaf)
section[clean_name] = leaf
else:
vals, dists, in_bounds = _sample_one_entry(tif_entry)
ib_idx = np.where(in_bounds)[0]
for ii in ib_idx:
key = keys[ii]
sample = out_dict[key]
section = sample.setdefault(sec_name, {})
if clean_name in section:
continue
raw_val = vals[ii]
val = None if (isinstance(raw_val, float) and np.isnan(raw_val)) else raw_val
dist_val = float(dists[ii]) if not np.isnan(dists[ii]) else np.nan
leaf = {
"value": val,
**({"unit": unit} if unit else {}),
"distance_to_grid (m)": dist_val,
"source_dataset": source_name
}
leaf = _nan_to_none(leaf)
section[clean_name] = leaf
return out_dict