File size: 2,856 Bytes
9bc98d9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
from pathlib import Path
import zipfile

import numpy as np
import pandas as pd

from src.process.io import prepare_dirs, unzip, unzip_nested, upload, copy_to_local
from src.process.processor import process_shp

# === Step 1: Set up local and drive directory paths ===
# Directories are derived from the dataset name ("esdac") and the name of the
# folder containing this script.
_script_folder = Path(__file__).parent.stem
local_processed_dir, drive_download_dir, drive_processed_dir = prepare_dirs(
    "esdac", _script_folder
)

# === Step 2: Extract all ZIP files from drive/download to local/processed ===
unzip(local_processed_dir, drive_download_dir)
unzip_nested(local_processed_dir)

# The ancillary table ships uncompressed, so it is copied over verbatim.
_ancillary_files = ["LUCAS2015_AncillaryData_20201007.csv"]
copy_to_local(local_processed_dir, drive_download_dir, files=_ancillary_files)

# === Step 3: Process all SHP files under local/processed ===
# process_shp returns the updated move list; presumably it appends paths of
# artifacts to be uploaded later — confirm against its implementation.
move_list = []
# Renamed the loop variable from `tif_path`: these are shapefiles, not TIFFs.
for shp_path in sorted(local_processed_dir.rglob("*.shp")):
    move_list = process_shp(shp_path, move_list)

# === Step 4: Process assets ===
# Convert each LUCAS 2015 soil-spectra CSV into one .npz per sample. Each
# archive holds a 2-column array (numeric axis value parsed from the column
# name, spectrum value), sorted by the axis.
spec_dir = local_processed_dir / "LUCAS2015_spectra/LUCAS2015_Soil_Spectra_EU28"
out_path = local_processed_dir / "assets/psd"
out_path.mkdir(parents=True, exist_ok=True)

# Metadata columns excluded from the spectral matrix. Constant across files,
# so built once instead of per CSV.
META_COLS = frozenset({"source", "SampleID", "PointID", "NUTS_0", "SampleN"})

for spec_path in sorted(spec_dir.rglob("*.csv")):
    df = pd.read_csv(spec_path)

    # Spectral columns are named either "spc.<number>" or as a plain numeric
    # string (at most one decimal point).
    spc_cols = [
        c for c in df.columns
        if c not in META_COLS
        and (c.replace(".", "", 1).isdigit() or c.startswith("spc."))
    ]

    # Parse the numeric axis value encoded in each column name.
    x_vals = np.array([
        float(c.replace("spc.", "")) if c.startswith("spc.") else float(c)
        for c in spc_cols
    ])

    # Sort columns so each saved spectrum is monotonic in x.
    sort_idx = np.argsort(x_vals)
    x_vals = x_vals[sort_idx]
    spc_cols_sorted = [spc_cols[i] for i in sort_idx]

    # Extract the whole spectral matrix once and iterate rows with zip —
    # equivalent to the previous df.iterrows() loop but far faster, since
    # iterrows builds a Series object per row.
    spectra = df[spc_cols_sorted].to_numpy(dtype=np.float32)
    for point_id, y_vals in zip(df["PointID"], spectra):
        sample_id = '0'  # main data csv does not contain sample id

        arr = np.column_stack([x_vals, y_vals])

        fname = out_path / f"lucas2015_{point_id}_{sample_id}.npz"
        np.savez(fname, psd=arr)

    # Plain string: the previous f-string had no placeholders (F541).
    print("✅ PSD Spectrum data saved in assets")

# === NEW Step: Zip PSD folder and upload only the zip ===
# Bundle the per-sample .npz spectra into one archive so the upload step moves
# a single file instead of thousands. (The redundant `psd_dir` alias for
# `out_path` was dropped, and `import zipfile` moved to the top of the file.)
zip_path = local_processed_dir / "assets/psd.zip"

with zipfile.ZipFile(zip_path, "w", compression=zipfile.ZIP_DEFLATED) as z:
    # sorted() makes the archive member order deterministic across runs;
    # Path.glob order is otherwise filesystem-dependent.
    for npz_file in sorted(out_path.glob("*.npz")):
        z.write(npz_file, arcname=npz_file.name)

print(f"✅ Created ZIP archive: {zip_path.name}")
move_list.append(zip_path)  # only zip is uploaded

# === Step 5: Collect all generated CSVs and upload to drive/processed ===
# NOTE(review): rglob picks up every CSV under local/processed — generated
# outputs as well as any source CSVs extracted earlier; that matches the
# original behavior. extend() replaces the previous manual append loop.
move_list.extend(local_processed_dir.rglob("*.csv"))

upload(local_processed_dir, drive_processed_dir, move_list)