| import os |
| import zipfile |
| import geopandas as gpd |
| import pandas as pd |
| from tqdm import tqdm |
| import warnings |
| import multiprocessing as mp |
| import sys |
|
|
| |
| warnings.filterwarnings("ignore", category=RuntimeWarning, |
| message="driver GML does not support open option DRIVER") |
| warnings.filterwarnings("ignore", category=RuntimeWarning, |
| message="Non closed ring detected. To avoid accepting it, set the OGR_GEOMETRY_ACCEPT_UNCLOSED_RING configuration option to NO") |
|
|
|
|
def _extract_commune_frames(commune_zip, commune_zip_name, all_communes, all_parcels):
    """Read the boundary (*_map.gml) and parcel (*_ple.gml) layers from one
    innermost commune zip and append the resulting GeoDataFrames to the
    accumulator lists. Read errors are reported and skipped."""
    gml_files = [f.filename for f in commune_zip.filelist if f.filename.endswith('.gml')]
    commune_gml = next((f for f in gml_files if '_map.gml' in f), None)
    parcel_gml = next((f for f in gml_files if '_ple.gml' in f), None)

    if commune_gml:
        try:
            all_communes.append(gpd.read_file(commune_zip.open(commune_gml), driver='GML'))
        except Exception as e:
            print(f"Error reading commune GML {commune_gml} from {commune_zip_name}: {e}")

    if parcel_gml:
        try:
            all_parcels.append(gpd.read_file(commune_zip.open(parcel_gml), driver='GML'))
        except Exception as e:
            print(f"Error reading parcel GML {parcel_gml} from {commune_zip_name}: {e}")


def _finalize_gdf(frames, kind):
    """Concatenate per-commune GeoDataFrames and normalise them for Parquet.

    Propagates the CRS from the first frame, coerces numeric-looking
    attribute columns to numbers (everything else to str, so pyarrow gets
    uniform dtypes), and activates the geometry column.

    :param frames: non-empty list of GeoDataFrames read from GML.
    :param kind: label used in warnings ("commune" or "parcel").
    :returns: a single normalised GeoDataFrame.
    """
    gdf = gpd.GeoDataFrame(pd.concat(frames, ignore_index=True))

    src_crs = getattr(frames[0], 'crs', None)
    if src_crs:
        try:
            gdf.crs = src_crs
        except AttributeError as e:
            print(f"Could not set CRS: {e}")
    else:
        print("WARNING: CRS information is missing from the input data.")

    # Skip BOTH possible geometry column names here. The original code only
    # skipped 'geometry', so an 'msGeometry' column failed pd.to_numeric and
    # was then stringified via astype(str), destroying the geometries before
    # set_geometry('msGeometry') could use them.
    geometry_cols = {'geometry', 'msGeometry'}
    for col in gdf.columns:
        if col in geometry_cols:
            continue
        try:
            gdf[col] = pd.to_numeric(gdf[col], errors='raise')
        except (ValueError, TypeError):
            gdf[col] = gdf[col].astype(str)

    if 'msGeometry' in gdf.columns:
        gdf = gdf.set_geometry('msGeometry')
    elif 'geometry' in gdf.columns:
        gdf = gdf.set_geometry('geometry')
    else:
        print(
            f"WARNING: No 'geometry' or 'msGeometry' column found in {kind} data. Spatial operations will not work.")
    return gdf


def process_region(region_zip_path, output_dir):
    """Processes a single region zip file to extract and save commune and parcel data.

    The region archive nests zips three levels deep (region -> city ->
    commune); each commune zip holds GML layers for the commune boundary
    (*_map.gml) and the parcels (*_ple.gml). All communes/parcels of the
    region are concatenated and written as gzip-compressed GeoParquet files
    named ``<region>_communes.geoparquet`` / ``<region>_parcels.geoparquet``
    in *output_dir*.

    :param region_zip_path: path to the region-level ``.zip`` file.
    :param output_dir: existing directory to write the GeoParquet output to.

    Errors at any nesting level are reported to stdout and skipped, so one
    corrupt archive does not abort the whole region.
    """
    region_name = os.path.basename(region_zip_path).replace(".zip", "")
    all_communes = []
    all_parcels = []

    try:
        with zipfile.ZipFile(region_zip_path, 'r') as region_zip:
            city_zip_names = [f.filename for f in region_zip.filelist if f.filename.endswith('.zip')]
            for city_zip_name in city_zip_names:
                try:
                    # ZipFile can read a nested zip straight from the open handle.
                    with zipfile.ZipFile(region_zip.open(city_zip_name), 'r') as city_zip:
                        commune_zip_names = [f.filename for f in city_zip.filelist if f.filename.endswith('.zip')]
                        for commune_zip_name in commune_zip_names:
                            try:
                                with zipfile.ZipFile(city_zip.open(commune_zip_name), 'r') as commune_zip:
                                    _extract_commune_frames(
                                        commune_zip, commune_zip_name, all_communes, all_parcels)
                            except zipfile.BadZipFile as e:
                                print(f"Bad Zip file encountered: {commune_zip_name} - {e}")
                            except Exception as e:
                                print(f"Error processing {commune_zip_name}: {e}")
                except zipfile.BadZipFile as e:
                    print(f"Bad Zip file encountered: {city_zip_name} - {e}")
                except Exception as e:
                    print(f"Error processing {city_zip_name}: {e}")
    except zipfile.BadZipFile as e:
        # Fixed: the original referenced the undefined name 'region_zip_name'
        # here, raising NameError whenever this handler actually fired.
        print(f"Bad Zip file encountered: {region_zip_path} - {e}")
    except Exception as e:
        print(f"Error processing {region_zip_path}: {e}")

    # The commune and parcel save paths were duplicated ~45 lines each;
    # both now go through _finalize_gdf.
    try:
        for frames, label, suffix in ((all_communes, "commune", "communes"),
                                      (all_parcels, "parcel", "parcels")):
            if not frames:
                continue
            gdf = _finalize_gdf(frames, label)
            gdf.to_parquet(
                os.path.join(output_dir, f"{region_name}_{suffix}.geoparquet"),
                compression='gzip')
            print(
                f"Successfully saved {region_name} {suffix} to {output_dir}/{region_name}_{suffix}.geoparquet")
    except Exception as e:
        print(f"Error saving GeoParquet files for {region_name}: {e}")
|
|
|
|
def _process_region_star(args):
    """Unpack an ``(region_zip_path, output_dir)`` tuple for Pool.imap_unordered.

    Defined at module level (not a lambda/closure) so the 'spawn' start
    method can pickle it for the worker processes.
    """
    return process_region(*args)


def process_italy_data_unzipped_parallel(root_dir, output_dir, num_processes=None):
    """Processes the Italian data in parallel, leveraging multiprocessing.

    :param root_dir: directory containing one ``<region>.zip`` per region.
    :param output_dir: directory for the GeoParquet output (created if missing).
    :param num_processes: worker-pool size; defaults to ``mp.cpu_count()``.
        Resolved at call time — the original evaluated ``mp.cpu_count()`` in
        the ``def`` line, freezing the value at import time.
    """
    os.makedirs(output_dir, exist_ok=True)
    region_zip_paths = [os.path.join(root_dir, f) for f in os.listdir(root_dir) if f.endswith('.zip')]

    # Guard retained from the original: under the 'spawn' start method a
    # worker re-importing this module must never create a nested pool.
    if __name__ == '__main__':
        if sys.platform == 'darwin':
            # 'spawn' is already the macOS default (Python 3.8+); force=True
            # avoids the RuntimeError raised when the start method has
            # already been set in this process.
            mp.set_start_method('spawn', force=True)

        jobs = [(region_zip_path, output_dir) for region_zip_path in region_zip_paths]
        with mp.Pool(processes=num_processes or mp.cpu_count()) as pool:
            # imap_unordered yields results as workers finish, so tqdm shows
            # live progress; starmap blocked until ALL regions were done and
            # the bar jumped from 0 to 100% in one step.
            list(tqdm(pool.imap_unordered(_process_region_star, jobs),
                      total=len(jobs),
                      desc="Overall Progress: Regions"))
|
|
|
|
| |
# Script entry point. The __main__ guard is required with the 'spawn' start
# method (default on macOS/Windows): worker processes re-import this module,
# and without the guard every child would call the pipeline again, spawning
# processes without bound. It also makes the module importable without
# side effects.
if __name__ == '__main__':
    root_dir = "ITALIA"
    output_dir = "output"
    num_processes = mp.cpu_count()

    process_italy_data_unzipped_parallel(root_dir, output_dir, num_processes)