import gzip
import csv
import urllib.parse

import requests
from requests.adapters import HTTPAdapter, Retry
import urllib3
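# Streams the gzipped Met Museum metadata CSV, looks up each object's
# primaryImageSmall URL via the museum's public collection API, appends
# it as a new column, and writes the result back out as a gzipped CSV.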
# Set to True to verify TLS certificates on API requests.
do_verify = False

# With verification off, silence the InsecureRequestWarning that requests
# would otherwise print for every call.
if not do_verify:
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

s = requests.Session()

# Retry transient server errors up to three times.
retries = Retry(total=3,
                backoff_factor=0.1,
                status_forcelist=[500, 502, 503, 504])

s.mount('https://', HTTPAdapter(max_retries=retries))
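# Retry note: with backoff_factor=0.1, urllib3 sleeps roughly
# backoff_factor * 2**n seconds between successive retries, so the three
# retries above add well under a second of delay per failed request.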

def load_and_clean_csv(file_path):
    """Parse the gzipped CSV, appending a primaryImageSmall column to each
    valid row. Returns (valid_rows, invalid_rows)."""
    valid_rows = []
    invalid_rows = []
    index = 0

    # Records are terminated by \r\n; newline='\r\n' keeps quoted fields
    # containing bare \n characters on a single logical line.
    with gzip.open(file_path, 'rt', newline='\r\n', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)
        expected_columns = len(header)
        header.append("primaryImageSmall")
        valid_rows.append(header)
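        # Each physical line below is one complete CSV record thanks to the
        # newline='\r\n' setting above, so it can be parsed by a fresh
        # csv.reader on its own; malformed records are kept verbatim in
        # invalid_rows for later inspection.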
        for line in f:
            object_id = "<unknown>"
            try:
                row = next(csv.reader([line]))
                index += 1
                # Grab the object ID defensively so the error branches
                # below can report it even for malformed rows.
                if len(row) > 4:
                    object_id = row[4]

                if len(row) == expected_columns:
                    image_url = fetch_primary_image_small(object_id)
                    # Percent-encode spaces and non-ASCII characters (e.g.
                    # en dashes) so the URL is usable as-is.
                    image_url = urllib.parse.quote(image_url, safe=':/')
                    row.append(image_url)
                    valid_rows.append(row)

                    if index % 100 == 0:
                        print(f"Fetched {index} image URLs")
                else:
                    print(f"Invalid: {object_id}")
                    print(row)
                    invalid_rows.append(line)
            except Exception as e:
                print(e)
                print(f"Invalid + error: {object_id}")
                invalid_rows.append(line)

    print(f"Found {len(invalid_rows)} invalid rows")
    return valid_rows, invalid_rows


def test_csv(file_path):
    """Dry run: parse the gzipped CSV and report malformed rows without
    calling the image API. Returns (valid_rows, invalid_rows)."""
    valid_rows = []
    invalid_rows = []
    index = 0

    with gzip.open(file_path, 'rt', newline='\r\n', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)
        valid_rows.append(header)
        expected_columns = len(header)

        for line in f:
            object_id = "<unknown>"
            try:
                row = next(csv.reader([line]))
                index += 1
                if len(row) > 4:
                    object_id = row[4]

                if len(row) == expected_columns:
                    print(object_id)
                    valid_rows.append(row)
                else:
                    print(f"Invalid: {object_id}")
                    print(len(row), expected_columns)
                    print(row)
                    invalid_rows.append(line)
            except Exception as e:
                print(e)
                print(f"Invalid + error: {object_id}")
                invalid_rows.append(line)

    print(f"Found {len(invalid_rows)} invalid rows")
    return valid_rows, invalid_rows
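# To sanity-check the input before the much slower API pass, run test_csv
# on the same file first, e.g.:
#     valid_rows, invalid_rows = test_csv('metadata.csv.gz')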


def fetch_primary_image_small(object_id):
    """Fetch the small primary image URL for one object from the Met's
    public collection API; returns "" on any failure."""
    url = f"https://collectionapi.metmuseum.org/public/collection/v1/objects/{object_id}"
    try:
        response = s.get(url, verify=do_verify)
        response.raise_for_status()
        data = response.json()
        # primaryImageSmall may be empty when the object has no public image.
        return data.get("primaryImageSmall", "")
    except Exception as e:
        print(f"Error fetching image for object ID {object_id}: {e}")
        return ""
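# The objects endpoint returns one JSON document per object; this script
# only reads its "primaryImageSmall" field. Illustrative (abridged) shape:
#   {"objectID": 1234, "primaryImageSmall": "https://images.metmuseum.org/...", ...}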


def save_cleaned_csv(valid_rows, output_path):
    """Write the rows back out as a gzipped UTF-8 CSV."""
    with gzip.open(output_path, 'wt', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerows(valid_rows)
    print(f"Cleaned data saved to {output_path}")
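# Usage sketch: run this file from the directory containing metadata.csv.gz;
# the enriched copy is written alongside it as metadata_images.csv.gz.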

def main():
    input_file = 'metadata.csv.gz'
    output_file = 'metadata_images.csv.gz'

    valid_rows, invalid_rows = load_and_clean_csv(input_file)

    save_cleaned_csv(valid_rows, output_file)


if __name__ == "__main__":
    main()