"""
Improved script to find all JSON files whose names contain 'chat_history'
and copy them to the data/preprocess folder under shorter, unique filenames.
"""
|
|
| import os |
| import shutil |
| import glob |
| import hashlib |
| from pathlib import Path |
|
|
def generate_short_name(original_path, counter):
    """Generate a short, unique filename for a copied chat-history file.

    The name is assembled from up to four parts, joined with underscores:
      - the model name, if a known model directory appears in the path;
      - the first three words of the question, if a dated ``YYYY-MM-DD-How_...``
        directory appears in the path (truncated to 30 characters);
      - an 8-character MD5 hash of the full original path (uniqueness);
      - a zero-padded sequence number (stable ordering).

    Args:
        original_path: Full path of the source file; its directory components
            are mined for the model name and question text.
        counter: Sequence number embedded in the filename (``id_0001`` style).

    Returns:
        A filename ending in ``.json``, e.g.
        ``vc2_good_is_the_hash_ab12cd34_id_0007.json``.
    """
    path_parts = original_path.split(os.sep)

    known_models = {'vc2', 'vc09', 'modelscope', 'latte1', 'vc10-large',
                    'sdxl-1.0', 'sd-2.1', 'sd-1.4', 'sd-3'}

    model_name = None
    dimension = None
    for part in path_parts:
        if part in known_models:
            model_name = part
        # Dated question directories look like 'YYYY-MM-DD-How_...'.  Accept
        # any 4-digit year instead of hard-coding '2024-' so the script keeps
        # working on results produced in other years.
        if part[:4].isdigit() and part[4:5] == '-' and 'How_' in part:
            # Split off the date (at most 3 dashes); the remainder is the question.
            pieces = part.split('-', 3)
            question = pieces[-1] if len(pieces) > 3 else part
            # First three words of the question, underscore-joined, '?' stripped.
            words = question.replace('How_', '').replace('_', ' ').split()[:3]
            dimension = '_'.join(words).replace('?', '')

    # An 8-char MD5 prefix of the full path keeps names unique even when the
    # model and question parts collide across runs.
    path_hash = hashlib.md5(original_path.encode()).hexdigest()[:8]

    parts = []
    if model_name:
        parts.append(model_name)
    if dimension:
        parts.append(dimension[:30])  # cap length to keep filenames short
    parts.append(f"hash_{path_hash}")
    parts.append(f"id_{counter:04d}")

    return f"{'_'.join(parts)}.json"
|
|
# Model directory names that may appear in a result path; shared by the
# summary step and the path-scanning helper below.
_KNOWN_MODEL_DIRS = frozenset({
    'vc2', 'vc09', 'modelscope', 'latte1', 'vc10-large',
    'sdxl-1.0', 'sd-2.1', 'sd-1.4', 'sd-3',
})


def _model_from_path(path):
    """Return the first known model directory found in *path*, or None."""
    for part in path.split(os.sep):
        if part in _KNOWN_MODEL_DIRS:
            return part
    return None


def find_and_copy_chat_history_files(
        source_dir="/home/data2/sltian/code/evaluation_agent_dev/ea-data/agent/t2i_results",
        dest_dir="/home/data2/sltian/code/evaluation_agent_dev/data/preprocess-t2i"):
    """Find all *chat_history*.json files under *source_dir* and copy them to *dest_dir*.

    Each copied file gets a short, unique name from generate_short_name().
    Two report files are also written into *dest_dir*:
      - detailed_file_mapping.txt: short name -> original path, one per line;
      - summary_by_model.txt: per-model file counts.

    Args:
        source_dir: Root directory searched recursively for chat-history JSON
            files. Defaults to the original hard-coded location, so existing
            callers are unaffected while tests may pass a temporary directory.
        dest_dir: Destination directory; created if it does not exist.
            Defaults to the original hard-coded location.
    """
    os.makedirs(dest_dir, exist_ok=True)

    pattern = os.path.join(source_dir, "**/*chat_history*.json")
    chat_files = glob.glob(pattern, recursive=True)
    print(f"Found {len(chat_files)} *chat_history*.json files")

    copied_files = []
    counter = 1
    for file_path in chat_files:
        short_filename = generate_short_name(file_path, counter)
        dest_file = os.path.join(dest_dir, short_filename)
        try:
            shutil.copy2(file_path, dest_file)
            copied_files.append((file_path, dest_file))
            print(f"Copied [{counter:4d}]: {os.path.basename(file_path)} -> {short_filename}")
            counter += 1
        except OSError as e:
            # Copy failures (permissions, disk full, vanished source) are
            # reported but do not abort the batch; the counter is only
            # advanced on success so IDs stay contiguous.
            print(f"Error copying {file_path}: {e}")

    print(f"\nSuccessfully copied {len(copied_files)} files to {dest_dir}")

    # Write the short-name -> original-path mapping so copies remain traceable.
    mapping_file = os.path.join(dest_dir, "detailed_file_mapping.txt")
    with open(mapping_file, "w", encoding="utf-8") as f:
        f.write("Short Filename -> Original Path\n")
        f.write("=" * 80 + "\n")
        for orig, copied in copied_files:
            short_name = os.path.basename(copied)
            f.write(f"{short_name} -> {orig}\n")

    print(f"Created detailed file mapping at: {mapping_file}")

    # Tally successful copies per model and write a small summary report.
    summary_file = os.path.join(dest_dir, "summary_by_model.txt")
    model_counts = {}
    for orig, copied in copied_files:
        model = _model_from_path(orig)
        if model:
            model_counts[model] = model_counts.get(model, 0) + 1

    with open(summary_file, "w", encoding="utf-8") as f:
        f.write("Summary by Model\n")
        f.write("=" * 30 + "\n")
        for model, count in sorted(model_counts.items()):
            f.write(f"{model}: {count} files\n")
        f.write(f"\nTotal: {sum(model_counts.values())} files\n")

    print(f"Created summary at: {summary_file}")
|
|
| if __name__ == "__main__": |
| find_and_copy_chat_history_files() |