# experiments/swap_analysis/heatmap_all_layers.py
# (Hugging Face upload metadata: user ch-min, "Add files using
#  upload-large-folder tool", commit 3404d44 verified, 7.77 kB)
"""
heatmap_all_layers.py β€” Generate delta-similarity heatmaps for ALL layers.
Reads pre-computed delta_similarity_{scale}_L{layer}_{tag}.csv files and
generates heatmap plots for every layer.
Before running, verifies that the NPZ file contains all expected layers.
If the NPZ is missing layers, the script warns and exits (no partial output).
Output structure:
results/{model}/plots/all/heatmap/heatmap_{scale}_L{layer}_all_pairs.png
results/{model}/plots/all/heatmap/heatmap_{scale}_L{layer}_both_correct.png
Usage:
python heatmap_all_layers.py --model qwen_super --scale qwen3_235b
python heatmap_all_layers.py --model qwen_super --scale qwen3_235b --overwrite
python heatmap_all_layers.py # all models, all scales
"""
import argparse
import glob
import os
import re
import matplotlib
matplotlib.use('Agg')
import numpy as np
import pandas as pd
# Import plot function and color constants from the main pipeline
import sys

# Ensure the directory containing this script is importable so the sibling
# swap_analysis module resolves regardless of the current working directory.
_HERE = os.path.dirname(os.path.abspath(__file__))
if _HERE not in sys.path:
    sys.path.insert(0, _HERE)
from swap_analysis import plot_delta_heatmap, CATEGORY_ORDER

# Default root for per-model results (overridable via --results-dir).
RESULTS_DIR = os.path.join(_HERE, 'results')
# Plot variants generated per layer: one over all pairs, one restricted to
# pairs where both models answered correctly.
TAGS = ['all_pairs', 'both_correct']
# ---------------------------------------------------------------------------
# NPZ completeness check
# ---------------------------------------------------------------------------
def check_npz_layers(npz_path: str):
    """Return the sorted layer indices present in the NPZ (via ``orig_L*`` keys).

    An empty list is returned when the file does not exist or contains no
    ``orig_L*`` entries.
    """
    if not os.path.exists(npz_path):
        return []
    archive = np.load(npz_path, allow_pickle=True)
    try:
        prefix = 'orig_L'
        indices = [int(key[len(prefix):]) for key in archive.files
                   if key.startswith(prefix)]
    finally:
        archive.close()
    indices.sort()
    return indices
def check_csv_layers(csv_dir: str, scale: str, tag: str):
    """Return sorted layer indices that have a delta_similarity CSV on disk."""
    # Glob narrows the candidates; the compiled regex extracts the layer index
    # and rejects any accidental near-matches.
    name_re = re.compile(
        rf'delta_similarity_{re.escape(scale)}_L(\d+)_{re.escape(tag)}\.csv$')
    candidates = glob.glob(
        os.path.join(csv_dir, f'delta_similarity_{scale}_L*_{tag}.csv'))
    found = [int(m.group(1)) for path in candidates
             if (m := name_re.search(os.path.basename(path)))]
    return sorted(found)
# ---------------------------------------------------------------------------
# Per-model/scale processing
# ---------------------------------------------------------------------------
def process(model: str, scale: str, model_dir: str, overwrite: bool) -> int:
    """Generate heatmaps for every layer of one (model, scale) pair.

    Verifies the NPZ layer set against the available CSVs first; if any NPZ
    layer lacks a CSV the whole pair is skipped (no partial output).
    Returns the number of plot files actually written.
    """
    npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz')
    csv_dir = os.path.join(model_dir, 'csv')
    out_dir = os.path.join(model_dir, 'plots', 'all', 'heatmap')
    # ── 1. Check NPZ completeness ────────────────────────────────────────────
    npz_layers = check_npz_layers(npz_path)
    if not npz_layers:
        print(f'  [{model}/{scale}] NPZ not found or empty: {npz_path}')
        return 0
    # Cross-check: CSVs should cover the same layers for all_pairs
    csv_layers = check_csv_layers(csv_dir, scale, 'all_pairs')
    missing = set(npz_layers) - set(csv_layers)
    if missing:
        # Refuse to emit a partial layer range — the caller should regenerate
        # the missing CSVs instead.
        print(f'  [{model}/{scale}] WARNING: {len(missing)} NPZ layers have no CSV '
              f'(e.g. L{sorted(missing)[:5]}). '
              f'Re-run inference to regenerate missing CSVs. Skipping.')
        return 0
    print(f'  [{model}/{scale}] {len(npz_layers)} layers (L{npz_layers[0]}–L{npz_layers[-1]})')
    os.makedirs(out_dir, exist_ok=True)
    # ── 2. Generate plots ────────────────────────────────────────────────────
    # Load category validity if available (for unreliable cat markers)
    cat_validity = {}
    cv_path = os.path.join(model_dir, 'json', f'category_validity_{scale}.json')
    if os.path.exists(cv_path):
        import json
        with open(cv_path) as f:
            cat_validity = json.load(f)
    # Categories whose validity entry says reliable == False; entries without
    # the key default to reliable.
    unreliable = [c for c, v in cat_validity.items() if not v.get('reliable', True)]
    saved = 0
    for i, layer in enumerate(npz_layers):
        # Single-line progress indicator (carriage return, no newline).
        print(f'    L{layer:>3} ({i+1}/{len(npz_layers)})', end='\r', flush=True)
        for tag in TAGS:
            out_path = os.path.join(out_dir, f'heatmap_{scale}_L{layer}_{tag}.png')
            if not overwrite and os.path.exists(out_path):
                continue  # already rendered; skip unless --overwrite
            csv_path = os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{tag}.csv')
            if not os.path.exists(csv_path):
                continue  # both_correct CSV may not exist for all layers
            df = pd.read_csv(csv_path, index_col=0)
            # Ensure canonical category order (drop missing, keep order)
            available = [c for c in CATEGORY_ORDER if c in df.index]
            if not available:
                continue
            df = df.loc[available, available]
            title = (f'{model.upper()} ({scale}) — Delta Heatmap L{layer} '
                     f'({"both-correct" if tag == "both_correct" else "all pairs"})')
            # Unreliable-category markers only apply to the all_pairs variant.
            cond_unreliable = unreliable if tag == 'all_pairs' else []
            plot_delta_heatmap(df, title, out_path, unreliable_cats=cond_unreliable)
            saved += 1
    print()  # newline after progress
    return saved
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main():
    """CLI entry point: walk results/, plot heatmaps for matching model/scale pairs."""
    ap = argparse.ArgumentParser(
        description='Generate delta-similarity heatmaps for all layers from pre-computed CSVs')
    ap.add_argument('--model', help='Restrict to one model directory (e.g. qwen_super)')
    ap.add_argument('--scale', help='Restrict to one scale (e.g. qwen3_235b)')
    ap.add_argument('--overwrite', action='store_true',
                    help='Regenerate plots even if they already exist')
    ap.add_argument('--results-dir', default=RESULTS_DIR,
                    help='Path to results/ directory')
    args = ap.parse_args()

    root = args.results_dir
    if not os.path.isdir(root):
        print(f'Results directory not found: {root}')
        return

    # Every sub-directory of results/ is a candidate model.
    candidates = [d for d in sorted(os.listdir(root))
                  if os.path.isdir(os.path.join(root, d))]
    if args.model:
        candidates = [d for d in candidates if d == args.model]
        if not candidates:
            print(f"Model '{args.model}' not found in {root}")
            return

    total = 0
    for model in candidates:
        model_dir = os.path.join(root, model)
        npz_dir = os.path.join(model_dir, 'npz')
        if not os.path.isdir(npz_dir):
            continue
        npz_names = sorted(n for n in os.listdir(npz_dir)
                           if n.startswith('vectors_') and n.endswith('.npz'))
        if args.scale:
            wanted = rf'vectors_{re.escape(args.scale)}\.npz$'
            npz_names = [n for n in npz_names if re.match(wanted, n)]
        if not npz_names:
            print(f'[{model}] no matching NPZ files, skipping')
            continue
        # Derive each scale name from its NPZ filename and process it.
        for name in npz_names:
            matched = re.match(r'vectors_(.+)\.npz$', name)
            if matched is None:
                continue
            total += process(model, matched.group(1), model_dir, args.overwrite)

    print(f'\nDone. Saved {total} heatmap plots.')


if __name__ == '__main__':
    main()