File size: 7,769 Bytes
3404d44 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 | """
heatmap_all_layers.py — Generate delta-similarity heatmaps for ALL layers.
Reads pre-computed delta_similarity_{scale}_L{layer}_{tag}.csv files and
generates heatmap plots for every layer.
Before running, verifies that the NPZ file contains all expected layers.
If the NPZ is missing layers, the script warns and exits (no partial output).
Output structure:
results/{model}/plots/all/heatmap/heatmap_{scale}_L{layer}_all_pairs.png
results/{model}/plots/all/heatmap/heatmap_{scale}_L{layer}_both_correct.png
Usage:
python heatmap_all_layers.py --model qwen_super --scale qwen3_235b
python heatmap_all_layers.py --model qwen_super --scale qwen3_235b --overwrite
python heatmap_all_layers.py # all models, all scales
"""
import argparse
import glob
import os
import re

import matplotlib
# Select a non-interactive backend so the script runs headless (no display);
# done immediately after importing matplotlib, before anything else
# (presumably swap_analysis imports pyplot — confirm) can lock in a backend.
matplotlib.use('Agg')
import numpy as np
import pandas as pd

# Import plot function and color constants from the main pipeline
import sys
# Ensure this script's directory is importable so `swap_analysis` resolves
# regardless of the current working directory.
_HERE = os.path.dirname(os.path.abspath(__file__))
if _HERE not in sys.path:
    sys.path.insert(0, _HERE)
from swap_analysis import plot_delta_heatmap, CATEGORY_ORDER

# Default results tree lives next to this script (overridable via --results-dir).
RESULTS_DIR = os.path.join(_HERE, 'results')
# The two pair-filter variants a CSV/plot is produced for per layer.
TAGS = ['all_pairs', 'both_correct']
# ---------------------------------------------------------------------------
# NPZ completeness check
# ---------------------------------------------------------------------------
def check_npz_layers(npz_path: str):
    """Return sorted list of layer indices present in the NPZ (via orig_L* keys).

    Parameters
    ----------
    npz_path : str
        Path to a ``vectors_{scale}.npz`` archive.

    Returns
    -------
    list of int
        Sorted layer indices parsed from ``orig_L{n}`` member names; an empty
        list if the file doesn't exist or has no ``orig_L*`` keys.
    """
    if not os.path.exists(npz_path):
        return []
    # Fix: use the NpzFile context manager so the underlying zip handle is
    # closed even if listing the members raises (the previous explicit
    # .close() was skipped on an exception).
    # NOTE(review): allow_pickle=True is kept from the original call, but
    # reading member names alone does not need it and it is a
    # deserialization risk on untrusted files — confirm before removing.
    with np.load(npz_path, allow_pickle=True) as data:
        layer_keys = [k for k in data.files if k.startswith('orig_L')]
    return sorted(int(k.replace('orig_L', '')) for k in layer_keys)
def check_csv_layers(csv_dir: str, scale: str, tag: str):
    """Return sorted list of layer indices that have a delta_similarity CSV."""
    # Glob for candidate files, then confirm each basename against a strict
    # anchored regex so the layer number is extracted only from true matches.
    candidates = glob.glob(
        os.path.join(csv_dir, f'delta_similarity_{scale}_L*_{tag}.csv'))
    name_re = re.compile(
        rf'delta_similarity_{re.escape(scale)}_L(\d+)_{re.escape(tag)}\.csv$')
    matches = (name_re.search(os.path.basename(c)) for c in candidates)
    return sorted(int(m.group(1)) for m in matches if m)
# ---------------------------------------------------------------------------
# Per-model/scale processing
# ---------------------------------------------------------------------------
def process(model: str, scale: str, model_dir: str, overwrite: bool) -> int:
    """Generate heatmaps for every layer of one model/scale pair.

    Reads per-layer ``delta_similarity_{scale}_L{n}_{tag}.csv`` files from
    ``{model_dir}/csv`` and writes one PNG per layer/tag under
    ``{model_dir}/plots/all/heatmap``.  Returns the number of plots actually
    saved (existing plots are skipped unless *overwrite* is set).  Returns 0
    without plotting when the NPZ archive is missing/empty or when any NPZ
    layer lacks its all_pairs CSV (no partial output, per module docstring).
    """
    npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz')
    csv_dir = os.path.join(model_dir, 'csv')
    out_dir = os.path.join(model_dir, 'plots', 'all', 'heatmap')
    # -- 1. Check NPZ completeness ------------------------------------------
    npz_layers = check_npz_layers(npz_path)
    if not npz_layers:
        print(f' [{model}/{scale}] NPZ not found or empty: {npz_path}')
        return 0
    # Cross-check: CSVs should cover the same layers for all_pairs
    csv_layers = check_csv_layers(csv_dir, scale, 'all_pairs')
    missing = set(npz_layers) - set(csv_layers)
    if missing:
        # Refuse to generate anything when coverage is incomplete.
        print(f' [{model}/{scale}] WARNING: {len(missing)} NPZ layers have no CSV '
              f'(e.g. L{sorted(missing)[:5]}). '
              f'Re-run inference to regenerate missing CSVs. Skipping.')
        return 0
    print(f' [{model}/{scale}] {len(npz_layers)} layers (L{npz_layers[0]}βL{npz_layers[-1]})')
    os.makedirs(out_dir, exist_ok=True)
    # -- 2. Generate plots --------------------------------------------------
    # Load category validity if available (for unreliable cat markers)
    cat_validity = {}
    cv_path = os.path.join(model_dir, 'json', f'category_validity_{scale}.json')
    if os.path.exists(cv_path):
        import json
        with open(cv_path) as f:
            cat_validity = json.load(f)
    # Categories flagged not reliable; a missing 'reliable' key counts as
    # reliable (default True), so absent metadata never marks a category.
    unreliable = [c for c, v in cat_validity.items() if not v.get('reliable', True)]
    saved = 0
    for i, layer in enumerate(npz_layers):
        # Single-line progress indicator (carriage return keeps it on one line).
        print(f' L{layer:>3} ({i+1}/{len(npz_layers)})', end='\r', flush=True)
        for tag in TAGS:
            out_path = os.path.join(out_dir, f'heatmap_{scale}_L{layer}_{tag}.png')
            if not overwrite and os.path.exists(out_path):
                continue
            csv_path = os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{tag}.csv')
            if not os.path.exists(csv_path):
                continue  # both_correct CSV may not exist for all layers
            df = pd.read_csv(csv_path, index_col=0)
            # Ensure canonical category order (drop missing, keep order)
            available = [c for c in CATEGORY_ORDER if c in df.index]
            if not available:
                continue
            df = df.loc[available, available]
            title = (f'{model.upper()} ({scale}) β Delta Heatmap L{layer} '
                     f'({"both-correct" if tag == "both_correct" else "all pairs"})')
            # Unreliable-category markers apply only to the all_pairs view.
            cond_unreliable = unreliable if tag == 'all_pairs' else []
            plot_delta_heatmap(df, title, out_path, unreliable_cats=cond_unreliable)
            saved += 1
    print()  # newline after progress
    return saved
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main():
    """CLI entry point: walk results/{model}/npz and plot every matching scale."""
    ap = argparse.ArgumentParser(
        description='Generate delta-similarity heatmaps for all layers from pre-computed CSVs')
    ap.add_argument('--model', help='Restrict to one model directory (e.g. qwen_super)')
    ap.add_argument('--scale', help='Restrict to one scale (e.g. qwen3_235b)')
    ap.add_argument('--overwrite', action='store_true',
                    help='Regenerate plots even if they already exist')
    ap.add_argument('--results-dir', default=RESULTS_DIR,
                    help='Path to results/ directory')
    args = ap.parse_args()

    root = args.results_dir
    if not os.path.isdir(root):
        print(f'Results directory not found: {root}')
        return

    # Candidate model directories, optionally narrowed to --model.
    models = sorted(
        d for d in os.listdir(root) if os.path.isdir(os.path.join(root, d)))
    if args.model:
        models = [d for d in models if d == args.model]
        if not models:
            print(f"Model '{args.model}' not found in {root}")
            return

    total = 0
    for name in models:
        mdir = os.path.join(root, name)
        npz_dir = os.path.join(mdir, 'npz')
        if not os.path.isdir(npz_dir):
            continue
        # Every vectors_{scale}.npz defines one scale to process.
        archives = sorted(
            f for f in os.listdir(npz_dir)
            if f.startswith('vectors_') and f.endswith('.npz'))
        if args.scale:
            archives = [f for f in archives
                        if re.match(rf'vectors_{re.escape(args.scale)}\.npz$', f)]
        if not archives:
            print(f'[{name}] no matching NPZ files, skipping')
            continue
        for archive in archives:
            m = re.match(r'vectors_(.+)\.npz$', archive)
            if not m:
                continue
            total += process(name, m.group(1), mdir, args.overwrite)
    print(f'\nDone. Saved {total} heatmap plots.')


if __name__ == '__main__':
    main()
|