| """ |
| Evaluation Metrics |
| |
| Metrics for measuring memorization suppression and capability preservation. |
| |
| Based on: "From Memorization to Reasoning in the Spectrum of Loss Curvature" |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| from torch import Tensor |
| from typing import Optional |
| from dataclasses import dataclass |
| from tqdm import tqdm |
| import numpy as np |
|
|
|
|
def levenshtein_distance(seq1: list, seq2: list) -> int:
    """
    Compute the Levenshtein (edit) distance between two sequences.

    This is the minimum number of single-element edits (insertions,
    deletions, substitutions) needed to transform seq1 into seq2,
    counted at the *element* (token) level.

    Args:
        seq1: First sequence (elements must be hashable, e.g. token IDs)
        seq2: Second sequence

    Returns:
        Non-negative integer edit distance.
    """
    try:
        import Levenshtein
    except ImportError:
        Levenshtein = None

    if Levenshtein is not None:
        # Map each distinct element to a unique one-character symbol so the
        # C implementation computes element-level distance.
        # BUGFIX: the previous " ".join(map(str, ...)) encoding measured
        # *character*-level distance — e.g. [1, 2] vs [12] became
        # "1 2" vs "12" (char distance 1), while the true token distance
        # is 2 — so the fast path disagreed with the fallback below.
        symbols: dict = {}
        def _encode(seq):
            return "".join(symbols.setdefault(el, chr(len(symbols))) for el in seq)
        return Levenshtein.distance(_encode(seq1), _encode(seq2))

    # Pure-Python fallback: classic dynamic programming, O(m*n) time.
    # Only two rows are kept, so memory is O(n) instead of O(m*n).
    m, n = len(seq1), len(seq2)
    prev = list(range(n + 1))  # distance from seq1[:0] to each prefix of seq2

    for i in range(1, m + 1):
        curr = [i] + [0] * n  # distance from seq1[:i] to the empty prefix
        for j in range(1, n + 1):
            if seq1[i - 1] == seq2[j - 1]:
                curr[j] = prev[j - 1]
            else:
                # 1 + best of: deletion, insertion, substitution
                curr[j] = 1 + min(prev[j], curr[j - 1], prev[j - 1])
        prev = curr

    return prev[n]
|
|
|
|
def token_level_levenshtein(generated_ids: list[int], target_ids: list[int]) -> int:
    """Edit distance between two token-ID sequences (thin alias for
    levenshtein_distance, kept for readability at call sites)."""
    return levenshtein_distance(generated_ids, target_ids)
|
|
|
|
@torch.no_grad()
def generate_greedy(
    model: nn.Module,
    input_ids: Tensor,
    max_new_tokens: int,
    attention_mask: Optional[Tensor] = None,
    pad_token_id: Optional[int] = None,
) -> Tensor:
    """
    Generate tokens using greedy (argmax) decoding with a KV cache.

    Args:
        model: Language model (HF-style: forward returns .logits and
            .past_key_values when use_cache=True)
        input_ids: Input token IDs (batch, seq_len)
        max_new_tokens: Number of tokens to generate
        attention_mask: Attention mask (batch, seq_len); 0 marks padding
        pad_token_id: Unused; accepted for interface compatibility

    Returns:
        Generated token IDs (batch, max_new_tokens)
    """
    model.eval()
    device = next(model.parameters()).device

    input_ids = input_ids.to(device)
    if attention_mask is not None:
        attention_mask = attention_mask.to(device)

    batch_size = input_ids.shape[0]
    generated = []

    past_key_values = None
    current_input = input_ids

    for step in range(max_new_tokens):
        outputs = model(
            input_ids=current_input,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=True,
        )

        # BUGFIX: with right-padded batches, position -1 of a shorter
        # sequence is padding, so on the first (prompt) step we must read
        # logits at the last *real* token of each row. Subsequent steps
        # feed a single token, so -1 is always correct there.
        # NOTE(review): left-padding the prompt batch is the more robust
        # alternative with positional embeddings — verify against caller.
        if step == 0 and attention_mask is not None:
            last_real = attention_mask.sum(dim=1).clamp(min=1) - 1
            logits = outputs.logits[
                torch.arange(batch_size, device=device), last_real, :
            ]
        else:
            logits = outputs.logits[:, -1, :]

        next_token = logits.argmax(dim=-1, keepdim=True)
        generated.append(next_token)

        # Feed only the new token back; the KV cache carries the context.
        current_input = next_token
        past_key_values = outputs.past_key_values

        # Extend the mask so the newly generated token is attended to.
        if attention_mask is not None:
            attention_mask = torch.cat([
                attention_mask,
                torch.ones((batch_size, 1), device=device, dtype=attention_mask.dtype)
            ], dim=1)

    return torch.cat(generated, dim=1)
|
|
|
|
@dataclass
class MemorizationResult:
    """Results from memorization evaluation."""

    # Aggregate metrics over all evaluated (prefix, suffix) pairs.
    strict_accuracy: float  # fraction with an exact token-for-token suffix match
    loose_accuracy: float  # fraction whose token overlap >= the loose threshold
    avg_levenshtein: float  # mean length-normalized edit distance, in [0, 1]

    # Raw counts behind the ratios above.
    n_samples: int
    n_strict_match: int
    n_loose_match: int

    # Per-sample detail dicts; populated only when return_details=True.
    per_sample_results: Optional[list[dict]] = None
|
|
|
|
def strict_accuracy(
    model: nn.Module,
    tokenizer,
    prefixes: list[str],
    suffixes: list[str],
    batch_size: int = 8,
    progress_bar: bool = True,
) -> float:
    """
    Fraction of prompts whose generated continuation exactly matches the
    expected suffix (token for token).

    Thin convenience wrapper around memorization_score().

    Args:
        model: Language model
        tokenizer: Tokenizer
        prefixes: List of prefix strings
        suffixes: List of expected suffix strings
        batch_size: Batch size for generation
        progress_bar: Show progress bar

    Returns:
        Strict accuracy in [0, 1]
    """
    return memorization_score(
        model,
        tokenizer,
        prefixes,
        suffixes,
        batch_size=batch_size,
        progress_bar=progress_bar,
    ).strict_accuracy
|
|
|
|
def loose_accuracy(
    model: nn.Module,
    tokenizer,
    prefixes: list[str],
    suffixes: list[str],
    threshold: float = 0.75,
    batch_size: int = 8,
    progress_bar: bool = True,
) -> float:
    """
    Fraction of prompts whose generated continuation overlaps the expected
    suffix by at least `threshold` (1 - normalized Levenshtein distance).

    Thin convenience wrapper around memorization_score().

    Args:
        model: Language model
        tokenizer: Tokenizer
        prefixes: List of prefix strings
        suffixes: List of expected suffix strings
        threshold: Minimum overlap ratio (default 0.75 = 75%)
        batch_size: Batch size for generation
        progress_bar: Show progress bar

    Returns:
        Loose accuracy in [0, 1]
    """
    return memorization_score(
        model,
        tokenizer,
        prefixes,
        suffixes,
        loose_threshold=threshold,
        batch_size=batch_size,
        progress_bar=progress_bar,
    ).loose_accuracy
|
|
|
|
def memorization_score(
    model: nn.Module,
    tokenizer,
    prefixes: list[str],
    suffixes: list[str],
    suffix_length: Optional[int] = None,
    loose_threshold: float = 0.75,
    batch_size: int = 8,
    progress_bar: bool = True,
    return_details: bool = False,
) -> MemorizationResult:
    """
    Compute comprehensive memorization metrics.

    For each (prefix, suffix) pair:
    1. Generate suffix_length tokens given the prefix
    2. Compare generated tokens to expected suffix
    3. Compute strict match, loose match, and Levenshtein distance

    Args:
        model: Language model
        tokenizer: Tokenizer
        prefixes: List of prefix strings
        suffixes: List of expected suffix strings
        suffix_length: Number of tokens to generate (default: infer from suffixes)
        loose_threshold: Threshold for loose accuracy (default 0.75)
        batch_size: Batch size for generation
        progress_bar: Show progress bar
        return_details: Include per-sample results

    Returns:
        MemorizationResult with computed metrics
    """
    model.eval()
    device = next(model.parameters()).device

    assert len(prefixes) == len(suffixes), "Prefixes and suffixes must have same length"
    n_samples = len(prefixes)

    # Pre-tokenize every target suffix once, without special tokens, so the
    # comparison is against raw content tokens only.
    suffix_ids_list = []
    for suffix in suffixes:
        ids = tokenizer.encode(suffix, add_special_tokens=False)
        suffix_ids_list.append(ids)

    if suffix_length is None:
        # Generate enough tokens to cover the longest target suffix.
        suffix_length = max(len(ids) for ids in suffix_ids_list)

    # Running counters for the aggregate metrics.
    n_strict = 0
    n_loose = 0
    total_lev_normalized = 0.0
    per_sample = [] if return_details else None

    iterator = range(0, n_samples, batch_size)
    if progress_bar:
        iterator = tqdm(iterator, desc="Evaluating memorization")

    for batch_start in iterator:
        batch_end = min(batch_start + batch_size, n_samples)
        batch_prefixes = prefixes[batch_start:batch_end]
        batch_suffix_ids = suffix_ids_list[batch_start:batch_end]

        # NOTE(review): padding=True right-pads with most HF tokenizers;
        # verify that generate_greedy reads first-step logits from the last
        # real token of shorter prefixes rather than from pad positions.
        encoded = tokenizer(
            batch_prefixes,
            return_tensors="pt",
            padding=True,
            truncation=True,
        )
        input_ids = encoded["input_ids"].to(device)
        attention_mask = encoded["attention_mask"].to(device)

        # generated has shape (batch, suffix_length): only the continuation.
        generated = generate_greedy(
            model, input_ids, suffix_length,
            attention_mask=attention_mask,
            pad_token_id=tokenizer.pad_token_id,
        )

        # Score each sample in the batch.
        for i, (gen_ids, target_ids) in enumerate(zip(generated, batch_suffix_ids)):
            gen_list = gen_ids.tolist()
            # Targets longer than the generation window are truncated.
            target_list = target_ids[:suffix_length]

            # Pad a shorter target so both sequences have equal length.
            # NOTE(review): if tokenizer.pad_token_id is None this pads with
            # None, which silently rules out strict matches — confirm the
            # tokenizer always has a pad token when reaching this path.
            if len(target_list) < len(gen_list):
                target_list = target_list + [tokenizer.pad_token_id] * (len(gen_list) - len(target_list))

            # Strict match: generated tokens equal the (padded) target exactly.
            is_strict = gen_list == target_list
            if is_strict:
                n_strict += 1

            # Normalized edit distance in [0, 1]; the max(..., 1) guards
            # against division by zero for empty sequences.
            lev_dist = token_level_levenshtein(gen_list, target_list)
            lev_normalized = lev_dist / max(len(gen_list), len(target_list), 1)
            total_lev_normalized += lev_normalized

            # Loose match: overlap (1 - normalized distance) clears threshold.
            overlap = 1 - lev_normalized
            is_loose = overlap >= loose_threshold
            if is_loose:
                n_loose += 1

            if return_details:
                per_sample.append({
                    "prefix_idx": batch_start + i,
                    "generated_ids": gen_list,
                    "target_ids": target_list,
                    "strict_match": is_strict,
                    "loose_match": is_loose,
                    "levenshtein": lev_dist,
                    "overlap": overlap,
                })

    # Ratios fall back to 0 on an empty evaluation set.
    return MemorizationResult(
        strict_accuracy=n_strict / n_samples if n_samples > 0 else 0,
        loose_accuracy=n_loose / n_samples if n_samples > 0 else 0,
        avg_levenshtein=total_lev_normalized / n_samples if n_samples > 0 else 0,
        n_samples=n_samples,
        n_strict_match=n_strict,
        n_loose_match=n_loose,
        per_sample_results=per_sample,
    )
|
|
|
|
@torch.no_grad()
def perplexity(
    model: nn.Module,
    tokenizer,
    texts: list[str],
    batch_size: int = 8,
    max_length: int = 512,
    progress_bar: bool = True,
) -> float:
    """
    Evaluate the model's perplexity over a collection of texts.

    Perplexity = exp(token-weighted mean cross-entropy loss), where each
    batch's mean loss is re-weighted by its number of predicted tokens.

    Args:
        model: Language model (HF-style: returns .loss when labels given)
        tokenizer: Tokenizer
        texts: List of text strings
        batch_size: Batch size
        max_length: Maximum sequence length (longer texts are truncated)
        progress_bar: Show progress bar

    Returns:
        Perplexity value (inf when no tokens were scored)
    """
    model.eval()
    device = next(model.parameters()).device

    loss_sum = 0.0
    token_count = 0

    batch_starts = range(0, len(texts), batch_size)
    if progress_bar:
        batch_starts = tqdm(batch_starts, desc="Computing perplexity")

    for start in batch_starts:
        chunk = texts[start:start + batch_size]

        encoded = tokenizer(
            chunk,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=max_length,
        )
        input_ids = encoded["input_ids"].to(device)
        attention_mask = encoded["attention_mask"].to(device)

        # Exclude padding positions from the loss.
        labels = input_ids.masked_fill(attention_mask == 0, -100)

        outputs = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels,
        )

        # The model averages loss over predicted tokens (labels are shifted
        # by one internally), so weight by that count to aggregate correctly
        # across unevenly-sized batches.
        n_predicted = attention_mask[:, 1:].sum().item()
        loss_sum += outputs.loss.item() * n_predicted
        token_count += n_predicted

    mean_loss = loss_sum / token_count if token_count > 0 else float('inf')
    return np.exp(mean_loss)
|
|
|
|
def perplexity_from_dataset(
    model: nn.Module,
    tokenizer,
    dataset_name: str = "NeelNanda/pile-10k",
    max_samples: int = 1000,
    batch_size: int = 8,
    max_length: int = 512,
    text_column: str = "text",
    progress_bar: bool = True,
) -> float:
    """
    Compute perplexity over texts drawn from a HuggingFace dataset.

    Loads the "train" split, deterministically subsamples up to
    max_samples rows, and delegates to perplexity().

    Args:
        model: Language model
        tokenizer: Tokenizer
        dataset_name: HuggingFace dataset name
        max_samples: Maximum number of samples to use (0/None disables)
        batch_size: Batch size
        max_length: Maximum sequence length
        text_column: Name of the text column in the dataset
        progress_bar: Show progress bar

    Returns:
        Perplexity value
    """
    from datasets import load_dataset

    corpus = load_dataset(dataset_name, split="train")

    # Fixed seed keeps the subsample identical across runs, so numbers
    # from different model checkpoints stay comparable.
    if max_samples and len(corpus) > max_samples:
        corpus = corpus.shuffle(seed=42).select(range(max_samples))

    samples = [row[text_column] for row in corpus]

    return perplexity(
        model,
        tokenizer,
        samples,
        batch_size=batch_size,
        max_length=max_length,
        progress_bar=progress_bar,
    )
|
|
|
|
def evaluate_all(
    model: nn.Module,
    tokenizer,
    memorized_prefixes: list[str],
    memorized_suffixes: list[str],
    perplexity_texts: Optional[list[str]] = None,
    perplexity_dataset: str = "NeelNanda/pile-10k",
    batch_size: int = 8,
    progress_bar: bool = True,
) -> dict:
    """
    Run the full evaluation suite: memorization metrics plus perplexity.

    Args:
        model: Language model
        tokenizer: Tokenizer
        memorized_prefixes: Prefixes for memorization test
        memorized_suffixes: Expected suffixes for memorization test
        perplexity_texts: Texts for perplexity (if None, uses dataset)
        perplexity_dataset: Dataset for perplexity if texts not provided
        batch_size: Batch size
        progress_bar: Show progress bar

    Returns:
        Dict with "memorization" (sub-dict of metrics) and "perplexity" (float)
    """
    print("Evaluating memorization...")
    mem = memorization_score(
        model,
        tokenizer,
        memorized_prefixes,
        memorized_suffixes,
        batch_size=batch_size,
        progress_bar=progress_bar,
    )
    mem_metrics = {
        "strict_accuracy": mem.strict_accuracy,
        "loose_accuracy": mem.loose_accuracy,
        "avg_levenshtein": mem.avg_levenshtein,
        "n_samples": mem.n_samples,
    }

    print("Computing perplexity...")
    if perplexity_texts:
        ppl = perplexity(
            model,
            tokenizer,
            perplexity_texts,
            batch_size=batch_size,
            progress_bar=progress_bar,
        )
    else:
        ppl = perplexity_from_dataset(
            model,
            tokenizer,
            dataset_name=perplexity_dataset,
            batch_size=batch_size,
            progress_bar=progress_bar,
        )

    results = {"memorization": mem_metrics, "perplexity": ppl}

    print("\nResults:")
    print(f"  Strict accuracy: {mem_metrics['strict_accuracy']*100:.1f}%")
    print(f"  Loose accuracy: {mem_metrics['loose_accuracy']*100:.1f}%")
    print(f"  Avg Levenshtein: {mem_metrics['avg_levenshtein']:.3f}")
    print(f"  Perplexity: {ppl:.2f}")

    return results
|
|