PEFT
qlora
sft
trl
qwen3
tmf921
intent-based-networking
network-slicing
rtx-6000-ada
ml-intern
nraptisss committed
Commit 7474a91 · verified · 1 parent: a91c7ff

Add weak-layer stage2 dataset builder

Files changed (1)
  1. scripts/build_weak_layer_dataset.py +119 -0
scripts/build_weak_layer_dataset.py ADDED
@@ -0,0 +1,119 @@
+ #!/usr/bin/env python3
+ """Build a weak-layer-focused second-stage SFT dataset.
+ 
+ Creates local parquet files:
+     <output_dir>/train.parquet
+     <output_dir>/validation.parquet
+     <output_dir>/manifest.json
+ 
+ The train split contains:
+ - all examples from weak target layers,
+ - extra duplicated rows for very rare weak layers up to a configurable minimum,
+ - a replay buffer sampled from non-weak layers to reduce catastrophic forgetting.
+ 
+ All eval/test reporting should still use the official research-sota OOD splits.
+ """
+ import argparse
+ import json
+ from pathlib import Path
+ 
+ import pandas as pd
+ from datasets import load_dataset
+ 
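+ # Default weak layers; each name must match a value in the dataset's
+ # "target_layer" column, which every filter below keys on.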
+ WEAK_LAYERS_DEFAULT = [
+     "o1_nrm",
+     "a1_policy",
+     "tmf921_lifecycle_report",
+     "tmf921_lifecycle_monitor",
+     "tmf921_lifecycle_scale",
+ ]
+ 
+ 
+ def parse_args():
+     p = argparse.ArgumentParser()
+     p.add_argument("--dataset", default="nraptisss/TMF921-intent-to-config-research-sota")
+     p.add_argument("--train_split", default="train_sota")
+     p.add_argument("--validation_split", default="validation")
+     p.add_argument("--output_dir", required=True)
+     p.add_argument("--weak_layers", nargs="+", default=WEAK_LAYERS_DEFAULT)
+     p.add_argument("--rare_min_per_layer", type=int, default=1500,
+                    help="Top up weak layers below this row count by duplication")
+     p.add_argument("--replay_ratio", type=float, default=0.30,
+                    help="Number of replay rows, as a fraction of the final weak-layer row count")
+     p.add_argument("--seed", type=int, default=42)
+     return p.parse_args()
+ 
+ 
+ def main():
+     args = parse_args()
+     out = Path(args.output_dir)
+     out.mkdir(parents=True, exist_ok=True)
+ 
+     ds = load_dataset(args.dataset)
+     train = ds[args.train_split].to_pandas()
+     val = ds[args.validation_split].to_pandas()
+ 
+     weak_layers = set(args.weak_layers)
+     weak_parts = []
+     layer_counts_before = {}
+     layer_counts_after = {}
+ 
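+     # For each weak layer keep all original rows, then round-robin
+     # duplicate them (with fresh ids and augmentation metadata) until
+     # the layer reaches --rare_min_per_layer.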
+     for layer in args.weak_layers:
+         part = train[train["target_layer"] == layer].copy()
+         layer_counts_before[layer] = int(len(part))
+         if len(part) == 0:
+             continue
+         if len(part) < args.rare_min_per_layer:
+             reps = []
+             needed = args.rare_min_per_layer - len(part)
+             for i in range(needed):
+                 r = part.iloc[i % len(part)].copy(deep=True)
+                 original_id = r["id"]
+                 r["id"] = f"{original_id}-stage2weak-{i:05d}"
+                 r["is_augmented"] = True
+                 r["augmentation_type"] = f"stage2_weak_duplicate_{layer}"
+                 r["source_id"] = r.get("source_id", original_id)
+                 reps.append(r)
+             if reps:
+                 part = pd.concat([part, pd.DataFrame(reps)], ignore_index=True)
+         layer_counts_after[layer] = int(len(part))
+         weak_parts.append(part)
+ 
+     weak_df = pd.concat(weak_parts, ignore_index=True) if weak_parts else pd.DataFrame(columns=train.columns)
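+     # Replay buffer: sample non-weak rows at --replay_ratio of the weak
+     # row count so stage 2 still rehearses layers it is not targeting.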
+     nonweak = train[~train["target_layer"].isin(weak_layers)].copy()
+     replay_n = min(len(nonweak), int(len(weak_df) * args.replay_ratio))
+     replay = nonweak.sample(n=replay_n, random_state=args.seed).copy() if replay_n > 0 else pd.DataFrame(columns=train.columns)
+     if "augmentation_type" in replay.columns:
+         replay["augmentation_type"] = replay["augmentation_type"].fillna("none").astype(str) + "+stage2_replay"
+     else:
+         replay["augmentation_type"] = "stage2_replay"
+ 
+     stage2 = pd.concat([weak_df, replay], ignore_index=True).sample(frac=1.0, random_state=args.seed).reset_index(drop=True)
+     stage2["stage2_role"] = stage2["target_layer"].apply(lambda x: "weak" if x in weak_layers else "replay")
+     val = val.copy()
+     val["stage2_role"] = "validation"
+ 
+     train_path = out / "train.parquet"
+     val_path = out / "validation.parquet"
+     stage2.to_parquet(train_path, index=False)
+     val.to_parquet(val_path, index=False)
+ 
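+     # Record every knob plus the resulting row counts, so a stage-2 run
+     # can be audited and re-created from manifest.json alone.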
+     manifest = {
+         "source_dataset": args.dataset,
+         "train_split": args.train_split,
+         "validation_split": args.validation_split,
+         "output_dir": str(out),
+         "weak_layers": args.weak_layers,
+         "rare_min_per_layer": args.rare_min_per_layer,
+         "replay_ratio": args.replay_ratio,
+         "seed": args.seed,
+         "rows_train_stage2": int(len(stage2)),
+         "rows_validation": int(len(val)),
+         "weak_rows_total_after_duplication": int(len(weak_df)),
+         "replay_rows": int(len(replay)),
+         "layer_counts_before": layer_counts_before,
+         "layer_counts_after": layer_counts_after,
+         # Cast counts to plain int so json.dumps works on any pandas version.
+         "stage2_role_counts": {k: int(v) for k, v in stage2["stage2_role"].value_counts().items()},
+         "target_layer_counts": {k: int(v) for k, v in stage2["target_layer"].value_counts().items()},
+     }
+     (out / "manifest.json").write_text(json.dumps(manifest, indent=2, ensure_ascii=False))
+     print(json.dumps(manifest, indent=2, ensure_ascii=False))
+ 
+ 
+ if __name__ == "__main__":
+     main()
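
The outputs are plain parquet files plus a JSON manifest, so a downstream stage-2 run can sanity-check them before training. A minimal sketch of that check (not part of this commit; the output path is hypothetical):

    import json
    from pathlib import Path

    import pandas as pd

    out = Path("data/stage2_weak")  # whatever was passed as --output_dir
    manifest = json.loads((out / "manifest.json").read_text())
    stage2 = pd.read_parquet(out / "train.parquet")

    # The weak/replay mix on disk should match what the builder recorded.
    assert stage2["stage2_role"].value_counts().to_dict() == manifest["stage2_role_counts"]

    # Every weak layer that had any rows was topped up to the minimum.
    assert all(n >= manifest["rare_min_per_layer"]
               for n in manifest["layer_counts_after"].values())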