anonymouscla committed
Commit a42b388 · verified · parent: f315b81

Initial anonymous release: Qwen3.5-9B judge LoRA (subq+human, ckpt-294)

README.md ADDED
@@ -0,0 +1,70 @@
---
base_model: Qwen/Qwen3.5-9B
library_name: peft
pipeline_tag: text-generation
tags:
- lora
- peft
- judge
- video-evaluation
- anonymous-release
---

# Anonymous Release — Judge LoRA Adapter (Qwen3.5-9B)

A LoRA adapter for **Qwen/Qwen3.5-9B** trained as a judge model that scores
generated videos against physical-law sub-rubrics derived from text prompts.
Released anonymously alongside the companion dataset
[`anonymouscla/physground`](https://huggingface.co/datasets/anonymouscla/physground).

## Files

```
adapter_config.json        # PEFT/LoRA config (base_model = Qwen/Qwen3.5-9B)
adapter_model.safetensors  # LoRA weights (~166 MiB)
additional_config.json     # ms-swift extras (lora_dtype / lr ratios)
training_args.json         # sanitized training hyperparameters
```
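
`adapter_config.json` is the source of truth for the adapter's geometry. A
quick sanity check before loading, as a minimal sketch that assumes only the
file names listed above:

```python
import json

# Confirm the adapter targets the expected base model and hyperparameters.
with open("adapter_config.json") as f:
    cfg = json.load(f)

assert cfg["base_model_name_or_path"] == "Qwen/Qwen3.5-9B"
print(cfg["r"], cfg["lora_alpha"], cfg["lora_dropout"])  # 32 64 0.05
```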

## Training

| Item | Value |
| --- | --- |
| Base model | `Qwen/Qwen3.5-9B` |
| Tuning method | LoRA via PEFT (rank 32, α 64, dropout 0.05) |
| Target modules | All linear layers in the language model (vision tower frozen; merger limited to `linear_fc1`/`linear_fc2`) |
| Precision | bf16 with gradient checkpointing |
| Optimizer | AdamW (fused), lr = 1e-4, cosine schedule, warmup 5% |
| Batch | 1 × 8 grad-accum × 4 GPUs (global batch 32) |
| Epochs / steps | 1.0 epoch / 294 steps |
| Best eval loss | 0.1063 (step 294) |
| Framework | ms-swift 4.1.2, PEFT 0.19.1, DeepSpeed ZeRO-2 |

The training mixture combines automatically derived sub-question judgements
with human-rated samples (the `subq+human` split); at a global batch of 32,
the 294 steps of the single epoch correspond to roughly 9.4k training
examples. See the companion anonymous dataset for prompts, physical-law
tags, and example videos.

## Usage

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "Qwen/Qwen3.5-9B"
adapter_dir = "."  # this directory

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="bfloat16", device_map="auto")
model = PeftModel.from_pretrained(base, adapter_dir)
model.eval()
```
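
For adapter-free deployment, the LoRA weights can be folded into the base
model with PEFT's standard merge; a minimal sketch (the output path is
illustrative, not part of this release):

```python
# Optional: merge the LoRA deltas into the base weights and save a standalone model.
merged = model.merge_and_unload()
merged.save_pretrained("qwen3_5-9b-judge-merged")
tokenizer.save_pretrained("qwen3_5-9b-judge-merged")
```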

The adapter expects the standard Qwen 3.5 chat template (`qwen3_5`) and a
prompt that asks the judge to answer one or more sub-rubric questions about a
candidate video frame/caption. Greedy decoding (`temperature = 0`) with
`max_new_tokens = 64` matches the training-time generation config.

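As a hedged sketch of a single judging call, assuming the sub-question wording
below (the real prompt formats live in the companion dataset):

```python
# Hypothetical sub-rubric question about a candidate video caption.
messages = [{
    "role": "user",
    "content": (
        "Sub-question: does the falling object accelerate downward as gravity "
        "requires? Answer yes or no, then give a one-sentence justification.\n\n"
        "Caption: A cup slides off the table and drops straight to the floor."
    ),
}]

inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Greedy decoding with max_new_tokens=64, matching the training-time config.
output = model.generate(inputs, do_sample=False, max_new_tokens=64)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```
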
## License

The base model is released by its original authors; this LoRA adapter is
shared for anonymous review purposes. No identifying metadata is included.
adapter_config.json ADDED
@@ -0,0 +1,40 @@
{
  "alora_invocation_tokens": null,
  "alpha_pattern": {},
  "arrow_config": null,
  "auto_mapping": null,
  "base_model_name_or_path": "Qwen/Qwen3.5-9B",
  "bias": "none",
  "corda_config": null,
  "ensure_weight_tying": false,
  "eva_config": null,
  "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 64,
  "lora_bias": false,
  "lora_dropout": 0.05,
  "lora_ga_config": null,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": [],
  "peft_type": "LORA",
  "peft_version": "0.19.1",
  "qalora_group_size": 16,
  "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": "^(model\\.language_model(?=\\.).*\\.(o_proj|out_proj|in_proj_qkv|gate_proj|k_proj|in_proj_z|down_proj|v_proj|q_proj|in_proj_b|up_proj|in_proj_a)|model\\.visual\\.merger(?=\\.).*\\.(linear_fc2|linear_fc1))$",
  "target_parameters": null,
  "task_type": "CAUSAL_LM",
  "trainable_token_indices": null,
  "use_bdlora": null,
  "use_dora": false,
  "use_qalora": false,
  "use_rslora": false
}
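
PEFT treats a string-valued `target_modules` as a full-match regular
expression over module names. A small sketch of what this pattern selects,
using hypothetical module paths in the Qwen3.5-9B layout:

```python
import re

# target_modules from adapter_config.json (single-escaped as a Python literal).
pattern = re.compile(
    r"^(model\.language_model(?=\.).*\.(o_proj|out_proj|in_proj_qkv|gate_proj"
    r"|k_proj|in_proj_z|down_proj|v_proj|q_proj|in_proj_b|up_proj|in_proj_a)"
    r"|model\.visual\.merger(?=\.).*\.(linear_fc2|linear_fc1))$"
)

print(bool(pattern.match("model.language_model.layers.0.self_attn.q_proj")))  # True
print(bool(pattern.match("model.visual.merger.mlp.linear_fc1")))              # True
print(bool(pattern.match("model.visual.blocks.0.attn.qkv")))                  # False (vision tower frozen)
```
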
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:880543b4bbc572e58980d58690b23ea9262d7ecbdd980bdf5a9139dfe022c881
size 174336432
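
A downloaded copy can be checked against the pointer above; a minimal sketch
using only the values recorded in this file:

```python
import hashlib

# Verify the safetensors blob against the LFS pointer's sha256.
h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

expected = "880543b4bbc572e58980d58690b23ea9262d7ecbdd980bdf5a9139dfe022c881"
print(h.hexdigest() == expected)  # True for an intact download
```
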
additional_config.json ADDED
@@ -0,0 +1 @@
{"lora_dtype": null, "lorap_lr_ratio": null, "lorap_emb_lr": 1e-06}
training_args.json ADDED
@@ -0,0 +1,53 @@
{
  "_comment": "Sanitized excerpt of the training configuration. Local paths and tracking IDs removed.",
  "base_model": "Qwen/Qwen3.5-9B",
  "model_type": "qwen3_5",
  "template": "qwen3_5",
  "task_type": "causal_lm",
  "torch_dtype": "bfloat16",
  "max_length": 8192,
  "max_new_tokens": 64,
  "tuner": {
    "type": "lora",
    "lora_rank": 32,
    "lora_alpha": 64,
    "lora_dropout": 0.05,
    "lora_bias": "none",
    "target_modules": "all-linear (language model only; vision merger limited to linear_fc1/linear_fc2)",
    "use_dora": false,
    "use_rslora": false,
    "freeze_vit": true,
    "freeze_aligner": false
  },
  "optimizer": {
    "name": "adamw_torch_fused",
    "learning_rate": 1e-4,
    "weight_decay": 0.1,
    "adam_beta1": 0.9,
    "adam_beta2": 0.95,
    "adam_epsilon": 1e-8,
    "max_grad_norm": 1.0,
    "lr_scheduler_type": "cosine",
    "warmup_ratio": 0.05,
    "aligner_lr": 2e-6
  },
  "training": {
    "num_train_epochs": 1.0,
    "per_device_train_batch_size": 1,
    "gradient_accumulation_steps": 8,
    "world_size": 4,
    "global_batch_size": 32,
    "bf16": true,
    "gradient_checkpointing": true,
    "seed": 42,
    "data_seed": 42,
    "deepspeed_zero_stage": 2,
    "total_steps": 294,
    "best_eval_loss": 0.1063,
    "best_step": 294
  },
  "framework": {
    "ms_swift_version": "4.1.2",
    "peft_version": "0.19.1"
  }
}