|
|
| import os |
| import sys |
| import argparse |
| import json |
| import torch |
| from dataclasses import dataclass, field |
| from typing import Optional, Dict, Any |
|
|
| |
# Make the VidChain project root importable: when launched from inside a
# VidChain checkout subdirectory, walk two levels up; otherwise assume the
# current directory already is the project root.
# NOTE(review): this relies on os.getcwd(), so the result depends on where
# the script is launched from — confirm it is always run from the expected
# directory (os.path.dirname(__file__) would be location-independent).
current_dir = os.getcwd()
if 'VidChain' in current_dir:
    root_dir = os.path.join(current_dir, "..", "..")
else:
    root_dir = current_dir
sys.path.append(root_dir)


# Import the full training stack up front so a broken environment fails fast
# with a readable message instead of a deep traceback mid-training.
try:
    import transformers
    from transformers import HfArgumentParser
    from vtimellm.train.train_dpo import (
        ModelArguments,
        TrainingArguments,
        DataArguments,
        train
    )
    from vtimellm.train.dataset import make_supervised_dpo_data_module
    from vtimellm.model import VTimeLLMLlamaForCausalLM
    from vtimellm.model.builder import load_lora
    from vtimellm.mm_utils import print_trainable_parameters
    from trl.trl.trainer import DPOTrainer
    print("✓ Training modules imported successfully")
except ImportError as e:
    print(f"✗ Error importing training modules: {e}")
    print("Make sure you're in the correct environment and all dependencies are installed")
    sys.exit(1)
|
|
|
|
@dataclass
class DPOConfig:
    """Configuration class for DPO training demo.

    A single flat container for every path and hyperparameter the demo
    needs; `create_training_arguments` expands it into the ModelArguments /
    DataArguments / TrainingArguments triple used by the training code.
    """

    # --- Model checkpoints (one per VTimeLLM training stage) ---
    model_name_or_path: str = "checkpoints/vicuna-7b-v1.5"
    stage2_path: str = "checkpoints/vtimellm-vicuna-v1-5-7b-stage2"
    stage3_path: str = "checkpoints/vtimellm-vicuna-v1-5-7b-stage3"
    stage4_path: str = "checkpoints/vtimellm-vicuna-v1-5-7b-activitynet-stage4"
    # Stage-1 multimodal projector weights, loaded by initialize_vision_modules.
    pretrain_mm_mlp_adapter: str = "checkpoints/vtimellm-vicuna-v1-5-7b-stage1/mm_projector.bin"

    # --- Dataset locations ---
    data_path: str = "data/activitynet/mdpo-train.json"  # DPO preference data
    data_folder: str = "data/activitynet/videos/train"   # raw videos
    feat_folder: str = "data/activitynet/clipvitl14-vtimellm.pth"  # precomputed CLIP features

    # --- Core optimization settings ---
    output_dir: str = "outputs/vtimellm-dpo-demo"
    num_train_epochs: int = 1
    per_device_train_batch_size: int = 2
    gradient_accumulation_steps: int = 4
    learning_rate: float = 1e-6
    model_max_length: int = 2048

    # --- DPO loss parameters (consumed by TrainingArguments) ---
    beta: float = 0.5       # DPO beta (implicit-reward temperature)
    dpo_alpha: float = 1.0  # presumably weights the DPO vs SFT loss terms — confirm in train_dpo
    gamma: float = 0.0      # NOTE(review): semantics defined in train_dpo — verify before tuning

    # --- LoRA adapter settings ---
    lora_enable: bool = True
    lora_r: int = 64
    lora_alpha: int = 128
    lora_dropout: float = 0.05

    # --- Runtime / logging / checkpointing ---
    bf16: bool = True
    gradient_checkpointing: bool = True
    dataloader_num_workers: int = 4
    logging_steps: int = 10
    save_steps: int = 500
    save_total_limit: int = 3
    warmup_ratio: float = 0.1
    weight_decay: float = 0.0
    freeze_mm_mlp_adapter: bool = True  # keep stage-1 projector frozen during DPO
|
|
|
|
def check_training_requirements(config: "DPOConfig") -> bool:
    """Verify that models, data, hardware, and output paths are ready.

    Checks, in order:
      1. All model checkpoint paths exist on disk.
      2. The DPO training JSON and the precomputed feature file exist.
      3. A CUDA GPU is available (advisory only; CPU still passes).
      4. The output directory exists (created if missing).

    Args:
        config: DPOConfig holding all paths used by the demo.

    Returns:
        True when every hard requirement is satisfied, False otherwise.
        GPU availability and memory size only produce warnings.
    """
    print("🔍 Checking training requirements...")

    # 1. Model checkpoints — every stage must be present before training.
    model_files = [
        config.model_name_or_path,
        config.stage2_path,
        config.stage3_path,
        config.stage4_path,
        config.pretrain_mm_mlp_adapter,
    ]
    missing_files = [p for p in model_files if not os.path.exists(p)]
    if missing_files:
        print("❌ Missing model files:")
        for file_path in missing_files:
            print(f"  - {file_path}")
        print("\nPlease download the required model checkpoints.")
        return False

    # 2. Training data and precomputed visual features.
    if not os.path.exists(config.data_path):
        print(f"❌ Training data not found: {config.data_path}")
        print("Please prepare your DPO training data.")
        return False
    # Bug fix: feat_folder was previously never checked, so the requirements
    # check could pass while training would still fail when loading features.
    if not os.path.exists(config.feat_folder):
        print(f"❌ Feature file not found: {config.feat_folder}")
        print("Please prepare the precomputed video features.")
        return False

    # 3. Hardware — warnings only, never fails the check.
    if not torch.cuda.is_available():
        print("⚠ No CUDA GPU available. Training will be very slow on CPU.")
    else:
        gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
        print(f"✓ GPU available: {torch.cuda.get_device_name(0)} ({gpu_memory:.1f} GB)")
        if gpu_memory < 16:
            print("⚠ Warning: GPU memory is less than 16GB. Consider reducing batch size.")

    # 4. Output directory (idempotent).
    os.makedirs(config.output_dir, exist_ok=True)
    print(f"✓ Output directory: {config.output_dir}")

    print("✅ Training requirements check completed")
    return True
|
|
|
|
def create_training_arguments(config: DPOConfig) -> tuple:
    """Expand the flat demo config into the three argument objects used by
    the VTimeLLM DPO training code.

    Returns:
        (model_args, data_args, training_args)
    """
    model_args = ModelArguments(
        model_name_or_path=config.model_name_or_path,
        stage2_path=config.stage2_path,
        stage3_path=config.stage3_path,
        stage4_path=config.stage4_path,
        pretrain_mm_mlp_adapter=config.pretrain_mm_mlp_adapter,
        version="v1",
    )

    data_args = DataArguments(
        data_path=config.data_path,
        data_folder=config.data_folder,
        feat_folder=config.feat_folder,
    )

    # Group the many TrainingArguments fields by concern, then merge them
    # into a single constructor call. Values are identical to passing each
    # keyword directly.
    optim_kwargs = dict(
        output_dir=config.output_dir,
        num_train_epochs=config.num_train_epochs,
        per_device_train_batch_size=config.per_device_train_batch_size,
        gradient_accumulation_steps=config.gradient_accumulation_steps,
        learning_rate=config.learning_rate,
        model_max_length=config.model_max_length,
        bf16=config.bf16,
        gradient_checkpointing=config.gradient_checkpointing,
        dataloader_num_workers=config.dataloader_num_workers,
        logging_steps=config.logging_steps,
        save_steps=config.save_steps,
        save_total_limit=config.save_total_limit,
        warmup_ratio=config.warmup_ratio,
        weight_decay=config.weight_decay,
        freeze_mm_mlp_adapter=config.freeze_mm_mlp_adapter,
    )
    dpo_kwargs = dict(
        beta=config.beta,
        dpo_alpha=config.dpo_alpha,
        gamma=config.gamma,
        train4dpo=True,
    )
    lora_kwargs = dict(
        lora_enable=config.lora_enable,
        lora_r=config.lora_r,
        lora_alpha=config.lora_alpha,
        lora_dropout=config.lora_dropout,
    )
    runtime_kwargs = dict(
        # Single-process, single-GPU defaults for the demo.
        no_cuda=False,
        local_rank=-1,
        dataloader_pin_memory=False,
        training_stage=3,
        finetuning=True,
        evaluation_strategy="no",
        save_strategy="steps",
        lr_scheduler_type="cosine",
        tf32=True,
        report_to="none",
        remove_unused_columns=False,
    )
    training_args = TrainingArguments(
        **optim_kwargs, **dpo_kwargs, **lora_kwargs, **runtime_kwargs
    )

    return model_args, data_args, training_args
|
|
|
|
def load_and_prepare_model(model_args, training_args):
    """Load the base LLM, merge staged LoRA weights, and attach fresh adapters.

    Pipeline:
      1. Load the base causal LM and its tokenizer from model_name_or_path.
      2. Initialize the vision modules (mm projector) from model_args.
      3. Sequentially load and merge the stage-2/3/4 LoRA checkpoints so the
         starting weights reflect all previous training stages — the merge
         order matters and must not be changed.
      4. If LoRA is enabled, wrap the merged model with new trainable LoRA
         adapters on every Linear layer (excluding multimodal modules and
         lm_head).

    Returns:
        (model, tokenizer) ready for DPO training on the GPU.
    """
    print("🤖 Loading and preparing model...")

    # NOTE(review): training_args.cache_dir is not set by
    # create_training_arguments — presumably it exists as a default on the
    # project's TrainingArguments; confirm.
    model = VTimeLLMLlamaForCausalLM.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        torch_dtype=torch.bfloat16 if training_args.bf16 else torch.float16
    )
    # KV-cache is useless during training and conflicts with gradient
    # checkpointing.
    model.config.use_cache = False

    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        model_max_length=training_args.model_max_length,
        padding_side="right",
        use_fast=False,
    )
    # Reuse <unk> as the pad token (common for LLaMA-family tokenizers that
    # ship without one).
    tokenizer.pad_token = tokenizer.unk_token

    # Attach the visual projector (loads pretrain_mm_mlp_adapter weights).
    model.get_model().initialize_vision_modules(model_args)
    model.cuda()

    # Merge each prior training stage into the base weights, one at a time.
    print("📥 Loading stage 2 weights...")
    model = load_lora(model, model_args.stage2_path)
    model = model.merge_and_unload()

    print("📥 Loading stage 3 weights...")
    model = load_lora(model, model_args.stage3_path)
    model = model.merge_and_unload()

    print("📥 Loading stage 4 weights...")
    model = load_lora(model, model_args.stage4_path)
    model = model.merge_and_unload()

    # Fresh, trainable LoRA adapters for the DPO stage.
    if training_args.lora_enable:
        from peft import LoraConfig, get_peft_model

        def find_all_linear_explicit_names(model):
            # Collect names of all Linear modules, skipping the multimodal
            # stack (projector / vision tower) and the LM head.
            cls = torch.nn.Linear
            lora_module_names = set()
            multimodal_keywords = ['mm_projector', 'vision_tower', 'vision_resampler']
            for name, module in model.named_modules():
                if any(mm_keyword in name for mm_keyword in multimodal_keywords):
                    continue
                if isinstance(module, cls):
                    lora_module_names.add(name)
            if 'lm_head' in lora_module_names:
                lora_module_names.remove('lm_head')
            return list(lora_module_names)

        explicit_linear = find_all_linear_explicit_names(model)
        lora_config = LoraConfig(
            r=training_args.lora_r,
            lora_alpha=training_args.lora_alpha,
            target_modules=explicit_linear,
            lora_dropout=training_args.lora_dropout,
            bias="none",
            task_type="CAUSAL_LM",
        )

        print("🔧 Adding LoRA adapters...")
        model = get_peft_model(model, lora_config)
        print_trainable_parameters(model)

    # Ensure the (possibly re-wrapped) model lives on the GPU.
    model = model.cuda()

    return model, tokenizer
|
|
|
|
def save_training_results(model, trainer, training_args, config):
    """Persist trained weights (LoRA + non-LoRA) and a JSON summary of the run."""
    print("💾 Saving training results...")

    if training_args.lora_enable:
        from vtimellm.train.train_dpo import get_peft_state_maybe_zero_3, get_peft_state_non_lora_maybe_zero_3

        lora_weights = get_peft_state_maybe_zero_3(
            model.named_parameters(), training_args.lora_bias
        )
        extra_weights = get_peft_state_non_lora_maybe_zero_3(
            model.named_parameters()
        )

        # Only the main process (rank 0, or -1 for single-process) writes.
        if training_args.local_rank in [0, -1]:
            model.config.save_pretrained(training_args.output_dir)
            model.save_pretrained(training_args.output_dir, state_dict=lora_weights)
            torch.save(extra_weights, os.path.join(training_args.output_dir, 'non_lora_trainables.bin'))

    # Record the configuration that produced this run for reproducibility.
    summary = {
        "model_paths": {
            key: getattr(config, key)
            for key in ("model_name_or_path", "stage2_path", "stage3_path", "stage4_path")
        },
        "training_params": {
            key: getattr(config, key)
            for key in ("num_train_epochs", "per_device_train_batch_size",
                        "learning_rate", "beta", "dpo_alpha")
        },
        "lora_params": {
            key: getattr(config, key)
            for key in ("lora_r", "lora_alpha", "lora_dropout")
        },
    }

    summary_path = os.path.join(training_args.output_dir, "training_config.json")
    with open(summary_path, "w") as f:
        json.dump(summary, f, indent=2)

    print(f"✅ Results saved to: {training_args.output_dir}")
|
|
|
|
def demo_dpo_training(config: DPOConfig):
    """Run the end-to-end DPO training demo on a single GPU.

    Steps: environment setup → requirements check → build arguments →
    load/prepare model → build trainer → train → save results.

    Args:
        config: DPOConfig with all paths and hyperparameters.

    Returns:
        True on success, False when requirements are unmet or training fails
        (the exception is printed, not re-raised).
    """
    print("🎬 VTimeLLM DPO Training Demo")
    print("=" * 50)

    # Pin everything to GPU 0 for the single-GPU demo.
    print("🔧 Setting up single GPU training...")
    if torch.cuda.is_available():

        torch.cuda.set_device(0)
        print(f"✓ Using GPU: {torch.cuda.get_device_name(0)}")

        # NOTE(review): CUDA_VISIBLE_DEVICES is set *after* torch.cuda has
        # already been initialized above, so it likely has no effect on this
        # process — confirm whether it is only meant for child processes.
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        os.environ['OMPI_COMM_WORLD_SIZE'] = '1'
        print("✓ Environment variables set for single GPU")
    else:
        print("⚠ No CUDA GPU available")

    # Fail fast before touching any heavyweight model loading.
    if not check_training_requirements(config):
        print("❌ Training requirements not met. Please fix the issues above.")
        return False

    try:

        model_args, data_args, training_args = create_training_arguments(config)


        model, tokenizer = load_and_prepare_model(model_args, training_args)

        # NOTE(review): create_dpo_trainer and train_model are not defined or
        # imported anywhere in this file — confirm they exist at runtime,
        # otherwise this raises NameError (caught and reported below).
        trainer = create_dpo_trainer(model, tokenizer, data_args, training_args)


        train_model(trainer, training_args)


        save_training_results(model, trainer, training_args, config)

        print("\n🎉 DPO Training Demo Completed Successfully!")
        print(f"📁 Model saved to: {config.output_dir}")
        print("\nYou can now use the trained model for inference!")

        return True

    except Exception as e:
        # Broad catch is intentional for a demo: report and return False
        # instead of crashing the caller.
        print(f"❌ Training failed with error: {e}")
        import traceback
        traceback.print_exc()
        return False
|
|
|
|
def create_sample_config():
    """Return a DPOConfig scaled down for a quick, low-memory demo run."""
    demo_overrides = {
        "per_device_train_batch_size": 1,
        "gradient_accumulation_steps": 2,
        "num_train_epochs": 1,
        "logging_steps": 5,
        "save_steps": 100,
        "save_total_limit": 2,
    }
    return DPOConfig(**demo_overrides)
|
|
|
|
def main():
    """Command-line entry point for the DPO training demo.

    Flags:
        --check-only: validate paths/GPU requirements and exit without
            starting training.

    Running with no flags preserves the previous behavior: the full demo is
    executed with the default configuration.
    """
    # Fix: the docstring promised a CLI, but `check_only` was a hard-coded
    # dead flag; argparse (imported at the top of this file) now provides it.
    parser = argparse.ArgumentParser(description="VTimeLLM DPO training demo")
    parser.add_argument(
        "--check-only",
        action="store_true",
        help="Only verify training requirements; do not start training.",
    )
    args = parser.parse_args()

    config = DPOConfig()
    print("⚙️ Using default configuration")
    if args.check_only:
        # Dry run: report missing checkpoints/data and exit.
        check_training_requirements(config)
    else:
        # Full demo: requirements check + model prep + DPO training.
        demo_dpo_training(config)
|
|
|
|
# Standard entry guard: run the demo only when executed as a script, not
# when this module is imported.
if __name__ == "__main__":
    main()
|
|