#!/usr/bin/env bash
# Launch G2RPO training (HPS + CLIP merged reward) on 4 local GPUs via torchrun,
# resuming from the step-200 checkpoint and running to step 301
# (i.e. ~100 additional optimizer steps, checkpointing every 10).
set -euo pipefail

# Common root for all G2RPO checkpoints/data — factored out of the 8 paths below.
readonly G2RPO_ROOT=/mnt/dolphinfs/ssd_pool/docker/user/hadoop-videogen-hl/hadoop-camera3d/zhangshengjun/checkpoints/G2RPO

CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --nnodes=1 --nproc_per_node=4 --node_rank 0 \
  fastvideo/train_g2rpo_hps_clip_merge.py \
  --seed 42 \
  --pretrained_model_name_or_path "${G2RPO_ROOT}/ckpt/flux" \
  --resume_ckpt "${G2RPO_ROOT}/save_exp/hps_clip_merge_step/ckpt/checkpoint-200-0" \
  --hps_path "${G2RPO_ROOT}/ckpt/hps/HPS_v2.1_compressed.pt" \
  --hps_clip_path "${G2RPO_ROOT}/ckpt/CLIP-ViT-H-14-laion2B-s32B-b79K/open_clip_pytorch_model.bin" \
  --clip_score_path "${G2RPO_ROOT}/ckpt/clip_score" \
  --data_json_path "${G2RPO_ROOT}/rl_embeddings/videos2caption.json" \
  --train_batch_size 1 \
  --num_latent_t 1 \
  --sp_size 1 \
  --train_sp_batch_size 1 \
  --dataloader_num_workers 4 \
  --max_train_steps 301 \
  --init_steps 200 \
  --learning_rate 2e-6 \
  --mixed_precision bf16 \
  --checkpointing_steps 10 \
  --cfg 0.0 \
  --output_dir "${G2RPO_ROOT}/save_exp/hps_clip_merge_resume_200" \
  --h 1024 \
  --w 1024 \
  --t 1 \
  --sampling_steps 16 \
  --eta 0.7 \
  --lr_warmup_steps 0 \
  --sampler_seed 1223627 \
  --max_grad_norm 1.0 \
  --weight_decay 0.0001 \
  --num_generations 2 \
  --shift 3 \
  --init_same_noise \
  --clip_range 1e-4 \
  --adv_clip_max 5.0 \
  --eta_step_list 0 \
  --eta_step_merge_list 1 \
  --granular_list 1