GeleiSG committed on
Commit
799fe96
·
verified ·
1 Parent(s): b8f0119

Upload 3 files

Browse files
checkpoint-2000.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:508c34acc05a5f13e0748286b860d3bf14629f8e073c908e18c44fe2a2360449
3
+ size 1079730682
combined_captions_1021.json ADDED
The diff for this file is too large to render. See raw diff
 
train_lora.sh ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/usr/bin/env bash
# train_lora.sh — LoRA fine-tuning launcher for Wan2.1.
# The original single-process T2V command is kept below (commented) for
# reference; the active command at the bottom runs I2V training via
# accelerate + DeepSpeed ZeRO stage 2.
set -euo pipefail

# ---------------------------------------------------------------------------
# Reference (disabled): T2V LoRA training.
# ---------------------------------------------------------------------------
# export MODEL_NAME="models/Diffusion_Transformer/Wan2.1-T2V-14B"
# export DATASET_NAME="datasets/internal_datasets/"
# export DATASET_META_NAME="datasets/internal_datasets/metadata.json"
# # NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA.
# # export NCCL_IB_DISABLE=1
# # export NCCL_P2P_DISABLE=1
# NCCL_DEBUG=INFO

# accelerate launch --mixed_precision="bf16" scripts/wan2.1/train_lora.py \
#   --config_path="config/wan2.1/wan_civitai.yaml" \
#   --pretrained_model_name_or_path=$MODEL_NAME \
#   --train_data_dir=$DATASET_NAME \
#   --train_data_meta=$DATASET_META_NAME \
#   --image_sample_size=1024 \
#   --video_sample_size=256 \
#   --token_sample_size=512 \
#   --video_sample_stride=2 \
#   --video_sample_n_frames=81 \
#   --train_batch_size=1 \
#   --video_repeat=1 \
#   --gradient_accumulation_steps=1 \
#   --dataloader_num_workers=8 \
#   --num_train_epochs=100 \
#   --checkpointing_steps=50 \
#   --learning_rate=1e-04 \
#   --seed=42 \
#   --output_dir="output_dir" \
#   --gradient_checkpointing \
#   --mixed_precision="bf16" \
#   --adam_weight_decay=3e-2 \
#   --adam_epsilon=1e-10 \
#   --vae_mini_batch=1 \
#   --max_grad_norm=0.05 \
#   --random_hw_adapt \
#   --training_with_video_token_length \
#   --enable_bucket \
#   --uniform_sampling \
#   --low_vram

# ---------------------------------------------------------------------------
# Active: I2V LoRA training.
# ---------------------------------------------------------------------------
export MODEL_NAME="/mnt/data/public_ckpt/videogen_public/Wan-AI/Wan2.1-I2V-14B-480P"
export DATASET_NAME=""
export DATASET_META_NAME="/mnt/data/2002-ssd/filtered_clips_from_static/lora_train_combined_datasets/combined_captions_1021.json"
# export CHECKPOINT="checkpoint-800"
# export NCCL_IB_DISABLE=1
# export NCCL_P2P_DISABLE=1
# FIX: was a bare `NCCL_DEBUG=INFO`, which is only a shell-local variable and
# is NOT inherited by the `accelerate` child process; it must be exported for
# NCCL debug logging to actually take effect.
export NCCL_DEBUG=INFO

# Expansions are quoted so paths with spaces cannot word-split the command.
accelerate launch --use_deepspeed \
  --deepspeed_config_file config/zero_stage2_config.json \
  --deepspeed_multinode_launcher standard \
  scripts/wan2.1/train_lora.py \
  --config_path="config/wan2.1/wan_civitai.yaml" \
  --pretrained_model_name_or_path="$MODEL_NAME" \
  --train_data_dir="$DATASET_NAME" \
  --train_data_meta="$DATASET_META_NAME" \
  --image_sample_size=512 \
  --video_sample_size=632 \
  --token_sample_size=632 \
  --video_sample_stride=1 \
  --video_sample_n_frames=81 \
  --train_batch_size=1 \
  --video_repeat=1 \
  --gradient_accumulation_steps=1 \
  --dataloader_num_workers=8 \
  --num_train_epochs=1000 \
  --checkpointing_steps=500 \
  --learning_rate=2e-05 \
  --seed=42 \
  --output_dir="output_dir_14b_1022_7directions_8batchsize_2e-5lr" \
  --gradient_checkpointing \
  --mixed_precision="bf16" \
  --adam_weight_decay=3e-2 \
  --adam_epsilon=1e-10 \
  --vae_mini_batch=1 \
  --max_grad_norm=0.05 \
  --random_hw_adapt \
  --training_with_video_token_length \
  --enable_bucket \
  --low_vram \
  --train_mode="i2v" \
  --save_state
# FIX: the original ended the command with `--save_state \` followed by
# commented-out flags — a dangling continuation that only parses by accident.
# Optional flags, re-add ABOVE `--save_state` with trailing backslashes:
#   --resume_from_checkpoint="latest"
#   --uniform_sampling