{
"job": "extension",
"config": {
"name": "PLORAV5_LTX",
"process": [
{
"type": "diffusion_trainer",
"training_folder": "/app/ai-toolkit/output",
"sqlite_db_path": "/app/ai-toolkit/aitk_db.db",
"device": "cuda",
"trigger_word": null,
"performance_log_every": 10,
"network": {
"type": "lora",
"linear": 32,
"linear_alpha": 32,
"conv": 16,
"conv_alpha": 16,
"lokr_full_rank": true,
"lokr_factor": -1,
"network_kwargs": {
"ignore_if_contains": []
}
},
"save": {
"dtype": "bf16",
"save_every": 500,
"max_step_saves_to_keep": 400,
"save_format": "diffusers",
"push_to_hub": false
},
"datasets": [
{
"folder_path": "/app/ai-toolkit/datasets/49",
"mask_path": null,
"mask_min_value": 0.1,
"default_caption": "",
"caption_ext": "txt",
"caption_dropout_rate": 0.01,
"cache_latents_to_disk": true,
"is_reg": false,
"network_weight": 1,
"resolution": [
512
],
"controls": [],
"shrink_video_to_frames": true,
"num_frames": 49,
"flip_x": false,
"flip_y": false,
"num_repeats": 1,
"do_i2v": false,
"fps": 24
},
{
"folder_path": "/app/ai-toolkit/datasets/73",
"mask_path": null,
"mask_min_value": 0.1,
"default_caption": "",
"caption_ext": "txt",
"caption_dropout_rate": 0.01,
"cache_latents_to_disk": true,
"is_reg": false,
"network_weight": 1,
"resolution": [
512
],
"controls": [],
"shrink_video_to_frames": true,
"num_frames": 73,
"flip_x": false,
"flip_y": false,
"num_repeats": 1
},
{
"folder_path": "/app/ai-toolkit/datasets/81",
"mask_path": null,
"mask_min_value": 0.1,
"default_caption": "",
"caption_ext": "txt",
"caption_dropout_rate": 0.01,
"cache_latents_to_disk": true,
"is_reg": false,
"network_weight": 1,
"resolution": [
512
],
"controls": [],
"shrink_video_to_frames": true,
"num_frames": 81,
"flip_x": false,
"flip_y": false,
"num_repeats": 1
},
{
"folder_path": "/app/ai-toolkit/datasets/97",
"mask_path": null,
"mask_min_value": 0.1,
"default_caption": "",
"caption_ext": "txt",
"caption_dropout_rate": 0.01,
"cache_latents_to_disk": true,
"is_reg": false,
"network_weight": 1,
"resolution": [
512
],
"controls": [],
"shrink_video_to_frames": true,
"num_frames": 97,
"flip_x": false,
"flip_y": false,
"num_repeats": 1
},
{
"folder_path": "/app/ai-toolkit/datasets/images",
"mask_path": null,
"mask_min_value": 0.1,
"default_caption": "",
"caption_ext": "txt",
"caption_dropout_rate": 0.01,
"cache_latents_to_disk": true,
"is_reg": false,
"network_weight": 1,
"resolution": [
512,
768
],
"controls": [],
"shrink_video_to_frames": true,
"num_frames": 1,
"flip_x": false,
"flip_y": false,
"num_repeats": 1
},
{
"folder_path": "/app/ai-toolkit/datasets/89",
"mask_path": null,
"mask_min_value": 0.1,
"default_caption": "",
"caption_ext": "txt",
"caption_dropout_rate": 0.01,
"cache_latents_to_disk": true,
"is_reg": false,
"network_weight": 1,
"resolution": [
512
],
"controls": [],
"shrink_video_to_frames": true,
"num_frames": 89,
"flip_x": false,
"flip_y": false,
"num_repeats": 1
}
],
"train": {
"batch_size": 1,
"bypass_guidance_embedding": false,
"steps": 20000,
"gradient_accumulation": 1,
"train_unet": true,
"train_text_encoder": false,
"gradient_checkpointing": true,
"noise_scheduler": "flowmatch",
"optimizer": "adamw8bit",
"timestep_type": "weighted",
"content_or_style": "balanced",
"optimizer_params": {
"weight_decay": 0.0001
},
"unload_text_encoder": false,
"cache_text_embeddings": true,
"lr": 0.0001,
"ema_config": {
"use_ema": false,
"ema_decay": 0.99
},
"skip_first_sample": false,
"force_first_sample": false,
"disable_sampling": true,
"dtype": "bf16",
"diff_output_preservation": false,
"diff_output_preservation_multiplier": 1,
"diff_output_preservation_class": "person",
"switch_boundary_every": 1,
"loss_type": "mse"
},
"logging": {
"log_every": 1,
"use_ui_logger": true
},
"model": {
"name_or_path": "Lightricks/LTX-2",
"quantize": true,
"qtype": "qfloat8",
"quantize_te": true,
"qtype_te": "uint4",
"arch": "ltx2",
"low_vram": true,
"model_kwargs": {},
"layer_offloading": false,
"layer_offloading_text_encoder_percent": 1,
"layer_offloading_transformer_percent": 1
},
"sample": {
"sampler": "flowmatch",
"sample_every": 250,
"width": 768,
"height": 768,
"samples": [],
"neg": "",
"seed": 42,
"walk_seed": true,
"guidance_scale": 4,
"sample_steps": 25,
"num_frames": 121,
"fps": 24
}
}
]
},
"meta": {
"name": "[name]",
"version": "1.0"
}
}