Text-to-Video
Diffusers
Safetensors
English
FARWanAnyFlowPipeline
Any-Step
Text-to-Video
Image-to-Video
Video-to-Video
Instructions to use nvidia/AnyFlow-FAR-Wan2.1-14B-Diffusers with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Diffusers
How to use nvidia/AnyFlow-FAR-Wan2.1-14B-Diffusers with Diffusers:
pip install -U diffusers transformers accelerate
import torch
from diffusers import DiffusionPipeline

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained("nvidia/AnyFlow-FAR-Wan2.1-14B-Diffusers", dtype=torch.bfloat16, device_map="cuda")

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt).images[0]
- Notebooks
- Google Colab
- Kaggle
{
  "_class_name": "FAR_Wan_Transformer3DModel",
  "_diffusers_version": "0.35.1",
  "_name_or_path": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
  "added_kv_proj_dim": null,
  "attention_head_dim": 128,
  "chunk_partition": [
    1,
    3,
    3,
    3,
    3,
    3,
    3,
    2
  ],
  "compressed_patch_size": [
    1,
    4,
    4
  ],
  "cross_attn_norm": true,
  "deltatime_type": "r",
  "eps": 1e-06,
  "ffn_dim": 13824,
  "freq_dim": 256,
  "full_chunk_limit": 3,
  "gate_value": 0.25,
  "image_dim": null,
  "in_channels": 16,
  "init_far_model": true,
  "init_flowmap_model": true,
  "num_attention_heads": 40,
  "num_layers": 40,
  "out_channels": 16,
  "patch_size": [
    1,
    2,
    2
  ],
  "qk_norm": "rms_norm_across_heads",
  "rope_max_seq_len": 1024,
  "text_dim": 4096
}