# ------------------------------------------------------------------
# Generation settings
# ------------------------------------------------------------------
prompt = "object, a red apple"   # positive text conditioning
negative_prompt = "glass, cup"   # concepts to steer away from
seed = 42                        # RNG seed for reproducible sampling
steps = 50                       # number of denoising steps
guidance = 5                     # classifier-free guidance scale
import torch
from IPython.display import display
from PIL import Image

from tinyflux.model.zoo import ModelZoo
from tinyflux.trainer.sampling import Sampler
# Pick the compute device and a matching tensor precision:
# bfloat16 on CUDA (fast, memory-light), full float32 on CPU.
if torch.cuda.is_available():
    device = "cuda"
    dtype = torch.bfloat16
else:
    device = "cpu"
    dtype = torch.float32
# ------------------------------------------------------------------
# Load the frozen encoders (VAE, CLIP, T5) and the TinyFlux model.
# ------------------------------------------------------------------
print("Loading models...")
zoo = ModelZoo(device=device, dtype=dtype)
# Encoders are loaded in the same order as before: VAE, CLIP, T5.
for load_encoder in (zoo.load_vae, zoo.load_clip, zoo.load_t5):
    load_encoder()
model = zoo.load_tinyflux(
    source="AbstractPhil/tiny-flux-deep",
    load_ema=False,
    ema_path="checkpoints/step_409244.safetensors",
)
print("✓ Models loaded")
# ------------------------------------------------------------------
# Build the sampler that drives the denoising loop.
# ------------------------------------------------------------------
sampler_config = dict(
    zoo=zoo,
    model=model,
    num_steps=steps,
    guidance_scale=guidance,
    shift=3.0,       # timestep shift used by the flow-matching schedule
    device=device,
    dtype=dtype,
)
sampler = Sampler(**sampler_config)
# ------------------------------------------------------------------
# Run generation for the single prompt.
# ------------------------------------------------------------------
print(f"\nGenerating: {prompt}")
images = sampler.generate(
    [prompt],
    seed=seed,
    negative_prompt=negative_prompt,
)
# ------------------------------------------------------------------
# Convert the first generated image tensor (C, H, W) to uint8 and
# display it inline.
# NOTE(review): assumes the sampler emits values in [0, 1] — clamp
# guards against slightly out-of-range values wrapping around on the
# uint8 cast (e.g. 1.01 * 255 -> 2 instead of 255). detach() makes the
# conversion safe even if the tensor still carries autograd history.
# ------------------------------------------------------------------
chw = images[0].detach().cpu().float().clamp(0.0, 1.0)
img = (chw.permute(1, 2, 0).numpy() * 255).astype("uint8")
display(Image.fromarray(img))
print(f"Seed: {seed}, Steps: {steps}, CFG: {guidance}")