diff --git "a/Qwen destill.ipynb" "b/Qwen destill.ipynb" --- "a/Qwen destill.ipynb" +++ "b/Qwen destill.ipynb" @@ -1 +1 @@ -{"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"9QVc2_k_bL3P","executionInfo":{"status":"aborted","timestamp":1775006100621,"user_tz":-120,"elapsed":107065,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"}}},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["# ============================= CELL 1: Prepare Latents + Distill 768-dim Text Encoder (Fixed Dtype) =============================\n","# @title 1. Full Preparation – Latents + 768-dim Distilled Encoder (Dtype Fixed)\n","\n","import os\n","import zipfile\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import numpy as np\n","from google.colab import drive\n","from PIL import Image\n","from tqdm import tqdm\n","from diffusers import AutoencoderKL\n","from transformers import AutoTokenizer, AutoModel\n","from datasets import Dataset as HFDataset\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model\n","from transformers import Trainer, TrainingArguments, set_seed\n","\n","set_seed(42)\n","drive.mount('/content/drive', force_remount=True)\n","\n","zip_path = '/content/drive/MyDrive/my_set.zip' # @param {type:'string'}\n","\n","# ====================== 1. Extract Data ======================\n","print(\"πŸ“¦ Extracting zip...\")\n","extract_dir = \"/content/data\"\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","image_files = [f for f in os.listdir(extract_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))]\n","print(f\"βœ… Found {len(image_files)} images\")\n","\n","text_files = sorted([f for f in os.listdir(extract_dir) if f.endswith('.txt') and f[0].isdigit()])\n","texts = []\n","for tf in text_files:\n"," with open(os.path.join(extract_dir, tf), \"r\", encoding=\"utf-8\") as f:\n"," content = f.read().strip()\n"," if content:\n"," texts.append(content)\n","\n","print(f\"βœ… Loaded {len(texts)} captions\")\n","\n","# ====================== 2. 
"\n","# ====================== 2. Encode Images → Flux VAE Latents ======================\n","latent_dir = \"/content/drive/MyDrive/flux_klein_latents\"\n","if os.path.exists(latent_dir) and len([f for f in os.listdir(latent_dir) if f.endswith(\".pt\")]) == len(image_files):\n"," print(f\"✅ Using existing latents from {latent_dir}\")\n","else:\n"," print(\"\\n🌀 Encoding images to Flux VAE latents...\")\n"," vae = AutoencoderKL.from_pretrained(\n"," \"black-forest-labs/FLUX.1-dev\",\n"," subfolder=\"vae\",\n"," torch_dtype=torch.float32,\n"," device_map=\"auto\"\n"," )\n"," vae.eval()\n","\n"," os.makedirs(latent_dir, exist_ok=True)\n","\n"," with torch.no_grad():\n"," for img_file in tqdm(image_files, desc=\"Encoding latents\"):\n"," img_path = os.path.join(extract_dir, img_file)\n"," image = Image.open(img_path).convert(\"RGB\").resize((1024, 1024), Image.LANCZOS)\n","\n"," pixel_values = (torch.from_numpy(np.array(image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0)\n"," pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) * 2.0 - 1.0\n","\n"," latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor\n"," latent_name = os.path.splitext(img_file)[0] + \".pt\"\n"," torch.save(latents.cpu(), os.path.join(latent_dir, latent_name))\n","\n"," del vae\n"," torch.cuda.empty_cache()\n"," print(f\"✅ Latents saved to {latent_dir}\")\n","\n","# ====================== 3. Compute Teacher Embeddings & Project to 768 ======================\n","print(\"\\n📏 Computing teacher embeddings and projecting to 768-dim...\")\n","\n","teacher_model_name = \"Qwen/Qwen3-Embedding-0.6B\"\n","tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)\n","teacher_model = AutoModel.from_pretrained(\n"," teacher_model_name,\n"," torch_dtype=torch.float16,\n"," device_map=\"auto\",\n"," trust_remote_code=True\n",")\n","teacher_model.eval()\n","\n","teacher_embeddings = []\n","with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Teacher encoding\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(teacher_model.device)\n"," outputs = teacher_model(**inputs)\n"," emb = outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu()\n"," teacher_embeddings.append(emb)\n","\n","teacher_embeddings_1024 = torch.stack(teacher_embeddings)\n","print(f\"✅ Teacher embeddings (1024): {teacher_embeddings_1024.shape}\")\n","\n","# Fix: Move everything to float32 before projection\n","teacher_embeddings_1024 = teacher_embeddings_1024.to(torch.float32)\n","\n","teacher_proj = nn.Linear(1024, 768, dtype=torch.float32).to(\"cuda\")\n","with torch.no_grad():\n"," teacher_embeddings_768 = teacher_proj(teacher_embeddings_1024.to(\"cuda\")).cpu()\n","\n","print(f\"✅ Projected teacher embeddings (768): {teacher_embeddings_768.shape}\")\n","\n","# Save projected teacher embeddings\n","torch.save({\n"," \"embeddings\": teacher_embeddings_768,\n"," \"texts\": texts,\n"," \"dim\": 768\n","}, \"/content/drive/MyDrive/qwen_embeddings_768.pt\")\n","\n","del teacher_model, teacher_proj\n","torch.cuda.empty_cache()\n",
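"\n","# EDITOR ADDITION (hedged): Qwen3-Embedding models are normally pooled with the\n","# last non-padding token (per the model card), not the mean pooling used above.\n","# A sketch of last-token pooling, in case you want teacher targets closer to\n","# the official embeddings:\n","# def last_token_pool(last_hidden, attention_mask):\n","#     lengths = attention_mask.sum(dim=1) - 1  # index of each final real token\n","#     return last_hidden[torch.arange(last_hidden.shape[0]), lengths]\n",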
"\n","# ====================== 4. Distill Student to 768-dim ======================\n","print(\"\\n👨‍🎓 Distilling student to 768-dim...\")\n","\n","student_model_name = \"Qwen/Qwen2.5-0.5B\"\n","base_student = AutoModel.from_pretrained(\n"," student_model_name, torch_dtype=torch.float32, device_map=\"auto\", trust_remote_code=True\n",")\n","\n","lora_config = LoraConfig(\n"," r=16,\n"," lora_alpha=32,\n"," target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n"," lora_dropout=0.05,\n"," bias=\"none\",\n"," task_type=\"FEATURE_EXTRACTION\"\n",")\n","student_model = get_peft_model(base_student, lora_config)\n","\n","# NOTE: this projection is not registered with the Trainer's optimizer below,\n","# so only the LoRA adapters are updated; the projection stays at its random\n","# initialization and is saved at the end so the same weights can be reused at inference.\n","projection = nn.Linear(base_student.config.hidden_size, 768).to(\"cuda\")\n","projection.train()\n","\n","hf_dataset = HFDataset.from_dict({\"text\": texts})\n","\n","class DistillationDataset(Dataset):\n"," def __init__(self, hf_dataset, tokenizer, teacher_embs, max_length=512):\n"," self.dataset = hf_dataset\n"," self.tokenizer = tokenizer\n"," self.teacher_embs = teacher_embs\n"," self.max_length = max_length\n","\n"," def __len__(self): return len(self.dataset)\n","\n"," def __getitem__(self, idx):\n"," text = self.dataset[idx][\"text\"]\n"," inputs = self.tokenizer(text, padding=\"max_length\", truncation=True, max_length=self.max_length, return_tensors=\"pt\")\n"," return {\n"," \"input_ids\": inputs[\"input_ids\"].squeeze(0),\n"," \"attention_mask\": inputs[\"attention_mask\"].squeeze(0),\n"," \"labels\": self.teacher_embs[idx],\n"," }\n","\n","distill_dataset = DistillationDataset(hf_dataset, tokenizer, teacher_embeddings_768)\n","\n","def collate_fn(batch):\n"," return {\n"," \"input_ids\": torch.stack([item[\"input_ids\"] for item in batch]),\n"," \"attention_mask\": torch.stack([item[\"attention_mask\"] for item in batch]),\n"," \"labels\": torch.stack([item[\"labels\"] for item in batch])\n"," }\n","\n","class DistillTrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n"," labels = inputs.pop(\"labels\").to(\"cuda\") # (B, 768)\n","\n"," outputs = model(input_ids=inputs[\"input_ids\"], attention_mask=inputs[\"attention_mask\"])\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," student_emb = projection(hidden) # (B, 768)\n","\n"," student_norm = F.normalize(student_emb, p=2, dim=1)\n"," teacher_norm = F.normalize(labels, p=2, dim=1)\n","\n"," mse_loss = F.mse_loss(student_norm, teacher_norm)\n"," cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()\n"," loss = 0.25 * mse_loss + 0.75 * cos_loss\n","\n"," return (loss, outputs) if return_outputs else loss\n","\n","training_args = TrainingArguments(\n"," output_dir=\"./distilled_qwen_768\",\n"," per_device_train_batch_size=4,\n"," num_train_epochs=50,\n"," learning_rate=2e-4,\n"," fp16=True,\n"," logging_steps=50,\n"," save_strategy=\"no\",\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n",")\n","\n","trainer = DistillTrainer(\n"," model=student_model,\n"," args=training_args,\n"," train_dataset=distill_dataset,\n"," data_collator=collate_fn,\n",")\n","\n","print(\"🚀 Starting distillation to 768-dim...\")\n","trainer.train()\n","\n","# ====================== Save ======================\n","distilled_save_dir = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","os.makedirs(distilled_save_dir, exist_ok=True)\n","student_model.save_pretrained(distilled_save_dir)\n","tokenizer.save_pretrained(distilled_save_dir)\n","torch.save(projection.state_dict(), f\"{distilled_save_dir}/projection.pth\")\n","\n","print(f\"\\n✅ SUCCESS! 
768-dim distilled encoder saved to {distilled_save_dir}\")\n","print(f\" Latents are ready in {latent_dir}\")\n","print(\" You can now run Cell 2.\")\n","\n","torch.cuda.empty_cache()"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"lfqwgHHYn4bW","executionInfo":{"status":"ok","timestamp":1775007673685,"user_tz":-120,"elapsed":1311321,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"}},"outputId":"2d2e6911-52b2-4e3c-8762-8b7e5dc4c402"},"execution_count":3,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n","📦 Extracting zip...\n","✅ Found 250 images\n","✅ Loaded 250 captions\n","✅ Using existing latents from /content/drive/MyDrive/flux_klein_latents\n","\n","📏 Computing teacher embeddings and projecting to 768-dim...\n"]},{"output_type":"display_data","data":{"text/plain":["Loading weights: 0%| | 0/310 [00:00<?, ?it/s]"]},"metadata":{}},{"output_type":"display_data","data":{"text/plain":["<IPython.core.display.HTML object>"],"text/html":["
\n"," \n"," \n"," [3150/3150 20:44, Epoch 50/50]\n","
\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
StepTraining Loss
500.306407
1000.068270
1500.046029
2000.040868
2500.037660
3000.037948
3500.036726
4000.036342
4500.035160
5000.036195
5500.034873
6000.034670
6500.033394
7000.034004
7500.032754
8000.032264
8500.031586
9000.031454
9500.029183
10000.029279
10500.029389
11000.027533
11500.026483
12000.026171
12500.026032
13000.025056
13500.024446
14000.024373
14500.023412
15000.022667
15500.022285
16000.021616
16500.021353
17000.021070
17500.020380
18000.020133
18500.019799
19000.018946
19500.018862
20000.017926
20500.018461
21000.017476
21500.017427
22000.017166
22500.016884
23000.016194
23500.016499
24000.015926
24500.015583
25000.015570
25500.015187
26000.015098
26500.014849
27000.014774
27500.014445
28000.014186
28500.014396
29000.014029
29500.014093
30000.013511
30500.013682
31000.013810
31500.013562

"]},"metadata":{}},{"output_type":"stream","name":"stdout","text":["\n","βœ… SUCCESS! 768-dim distilled encoder saved to /content/drive/MyDrive/distilled_qwen_768_for_flux\n"," Latents are ready in /content/drive/MyDrive/flux_klein_latents\n"," You can now run Cell 2.\n"]}]},{"cell_type":"code","execution_count":null,"metadata":{"id":"9IGpdiL9BBr6","executionInfo":{"status":"aborted","timestamp":1775006100585,"user_tz":-120,"elapsed":0,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"}}},"outputs":[],"source":["# ============================= CELL 2: Save All Assets to Drive =============================\n","# @title 2. Save Latents + New Distilled Encoder\n","\n","import os\n","import torch\n","\n","print(\"πŸ’Ύ Saving all assets to Google Drive...\")\n","\n","# Ensure directories exist\n","os.makedirs(\"/content/drive/MyDrive/flux_klein_latents\", exist_ok=True)\n","os.makedirs(\"/content/drive/MyDrive/distilled_qwen_768_for_flux\", exist_ok=True)\n","\n","# Move latents if not already there\n","# (assuming they are already saved in Cell 1)\n","\n","print(\"βœ… Latents are in /content/drive/MyDrive/flux_klein_latents\")\n","print(\"βœ… New 768-dim distilled model is in /content/drive/MyDrive/distilled_qwen_768_for_flux\")\n","\n","print(\"\\nπŸŽ‰ All data is safely saved on Google Drive.\")\n","print(\" You can now **disconnect and delete the runtime** if you want.\")\n","print(\" Everything needed for training is on Drive.\")\n","print(\" When you come back, start from Cell 3.\")"]},{"cell_type":"code","source":["# ================================================\n","# CELL 3: Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"πŸ”Œ Disconnecting Colab session in 15 seconds...\")\n","import time\n","time.sleep(3)\n","\n","from google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session disconnected.\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":73},"id":"FQF71-mvmlc1","executionInfo":{"status":"ok","timestamp":1775007678343,"user_tz":-120,"elapsed":4651,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"}},"outputId":"ab97d8ee-a278-4d0b-9a6b-9f99a47946ab"},"execution_count":4,"outputs":[{"output_type":"stream","name":"stdout","text":["πŸ”Œ Disconnecting Colab session in 15 seconds...\n","Session disconnected.\n"]}]},{"cell_type":"markdown","metadata":{"id":"cfshTDIFM5ND"},"source":["You can disconnect the colab past thispoint. All data from cells 1 and 2 are saved to drive."]},{"cell_type":"code","execution_count":1,"metadata":{"id":"ZZaadi1VBK6Z","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1775037684456,"user_tz":-120,"elapsed":65934,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"}},"outputId":"b1868b7a-dc28-4dd1-c8b9-c23005d53eb1"},"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n","βœ… Dependencies installed and parameters set.\n"," Distilled encoder: /content/drive/MyDrive/distilled_qwen_768_for_flux\n"," Batch size: 1 | Epochs: 8\n","\n","πŸ” Quick VRAM check:\n","memory.used [MiB], memory.total [MiB]\n","3 MiB, 15360 MiB\n"]}],"source":["# ============================= CELL 3: Install Dependencies & Setup =============================\n","# @title 3. 
{"cell_type":"code","execution_count":1,"metadata":{"id":"ZZaadi1VBK6Z","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1775037684456,"user_tz":-120,"elapsed":65934,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"}},"outputId":"b1868b7a-dc28-4dd1-c8b9-c23005d53eb1"},"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n","✅ Dependencies installed and parameters set.\n"," Distilled encoder: /content/drive/MyDrive/distilled_qwen_768_for_flux\n"," Batch size: 1 | Epochs: 8\n","\n","🔍 Quick VRAM check:\n","memory.used [MiB], memory.total [MiB]\n","3 MiB, 15360 MiB\n"]}],"source":["# ============================= CELL 3: Install Dependencies & Setup =============================\n","# @title 3. Install Dependencies + Setup Parameters\n","\n","!pip install -q diffusers transformers peft accelerate datasets tqdm\n","\n","import os\n","import torch\n","from google.colab import drive, userdata\n","from transformers import AutoTokenizer, AutoModel\n","from peft import PeftModel\n","import gc\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== Parameters ======================\n","DISTILLED_DIR = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","LATENT_DIR = \"/content/drive/MyDrive/flux_klein_latents\"\n","FINAL_LORA_DIR = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","\n","BATCH_SIZE = 1 # Keep low for safety\n","NUM_EPOCHS = 8\n","LEARNING_RATE = 1e-4\n","LORA_RANK = 32\n","LORA_ALPHA = 32\n","\n","print(\"✅ Dependencies installed and parameters set.\")\n","print(f\" Distilled encoder: {DISTILLED_DIR}\")\n","print(f\" Batch size: {BATCH_SIZE} | Epochs: {NUM_EPOCHS}\")\n","\n","# Optional: quick check\n","print(\"\\n🔍 Quick VRAM check:\")\n","!nvidia-smi --query-gpu=memory.used,memory.total --format=csv"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"fYbQfjC9RBWY","colab":{"base_uri":"https://localhost:8080/","height":403},"outputId":"03428952-2f55-4265-882d-883eec7d6d29"},"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n","=== CELL 4 START - Clean Restart ===\n","Current VRAM used: 1.85 GB\n","\n","[1/5] Loading distilled 768-dim Qwen encoder...\n"]},{"output_type":"display_data","data":{"text/plain":["Loading weights: 0%| | 0/290 [00:00<?, ?it/s]"]},"metadata":{}}]},{"cell_type":"code","source":["if cos_sims.mean().item() > 0.94:\n"," print(\"🎉 Excellent alignment! Your distillation is high quality.\")\n","elif cos_sims.mean().item() > 0.90:\n"," print(\"✅ Good alignment. Safe for LoRA training.\")\n","else:\n"," print(\"⚠️ Alignment is moderate. Consider more distillation epochs or higher LoRA rank.\")"],"metadata":{"id":"kMiMZmie9n1V","cellView":"form"},"execution_count":null,"outputs":[]},
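{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# ============================= EDITOR ADDITION: Reload the Distilled Encoder =============================\n","# A minimal, hedged sketch (not from the original run) of how the artifacts\n","# saved by Cell 1 can be reloaded elsewhere: the PEFT adapter wraps the same\n","# Qwen2.5-0.5B base model, and projection.pth holds the 768-dim head.\n","import torch\n","import torch.nn as nn\n","from transformers import AutoTokenizer, AutoModel\n","from peft import PeftModel\n","\n","save_dir = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","tok = AutoTokenizer.from_pretrained(save_dir)\n","base = AutoModel.from_pretrained(\"Qwen/Qwen2.5-0.5B\", torch_dtype=torch.float32, trust_remote_code=True)\n","encoder = PeftModel.from_pretrained(base, save_dir).eval().to(\"cuda\")\n","\n","head = nn.Linear(base.config.hidden_size, 768)\n","head.load_state_dict(torch.load(f\"{save_dir}/projection.pth\", map_location=\"cpu\"))\n","head = head.to(\"cuda\").eval()\n","\n","with torch.no_grad():\n","    ins = tok(\"a test caption\", return_tensors=\"pt\").to(\"cuda\")\n","    emb = head(encoder(**ins).last_hidden_state.mean(dim=1))  # (1, 768)"]},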
{"cell_type":"code","source":["# ============================= CELL 2.c: Fixed with 64→128 Projection for Klein =============================\n","# @title 2.c – Test with standard packing + small 64→128 projection\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","\n","text = texts[0]\n","print(f\"🧪 Test text: {text[:100]}...\")\n","\n","# 1. Student embedding\n","inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n","with torch.no_grad():\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," emb_768 = projection(hidden).to(torch.bfloat16)\n","\n","print(f\"✅ emb_768 shape: {emb_768.shape}\")\n","\n","# 2. Text side (correct)\n","encoder_hidden = emb_768.unsqueeze(1).repeat(1, 1, 10) # (1, 1, 7680)\n","pooled_projections = emb_768\n","\n","# 3. Rotary IDs\n","txt_ids = torch.zeros((1, 3), device=\"cuda\", dtype=torch.bfloat16)\n","img_ids = torch.zeros((4096, 3), device=\"cuda\", dtype=torch.bfloat16)\n","\n","# 4. Latent packing (64 ch) + projection to 128\n","dummy_latent = torch.randn(1, 16, 128, 128, device=\"cuda\", dtype=torch.bfloat16)\n","b, c, h, w = dummy_latent.shape\n","latent_packed = dummy_latent.view(b, c, h//2, 2, w//2, 2)\n","latent_packed = latent_packed.permute(0, 2, 4, 1, 3, 5).contiguous()\n","hidden_states = latent_packed.view(b, (h//2)*(w//2), c * 4) # (1, 4096, 64)\n","\n","# Small fixed projection 64 → 128\n","proj_64_to_128 = nn.Linear(64, 128, bias=False, dtype=torch.bfloat16, device=\"cuda\")\n","hidden_states = proj_64_to_128(hidden_states)\n","\n","print(f\"hidden_states after 64→128: {hidden_states.shape}\")\n","\n","timestep = torch.tensor([1000.0], device=\"cuda\", dtype=torch.bfloat16)\n","\n","# 5. Forward pass\n","print(\"\\n=== Running forward pass ===\")\n","try:\n"," output = transformer(\n"," hidden_states=hidden_states,\n"," timestep=timestep,\n"," encoder_hidden_states=encoder_hidden,\n"," pooled_projections=pooled_projections,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n"," print(\"🎉 SUCCESS! Forward pass works.\")\n"," print(f\"Output shape: {output.shape}\")\n","except Exception as e:\n"," print(\"❌ Failed:\")\n"," print(str(e))"],"metadata":{"cellView":"form","id":"9c0V4D7sCP2B"},"execution_count":null,"outputs":[]},
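{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# ============================= EDITOR ADDITION: Packing Math Spelled Out =============================\n","# Hedged sketch of the 2x2 packing used in Cell 2.c: each 2x2 spatial patch of\n","# the 16-channel VAE latent becomes one 64-channel token, giving 64*64 = 4096\n","# tokens for a 128x128 latent; the extra Linear then maps 64 -> 128 channels.\n","import torch\n","\n","x = torch.randn(1, 16, 128, 128)              # (B, C, H, W) VAE latent\n","b, c, h, w = x.shape\n","p = x.view(b, c, h // 2, 2, w // 2, 2)        # split H and W into 2x2 patches\n","p = p.permute(0, 2, 4, 1, 3, 5).contiguous()  # (B, H/2, W/2, C, 2, 2)\n","tokens = p.view(b, (h // 2) * (w // 2), c * 4)\n","assert tokens.shape == (1, 4096, 64)          # 4096 tokens, 16*2*2 = 64 channels"]},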
{"cell_type":"markdown","source":["Cell 3\n"],"metadata":{"id":"dz6FDD1aBSCt"}},{"cell_type":"code","execution_count":null,"metadata":{"id":"sWEzuqsmvKua"},"outputs":[],"source":["# ============================= CELL 3.a: Install Dependencies + Setup Parameters + Load Qwen Text Encoder =============================\n","# @title 3.a Setup + Load Text Encoder (one-time for precompute)\n","\n","!pip install -q diffusers transformers peft accelerate datasets tqdm\n","\n","import os\n","import torch\n","import gc\n","from google.colab import drive\n","from diffusers import Flux2KleinPipeline\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== Parameters ======================\n","LATENT_DIR = \"/content/drive/MyDrive/flux_klein_latents\"\n","FINAL_LORA_DIR = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","\n","BATCH_SIZE = 1 # Keep at 1 for safety with variable sequence lengths\n","NUM_EPOCHS = 8\n","LEARNING_RATE = 1e-4\n","LORA_RANK = 32\n","LORA_ALPHA = 32\n","\n","print(\"✅ Dependencies installed and parameters set.\")\n","print(f\" Batch size: {BATCH_SIZE} | Epochs: {NUM_EPOCHS}\")\n","print(f\" Latents from: {LATENT_DIR}\")\n","print(f\" Final LoRA will be saved to: {FINAL_LORA_DIR}\")\n","\n","# ====================== Load Pipeline + Text Encoder ======================\n","print(\"\\n🔄 Loading FLUX.2-klein-base-4B pipeline (Qwen3-4B text encoder)...\")\n","\n","pipe = Flux2KleinPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," torch_dtype=torch.bfloat16,\n"," device_map=\"balanced\",\n"," low_cpu_mem_usage=True\n",")\n","\n","text_encoder = pipe.text_encoder\n","tokenizer = pipe.tokenizer\n","\n","# Force to CUDA and enable hidden states output\n","text_encoder = text_encoder.to(\"cuda\")\n","text_encoder.config.output_hidden_states = True\n","text_encoder.eval()\n","\n","print(\"✅ Text encoder loaded and moved to CUDA\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick VRAM check\n","print(\"\\n🔍 Quick VRAM check:\")\n","!nvidia-smi --query-gpu=memory.used,memory.total --format=csv"]},
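{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# ============================= EDITOR ADDITION: Shape Expectations =============================\n","# Hedged check before precomputing: the text encoder's width comes from its\n","# config (hidden_size is 2560 for Qwen3-4B), so each caption padded to 512\n","# tokens should precompute to a (512, hidden_size) hidden-state matrix.\n","print(f\"Text encoder hidden size: {text_encoder.config.hidden_size}\")"]},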
{"cell_type":"code","execution_count":null,"metadata":{"id":"8hDfeHlNvPWE"},"outputs":[],"source":["# ============================= CELL 3.b: Precompute Exact Qwen3-4B Embeddings =============================\n","# @title 3.b Precompute Embeddings (using text encoder from 3.a)\n","\n","import torch\n","from tqdm import tqdm\n","\n","# Load your 250 texts\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","\n","precomputed = {\n"," \"encoder_hidden_states\": [], # list of (seq_len, hidden_dim)\n"," \"pooled_projections\": [] # list of (hidden_dim,)\n","}\n","\n","with torch.no_grad():\n"," for i, raw_text in enumerate(tqdm(texts, desc=\"Precomputing embeddings\")):\n"," text = raw_text.strip()\n"," if not text:\n"," text = \"a photo of a scene\"\n","\n"," inputs = tokenizer(\n"," text,\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," if inputs[\"input_ids\"].shape[1] == 0:\n"," print(f\"Warning: zero-length sequence for index {i}, using fallback\")\n"," inputs = tokenizer(\n"," \"a photo of a scene\",\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," outputs = text_encoder(**inputs)\n","\n"," # Handle CausalLMOutputWithPast correctly\n"," if hasattr(outputs, \"hidden_states\") and outputs.hidden_states is not None:\n"," hidden = outputs.hidden_states[-1].squeeze(0).cpu() # final layer: (seq_len, hidden_dim)\n"," elif hasattr(outputs, \"last_hidden_state\"):\n"," hidden = outputs.last_hidden_state.squeeze(0).cpu()\n"," else:\n"," print(f\"Warning: unexpected output for text {i}, using logits as fallback\")\n"," hidden = outputs.logits.squeeze(0).cpu()\n","\n"," pooled = hidden.mean(dim=0).cpu() # (hidden_dim,)\n","\n"," precomputed[\"encoder_hidden_states\"].append(hidden)\n"," precomputed[\"pooled_projections\"].append(pooled)\n","\n","print(f\"✅ Successfully precomputed embeddings for {len(texts)} texts\")\n","torch.save(precomputed, \"/content/drive/MyDrive/klein_exact_embeddings.pt\")\n","print(\"Saved to /content/drive/MyDrive/klein_exact_embeddings.pt\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"px5wQJZ2vTUf"},"outputs":[],"source":["# ============================= CELL 3.c: Unload Text Encoder + Prepare Workspace =============================\n","# @title 3.c Cleanup – Unload Qwen Encoder\n","\n","import gc\n","\n","# Unload pipeline and text encoder\n","if 'pipe' in globals():\n"," del pipe\n","if 'text_encoder' in globals():\n"," del text_encoder\n","if 'tokenizer' in globals():\n"," del tokenizer\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","print(\"✅ Text encoder and pipeline fully unloaded from VRAM\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick check for latents\n","latent_files = sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")])\n","print(f\"Found {len(latent_files)} latents ready for training\")"]},{"cell_type":"markdown","metadata":{"id":"GDNO0bonrYAo"},"source":["lora training"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":7639,"status":"ok","timestamp":1775041892679,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"llxOztXpvYMO","outputId":"ba09e58b-b148-4838-d108-e8da123b4cdd"},"outputs":[{"name":"stdout","output_type":"stream","text":["=== CELL 4.a – Loading Transformer + LoRA ===\n","Current VRAM before loading: 0.53 GB\n","Loading FLUX.2-klein-base-4B transformer...\n"]},{"name":"stderr","output_type":"stream","text":["Some weights of the model checkpoint at black-forest-labs/FLUX.2-klein-base-4B were not used when initializing FluxTransformer2DModel: \n"," ['double_stream_modulation_img.linear.weight, single_transformer_blocks.3.attn.to_qkv_mlp_proj.weight, single_transformer_blocks.1.attn.to_out.weight, single_transformer_blocks.7.attn.to_out.weight, single_transformer_blocks.12.attn.to_out.weight, single_transformer_blocks.10.attn.to_out.weight, single_transformer_blocks.6.attn.to_qkv_mlp_proj.weight, transformer_blocks.1.ff.linear_out.weight, single_transformer_blocks.11.attn.to_qkv_mlp_proj.weight, single_transformer_blocks.8.attn.to_out.weight, single_transformer_blocks.4.attn.to_qkv_mlp_proj.weight, transformer_blocks.2.ff.linear_in.weight, transformer_blocks.2.ff_context.linear_out.weight, single_transformer_blocks.6.attn.to_out.weight, transformer_blocks.0.ff.linear_in.weight, single_transformer_blocks.18.attn.to_out.weight, single_transformer_blocks.14.attn.to_out.weight, single_transformer_blocks.11.attn.to_out.weight, single_transformer_blocks.12.attn.to_qkv_mlp_proj.weight, 
single_transformer_blocks.19.attn.to_qkv_mlp_proj.weight, single_transformer_blocks.14.attn.to_qkv_mlp_proj.weight, single_transformer_blocks.4.attn.to_out.weight, transformer_blocks.0.ff_context.linear_in.weight, single_transformer_blocks.15.attn.to_qkv_mlp_proj.weight, single_transformer_blocks.16.attn.to_out.weight, transformer_blocks.3.ff_context.linear_in.weight, transformer_blocks.0.ff.linear_out.weight, transformer_blocks.4.ff_context.linear_out.weight, transformer_blocks.0.ff_context.linear_out.weight, single_transformer_blocks.1.attn.to_qkv_mlp_proj.weight, single_transformer_blocks.10.attn.to_qkv_mlp_proj.weight, transformer_blocks.3.ff.linear_in.weight, single_transformer_blocks.5.attn.to_qkv_mlp_proj.weight, single_transformer_blocks.2.attn.to_qkv_mlp_proj.weight, transformer_blocks.4.ff.linear_out.weight, single_transformer_blocks.17.attn.to_out.weight, single_transformer_blocks.0.attn.to_out.weight, transformer_blocks.1.ff_context.linear_in.weight, single_transformer_blocks.5.attn.to_out.weight, single_transformer_blocks.17.attn.to_qkv_mlp_proj.weight, double_stream_modulation_txt.linear.weight, single_transformer_blocks.3.attn.to_out.weight, transformer_blocks.2.ff_context.linear_in.weight, single_transformer_blocks.16.attn.to_qkv_mlp_proj.weight, single_transformer_blocks.0.attn.to_qkv_mlp_proj.weight, single_transformer_blocks.18.attn.to_qkv_mlp_proj.weight, single_transformer_blocks.13.attn.to_qkv_mlp_proj.weight, single_transformer_blocks.7.attn.to_qkv_mlp_proj.weight, time_guidance_embed.timestep_embedder.linear_1.weight, transformer_blocks.4.ff_context.linear_in.weight, single_transformer_blocks.13.attn.to_out.weight, transformer_blocks.1.ff_context.linear_out.weight, single_transformer_blocks.2.attn.to_out.weight, single_transformer_blocks.9.attn.to_out.weight, single_stream_modulation.linear.weight, time_guidance_embed.timestep_embedder.linear_2.weight, single_transformer_blocks.9.attn.to_qkv_mlp_proj.weight, transformer_blocks.2.ff.linear_out.weight, single_transformer_blocks.19.attn.to_out.weight, transformer_blocks.3.ff.linear_out.weight, transformer_blocks.4.ff.linear_in.weight, transformer_blocks.1.ff.linear_in.weight, transformer_blocks.3.ff_context.linear_out.weight, single_transformer_blocks.15.attn.to_out.weight, single_transformer_blocks.8.attn.to_qkv_mlp_proj.weight']\n","Some weights of FluxTransformer2DModel were not initialized from the model checkpoint at black-forest-labs/FLUX.2-klein-base-4B and are newly initialized: ['transformer_blocks.3.ff.net.0.proj.bias', 'single_transformer_blocks.5.attn.to_v.weight', 'transformer_blocks.4.norm1_context.linear.bias', 'single_transformer_blocks.9.attn.to_q.bias', 'transformer_blocks.2.attn.add_v_proj.bias', 'transformer_blocks.2.attn.add_k_proj.bias', 'transformer_blocks.3.attn.to_k.bias', 'transformer_blocks.1.ff_context.net.0.proj.bias', 'single_transformer_blocks.0.norm.linear.weight', 'transformer_blocks.3.attn.add_q_proj.bias', 'single_transformer_blocks.2.norm.linear.bias', 'single_transformer_blocks.11.attn.to_q.bias', 'transformer_blocks.0.ff_context.net.0.proj.bias', 'single_transformer_blocks.18.attn.to_v.bias', 'single_transformer_blocks.18.attn.to_k.bias', 'single_transformer_blocks.2.attn.to_v.weight', 'transformer_blocks.4.attn.to_k.bias', 'single_transformer_blocks.9.proj_mlp.bias', 'transformer_blocks.4.ff_context.net.0.proj.bias', 'transformer_blocks.3.norm1.linear.weight', 'single_transformer_blocks.16.attn.to_v.weight', 'transformer_blocks.4.ff.net.0.proj.weight', 
'transformer_blocks.3.ff.net.2.weight', 'transformer_blocks.4.norm1.linear.weight', 'single_transformer_blocks.15.proj_mlp.bias', 'single_transformer_blocks.18.attn.to_k.weight', 'transformer_blocks.2.ff_context.net.0.proj.weight', 'single_transformer_blocks.10.proj_out.weight', 'single_transformer_blocks.17.proj_out.weight', 'single_transformer_blocks.19.proj_out.weight', 'transformer_blocks.2.ff.net.0.proj.bias', 'single_transformer_blocks.0.proj_out.bias', 'single_transformer_blocks.19.attn.to_q.weight', 'single_transformer_blocks.17.attn.to_k.bias', 'single_transformer_blocks.7.attn.to_q.weight', 'single_transformer_blocks.6.proj_out.bias', 'transformer_blocks.0.norm1.linear.bias', 'single_transformer_blocks.1.attn.to_v.weight', 'single_transformer_blocks.13.norm.linear.bias', 'transformer_blocks.1.norm1.linear.bias', 'transformer_blocks.3.attn.to_q.bias', 'single_transformer_blocks.19.attn.to_q.bias', 'transformer_blocks.0.ff_context.net.2.bias', 'single_transformer_blocks.15.attn.to_v.weight', 'single_transformer_blocks.16.proj_mlp.weight', 'single_transformer_blocks.9.norm.linear.weight', 'transformer_blocks.3.attn.to_add_out.bias', 'single_transformer_blocks.0.attn.to_v.bias', 'single_transformer_blocks.19.attn.to_v.bias', 'transformer_blocks.4.ff_context.net.0.proj.weight', 'single_transformer_blocks.12.norm.linear.bias', 'single_transformer_blocks.15.attn.to_q.weight', 'single_transformer_blocks.4.proj_out.weight', 'single_transformer_blocks.16.attn.to_v.bias', 'transformer_blocks.1.ff.net.2.bias', 'single_transformer_blocks.7.attn.to_v.bias', 'single_transformer_blocks.7.attn.to_q.bias', 'single_transformer_blocks.16.proj_out.bias', 'transformer_blocks.2.ff.net.2.bias', 'single_transformer_blocks.0.norm.linear.bias', 'single_transformer_blocks.2.attn.to_k.bias', 'transformer_blocks.3.ff_context.net.2.weight', 'single_transformer_blocks.8.attn.to_v.bias', 'single_transformer_blocks.11.proj_mlp.bias', 'transformer_blocks.4.ff.net.2.weight', 'single_transformer_blocks.7.attn.to_v.weight', 'single_transformer_blocks.18.attn.to_v.weight', 'single_transformer_blocks.16.proj_out.weight', 'single_transformer_blocks.4.attn.to_q.bias', 'single_transformer_blocks.2.attn.to_k.weight', 'transformer_blocks.1.attn.to_k.bias', 'single_transformer_blocks.1.attn.to_k.weight', 'single_transformer_blocks.13.attn.to_q.weight', 'single_transformer_blocks.4.attn.to_v.weight', 'single_transformer_blocks.3.attn.to_v.weight', 'single_transformer_blocks.9.attn.to_q.weight', 'single_transformer_blocks.16.attn.to_k.weight', 'transformer_blocks.3.norm1_context.linear.bias', 'single_transformer_blocks.15.attn.to_v.bias', 'single_transformer_blocks.1.norm.linear.weight', 'single_transformer_blocks.9.norm.linear.bias', 'transformer_blocks.2.norm1_context.linear.weight', 'single_transformer_blocks.15.attn.to_k.bias', 'single_transformer_blocks.18.norm.linear.bias', 'transformer_blocks.0.attn.add_q_proj.bias', 'norm_out.linear.bias', 'single_transformer_blocks.14.proj_mlp.bias', 'single_transformer_blocks.17.norm.linear.weight', 'single_transformer_blocks.0.proj_mlp.bias', 'single_transformer_blocks.16.norm.linear.bias', 'single_transformer_blocks.2.proj_out.weight', 'single_transformer_blocks.13.attn.to_v.bias', 'single_transformer_blocks.18.norm.linear.weight', 'single_transformer_blocks.5.attn.to_k.bias', 'single_transformer_blocks.10.attn.to_k.bias', 'single_transformer_blocks.5.proj_out.bias', 'single_transformer_blocks.7.attn.to_k.bias', 'single_transformer_blocks.18.proj_out.weight', 
'transformer_blocks.2.ff.net.2.weight', 'single_transformer_blocks.6.attn.to_k.bias', 'single_transformer_blocks.11.norm.linear.weight', 'single_transformer_blocks.17.norm.linear.bias', 'single_transformer_blocks.19.proj_mlp.bias', 'single_transformer_blocks.2.proj_out.bias', 'single_transformer_blocks.4.attn.to_v.bias', 'transformer_blocks.3.attn.to_out.0.bias', 'single_transformer_blocks.2.attn.to_q.bias', 'single_transformer_blocks.18.proj_mlp.bias', 'transformer_blocks.4.norm1_context.linear.weight', 'single_transformer_blocks.6.attn.to_v.bias', 'single_transformer_blocks.1.proj_mlp.bias', 'transformer_blocks.1.attn.add_q_proj.bias', 'transformer_blocks.3.attn.add_k_proj.bias', 'transformer_blocks.2.ff.net.0.proj.weight', 'single_transformer_blocks.6.norm.linear.weight', 'transformer_blocks.0.attn.to_q.bias', 'transformer_blocks.2.norm1.linear.weight', 'single_transformer_blocks.4.norm.linear.weight', 'single_transformer_blocks.11.proj_out.weight', 'single_transformer_blocks.19.attn.to_v.weight', 'transformer_blocks.0.attn.add_k_proj.bias', 'single_transformer_blocks.2.proj_mlp.bias', 'transformer_blocks.4.ff_context.net.2.weight', 'transformer_blocks.3.ff.net.2.bias', 'single_transformer_blocks.17.attn.to_v.weight', 'transformer_blocks.4.ff_context.net.2.bias', 'transformer_blocks.1.ff_context.net.2.bias', 'transformer_blocks.4.attn.to_out.0.bias', 'single_transformer_blocks.9.attn.to_k.weight', 'single_transformer_blocks.0.proj_out.weight', 'single_transformer_blocks.7.norm.linear.bias', 'single_transformer_blocks.12.attn.to_v.bias', 'single_transformer_blocks.13.attn.to_k.bias', 'single_transformer_blocks.4.attn.to_q.weight', 'single_transformer_blocks.19.attn.to_k.bias', 'transformer_blocks.0.ff_context.net.0.proj.weight', 'single_transformer_blocks.4.attn.to_k.weight', 'time_text_embed.text_embedder.linear_1.bias', 'transformer_blocks.3.attn.add_v_proj.bias', 'transformer_blocks.1.ff_context.net.0.proj.weight', 'single_transformer_blocks.8.attn.to_q.bias', 'single_transformer_blocks.10.proj_mlp.bias', 'transformer_blocks.4.attn.add_q_proj.bias', 'single_transformer_blocks.4.proj_out.bias', 'single_transformer_blocks.16.attn.to_k.bias', 'transformer_blocks.4.attn.to_add_out.bias', 'transformer_blocks.2.norm1_context.linear.bias', 'single_transformer_blocks.8.attn.to_k.bias', 'transformer_blocks.0.attn.add_v_proj.bias', 'single_transformer_blocks.17.attn.to_k.weight', 'transformer_blocks.2.ff_context.net.0.proj.bias', 'single_transformer_blocks.19.proj_out.bias', 'single_transformer_blocks.10.proj_mlp.weight', 'transformer_blocks.0.ff.net.2.weight', 'single_transformer_blocks.5.attn.to_k.weight', 'single_transformer_blocks.7.proj_mlp.bias', 'single_transformer_blocks.12.proj_out.bias', 'single_transformer_blocks.18.proj_out.bias', 'single_transformer_blocks.8.attn.to_v.weight', 'single_transformer_blocks.14.attn.to_v.weight', 'single_transformer_blocks.12.proj_mlp.weight', 'transformer_blocks.4.norm1.linear.bias', 'transformer_blocks.1.norm1_context.linear.weight', 'single_transformer_blocks.5.attn.to_q.weight', 'single_transformer_blocks.1.proj_out.weight', 'single_transformer_blocks.2.attn.to_q.weight', 'transformer_blocks.1.norm1_context.linear.bias', 'single_transformer_blocks.1.attn.to_q.bias', 'single_transformer_blocks.1.attn.to_q.weight', 'time_text_embed.timestep_embedder.linear_1.bias', 'single_transformer_blocks.12.attn.to_k.bias', 'single_transformer_blocks.6.proj_out.weight', 'single_transformer_blocks.9.proj_out.bias', 'single_transformer_blocks.13.proj_out.weight', 
'time_text_embed.text_embedder.linear_1.weight', 'transformer_blocks.2.attn.add_q_proj.bias', 'single_transformer_blocks.7.proj_out.bias', 'single_transformer_blocks.1.proj_mlp.weight', 'single_transformer_blocks.10.norm.linear.weight', 'single_transformer_blocks.5.attn.to_q.bias', 'single_transformer_blocks.0.attn.to_k.weight', 'single_transformer_blocks.18.proj_mlp.weight', 'single_transformer_blocks.12.proj_mlp.bias', 'transformer_blocks.0.ff.net.2.bias', 'single_transformer_blocks.13.norm.linear.weight', 'single_transformer_blocks.3.proj_mlp.bias', 'single_transformer_blocks.11.attn.to_k.bias', 'single_transformer_blocks.18.attn.to_q.weight', 'single_transformer_blocks.1.attn.to_k.bias', 'single_transformer_blocks.4.proj_mlp.weight', 'transformer_blocks.0.attn.to_out.0.bias', 'single_transformer_blocks.6.attn.to_q.weight', 'single_transformer_blocks.6.proj_mlp.weight', 'single_transformer_blocks.3.attn.to_v.bias', 'single_transformer_blocks.13.proj_mlp.bias', 'single_transformer_blocks.19.proj_mlp.weight', 'time_text_embed.timestep_embedder.linear_1.weight', 'single_transformer_blocks.11.attn.to_v.weight', 'transformer_blocks.1.ff.net.0.proj.bias', 'single_transformer_blocks.7.norm.linear.weight', 'single_transformer_blocks.8.proj_out.bias', 'transformer_blocks.3.norm1.linear.bias', 'single_transformer_blocks.14.attn.to_v.bias', 'transformer_blocks.0.ff_context.net.2.weight', 'single_transformer_blocks.16.norm.linear.weight', 'transformer_blocks.0.attn.to_add_out.bias', 'single_transformer_blocks.14.proj_out.weight', 'single_transformer_blocks.9.attn.to_v.bias', 'single_transformer_blocks.14.proj_out.bias', 'single_transformer_blocks.14.norm.linear.bias', 'transformer_blocks.2.attn.to_out.0.bias', 'transformer_blocks.3.attn.to_v.bias', 'transformer_blocks.0.attn.to_v.bias', 'transformer_blocks.4.attn.to_q.bias', 'transformer_blocks.4.attn.add_k_proj.bias', 'transformer_blocks.2.attn.to_q.bias', 'single_transformer_blocks.9.proj_mlp.weight', 'single_transformer_blocks.16.attn.to_q.bias', 'transformer_blocks.3.ff.net.0.proj.weight', 'transformer_blocks.3.norm1_context.linear.weight', 'transformer_blocks.2.attn.to_add_out.bias', 'single_transformer_blocks.14.norm.linear.weight', 'single_transformer_blocks.3.attn.to_k.bias', 'single_transformer_blocks.13.attn.to_v.weight', 'context_embedder.bias', 'transformer_blocks.4.ff.net.0.proj.bias', 'transformer_blocks.0.norm1_context.linear.bias', 'single_transformer_blocks.7.proj_mlp.weight', 'single_transformer_blocks.12.attn.to_v.weight', 'single_transformer_blocks.6.proj_mlp.bias', 'single_transformer_blocks.12.norm.linear.weight', 'single_transformer_blocks.15.attn.to_q.bias', 'single_transformer_blocks.15.norm.linear.weight', 'transformer_blocks.2.norm1.linear.bias', 'single_transformer_blocks.17.attn.to_q.bias', 'single_transformer_blocks.6.attn.to_k.weight', 'proj_out.bias', 'single_transformer_blocks.0.attn.to_q.weight', 'single_transformer_blocks.13.attn.to_q.bias', 'single_transformer_blocks.15.proj_out.weight', 'single_transformer_blocks.12.attn.to_q.weight', 'transformer_blocks.1.ff.net.0.proj.weight', 'single_transformer_blocks.14.proj_mlp.weight', 'single_transformer_blocks.0.proj_mlp.weight', 'transformer_blocks.1.norm1.linear.weight', 'transformer_blocks.1.attn.to_add_out.bias', 'single_transformer_blocks.14.attn.to_q.bias', 'transformer_blocks.3.ff_context.net.0.proj.bias', 'single_transformer_blocks.8.proj_mlp.bias', 'transformer_blocks.2.attn.to_v.bias', 'transformer_blocks.0.attn.to_k.bias', 
'single_transformer_blocks.6.norm.linear.bias', 'single_transformer_blocks.10.norm.linear.bias', 'single_transformer_blocks.14.attn.to_q.weight', 'single_transformer_blocks.8.norm.linear.bias', 'single_transformer_blocks.11.attn.to_v.bias', 'single_transformer_blocks.17.proj_out.bias', 'single_transformer_blocks.5.proj_out.weight', 'single_transformer_blocks.4.attn.to_k.bias', 'single_transformer_blocks.15.attn.to_k.weight', 'single_transformer_blocks.18.attn.to_q.bias', 'single_transformer_blocks.5.norm.linear.weight', 'transformer_blocks.0.norm1.linear.weight', 'single_transformer_blocks.11.attn.to_q.weight', 'transformer_blocks.2.ff_context.net.2.weight', 'single_transformer_blocks.3.norm.linear.weight', 'transformer_blocks.4.attn.to_v.bias', 'single_transformer_blocks.19.attn.to_k.weight', 'single_transformer_blocks.10.proj_out.bias', 'single_transformer_blocks.15.norm.linear.bias', 'transformer_blocks.4.attn.add_v_proj.bias', 'single_transformer_blocks.10.attn.to_q.weight', 'single_transformer_blocks.7.proj_out.weight', 'single_transformer_blocks.15.proj_out.bias', 'single_transformer_blocks.9.proj_out.weight', 'single_transformer_blocks.8.norm.linear.weight', 'single_transformer_blocks.2.attn.to_v.bias', 'transformer_blocks.1.attn.add_v_proj.bias', 'single_transformer_blocks.17.attn.to_q.weight', 'single_transformer_blocks.3.proj_out.bias', 'transformer_blocks.3.ff_context.net.2.bias', 'transformer_blocks.1.ff_context.net.2.weight', 'single_transformer_blocks.14.attn.to_k.bias', 'transformer_blocks.2.attn.to_k.bias', 'single_transformer_blocks.5.proj_mlp.bias', 'transformer_blocks.0.norm1_context.linear.weight', 'transformer_blocks.1.attn.to_v.bias', 'single_transformer_blocks.5.attn.to_v.bias', 'single_transformer_blocks.10.attn.to_v.bias', 'single_transformer_blocks.11.proj_mlp.weight', 'transformer_blocks.2.ff_context.net.2.bias', 'single_transformer_blocks.11.attn.to_k.weight', 'transformer_blocks.1.attn.to_out.0.bias', 'transformer_blocks.1.attn.add_k_proj.bias', 'single_transformer_blocks.8.proj_mlp.weight', 'transformer_blocks.1.ff.net.2.weight', 'single_transformer_blocks.4.norm.linear.bias', 'single_transformer_blocks.12.proj_out.weight', 'transformer_blocks.0.ff.net.0.proj.weight', 'single_transformer_blocks.9.attn.to_k.bias', 'single_transformer_blocks.19.norm.linear.weight', 'single_transformer_blocks.8.proj_out.weight', 'single_transformer_blocks.10.attn.to_k.weight', 'single_transformer_blocks.0.attn.to_k.bias', 'single_transformer_blocks.7.attn.to_k.weight', 'single_transformer_blocks.9.attn.to_v.weight', 'single_transformer_blocks.13.proj_out.bias', 'single_transformer_blocks.1.attn.to_v.bias', 'single_transformer_blocks.3.proj_mlp.weight', 'single_transformer_blocks.6.attn.to_v.weight', 'single_transformer_blocks.16.attn.to_q.weight', 'single_transformer_blocks.13.proj_mlp.weight', 'single_transformer_blocks.19.norm.linear.bias', 'single_transformer_blocks.5.norm.linear.bias', 'single_transformer_blocks.10.attn.to_q.bias', 'single_transformer_blocks.17.attn.to_v.bias', 'single_transformer_blocks.0.attn.to_v.weight', 'single_transformer_blocks.10.attn.to_v.weight', 'single_transformer_blocks.3.attn.to_k.weight', 'single_transformer_blocks.1.norm.linear.bias', 'single_transformer_blocks.6.attn.to_q.bias', 'single_transformer_blocks.11.norm.linear.bias', 'time_text_embed.timestep_embedder.linear_2.weight', 'time_text_embed.text_embedder.linear_2.bias', 'single_transformer_blocks.3.norm.linear.bias', 'transformer_blocks.0.ff.net.0.proj.bias', 
'single_transformer_blocks.3.attn.to_q.weight', 'single_transformer_blocks.17.proj_mlp.weight', 'transformer_blocks.3.ff_context.net.0.proj.weight', 'single_transformer_blocks.12.attn.to_q.bias', 'single_transformer_blocks.5.proj_mlp.weight', 'single_transformer_blocks.0.attn.to_q.bias', 'single_transformer_blocks.3.proj_out.weight', 'transformer_blocks.4.ff.net.2.bias', 'single_transformer_blocks.4.proj_mlp.bias', 'single_transformer_blocks.2.proj_mlp.weight', 'single_transformer_blocks.16.proj_mlp.bias', 'single_transformer_blocks.17.proj_mlp.bias', 'time_text_embed.text_embedder.linear_2.weight', 'single_transformer_blocks.8.attn.to_q.weight', 'single_transformer_blocks.8.attn.to_k.weight', 'single_transformer_blocks.14.attn.to_k.weight', 'single_transformer_blocks.3.attn.to_q.bias', 'single_transformer_blocks.1.proj_out.bias', 'time_text_embed.timestep_embedder.linear_2.bias', 'single_transformer_blocks.13.attn.to_k.weight', 'single_transformer_blocks.11.proj_out.bias', 'x_embedder.bias', 'transformer_blocks.1.attn.to_q.bias', 'single_transformer_blocks.15.proj_mlp.weight', 'single_transformer_blocks.2.norm.linear.weight', 'single_transformer_blocks.12.attn.to_k.weight']\n","You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"]},{"name":"stdout","output_type":"stream","text":["βœ… Transformer loaded successfully. VRAM: 9.09 GB\n","βœ… LoRA applied successfully\n","VRAM after LoRA: 9.16 GB\n"]}],"source":["# ============================= CELL 4.a: Load Transformer + Apply LoRA =============================\n","# @title 4.a Load Transformer + LoRA (Fixed meta tensor loading)\n","\n","import torch\n","import gc\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model\n","from diffusers import FluxTransformer2DModel\n","from transformers import set_seed\n","\n","set_seed(42)\n","\n","print(\"=== CELL 4.a – Loading Transformer + LoRA ===\")\n","print(f\"Current VRAM before loading: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Load precomputed embeddings\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# ====================== Load Transformer (Fixed) ======================\n","print(\"Loading FLUX.2-klein-base-4B transformer...\")\n","\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False # Critical: prevents meta tensors\n",").to(\"cuda\")\n","\n","print(f\"βœ… Transformer loaded successfully. 
VRAM: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# ====================== Apply LoRA ======================\n","lora_config = LoraConfig(\n"," r=LORA_RANK,\n"," lora_alpha=LORA_ALPHA,\n"," target_modules=[\n"," \"attn.to_q\", \"attn.to_k\", \"attn.to_v\", \"attn.to_out.0\",\n"," \"attn.to_qkv_mlp_proj\", \"attn.add_q_proj\", \"attn.add_k_proj\",\n"," \"attn.add_v_proj\", \"attn.to_add_out\",\n"," \"ff.linear_in\", \"ff.linear_out\", \"ff_context.linear_in\", \"ff_context.linear_out\"\n"," ],\n"," lora_dropout=0.05,\n"," bias=\"none\"\n",")\n","\n","transformer = get_peft_model(transformer, lora_config)\n","transformer.train()\n","\n","print(\"✅ LoRA applied successfully\")\n","print(f\"VRAM after LoRA: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")"]},
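{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# ============================= EDITOR ADDITION: Two Likely Fixes (hedged) =============================\n","# The long stderr warning above suggests a class mismatch: the FLUX.2 checkpoint\n","# was loaded with the FLUX.1 FluxTransformer2DModel class, so most checkpoint\n","# weights were discarded and re-initialized at random. Recent diffusers releases\n","# expose a dedicated FLUX.2 transformer class (an assumption about the version\n","# installed here):\n","import torch\n","from diffusers import Flux2Transformer2DModel  # assumes a diffusers release with FLUX.2 support\n","\n","transformer = Flux2Transformer2DModel.from_pretrained(\n","    \"black-forest-labs/FLUX.2-klein-base-4B\",\n","    subfolder=\"transformer\",\n","    torch_dtype=torch.bfloat16,\n",").to(\"cuda\")\n","\n","# Separately, the RuntimeError recorded below (\"got 4 and 5\" dimensions) is\n","# consistent with passing raw 4-D latents (B, 16, 128, 128) as hidden_states;\n","# the transformer expects packed 3-D tokens (B, 4096, 64) as built in Cell 2.c."]},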
\u001b[0mcontext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2536\u001b[0;31m \u001b[0mtr_loss_step\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtraining_step\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum_items_in_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2537\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2538\u001b[0m if (\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/transformers/trainer.py\u001b[0m in \u001b[0;36mtraining_step\u001b[0;34m(self, model, inputs, num_items_in_batch)\u001b[0m\n\u001b[1;32m 3807\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3808\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute_loss_context_manager\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3809\u001b[0;31m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute_loss\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum_items_in_batch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnum_items_in_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3810\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3811\u001b[0m \u001b[0;32mdel\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/tmp/ipykernel_1539/4055019116.py\u001b[0m in \u001b[0;36mcompute_loss\u001b[0;34m(self, model, inputs, return_outputs, num_items_in_batch)\u001b[0m\n\u001b[1;32m 75\u001b[0m \u001b[0mimg_ids\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg_tokens\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbfloat16\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 76\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 77\u001b[0;31m model_output = model(\n\u001b[0m\u001b[1;32m 78\u001b[0m \u001b[0mhidden_states\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnoisy_latents\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 79\u001b[0m \u001b[0mtimestep\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtimesteps\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0;36m1000\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1774\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1775\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1776\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1777\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1778\u001b[0m \u001b[0;31m# torchrec tests the code consistency with the following code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1785\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1786\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1787\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1788\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1789\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/accelerate/utils/operations.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 821\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 822\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 823\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mmodel_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 824\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 825\u001b[0m \u001b[0;31m# To act like a decorator so that it can be popped when doing `extract_model_from_parallel`\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/accelerate/utils/operations.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 809\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 810\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 811\u001b[0;31m 
\u001b[0;32mreturn\u001b[0m \u001b[0mconvert_to_fp32\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","[... repeated accelerate convert_to_fp32 / autocast / model_forward wrapper frames elided (identical to the frames above) ...]\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/accelerate/utils/operations.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m--> 811\u001b[0;31m    \u001b[0;32mreturn\u001b[0m 
\u001b[0mconvert_to_fp32\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 812\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 813\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__getstate__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/amp/autocast_mode.py\u001b[0m in \u001b[0;36mdecorate_autocast\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdecorate_autocast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mautocast_instance\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 44\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 45\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 46\u001b[0m decorate_autocast.__script_unsupported = ( # type: ignore[attr-defined]\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/peft/peft_model.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 919\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_enable_peft_forward_hooks\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 920\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mv\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mk\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mspecial_peft_forward_args\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 921\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_base_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 922\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 923\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mgenerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1774\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1775\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1776\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1777\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1778\u001b[0m \u001b[0;31m# torchrec tests the code consistency with the following code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1785\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1786\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1787\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1788\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1789\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/diffusers/utils/peft_utils.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 313\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 314\u001b[0m \u001b[0;31m# Execute the forward pass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 315\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mforward_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 316\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 317\u001b[0m 
\u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/diffusers/models/transformers/transformer_flux.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, hidden_states, encoder_hidden_states, pooled_projections, timestep, img_ids, txt_ids, guidance, joint_attention_kwargs, controlnet_block_samples, controlnet_single_block_samples, return_dict, controlnet_blocks_repeat)\u001b[0m\n\u001b[1;32m 724\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 725\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 726\u001b[0;31m encoder_hidden_states, hidden_states = block(\n\u001b[0m\u001b[1;32m 727\u001b[0m \u001b[0mhidden_states\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhidden_states\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 728\u001b[0m \u001b[0mencoder_hidden_states\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mencoder_hidden_states\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1774\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1775\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1776\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1777\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1778\u001b[0m \u001b[0;31m# torchrec tests the code consistency with the following code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1785\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1786\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1787\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1788\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1789\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/diffusers/models/transformers/transformer_flux.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, hidden_states, 
encoder_hidden_states, temb, image_rotary_emb, joint_attention_kwargs)\u001b[0m\n\u001b[1;32m 451\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 452\u001b[0m \u001b[0;31m# Attention.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 453\u001b[0;31m attention_outputs = self.attn(\n\u001b[0m\u001b[1;32m 454\u001b[0m \u001b[0mhidden_states\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnorm_hidden_states\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 455\u001b[0m \u001b[0mencoder_hidden_states\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnorm_encoder_hidden_states\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1774\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1775\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1776\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1777\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1778\u001b[0m \u001b[0;31m# torchrec tests the code consistency with the following code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1785\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1786\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1787\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1788\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1789\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/diffusers/models/transformers/transformer_flux.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs)\u001b[0m\n\u001b[1;32m 350\u001b[0m )\n\u001b[1;32m 351\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mw\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mw\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0mkwargs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mk\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mattn_parameters\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 352\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprocessor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhidden_states\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mencoder_hidden_states\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mattention_mask\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimage_rotary_emb\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 353\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 354\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/diffusers/models/transformers/transformer_flux.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, attn, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb)\u001b[0m\n\u001b[1;32m 108\u001b[0m \u001b[0mencoder_key\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mattn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnorm_added_k\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mencoder_key\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 109\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 110\u001b[0;31m \u001b[0mquery\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mencoder_query\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mquery\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 111\u001b[0m \u001b[0mkey\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mencoder_key\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[0mvalue\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mencoder_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mRuntimeError\u001b[0m: Tensors must have same number of dimensions: got 4 and 5"]}],"source":["# ============================= CELL 4.b: LoRA Training (Robust version for klein) =============================\n","# @title 4.b Training with Precomputed + Careful Reshape\n","\n","from transformers import Trainer, TrainingArguments\n","import torch.nn.functional as F\n","import torch.nn as nn\n","from tqdm import tqdm\n","\n","# Load latents (keep on CPU until needed)\n","latents_list = []\n","for lf in tqdm(sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")]), desc=\"Loading latents\"):\n"," latent = 
torch.load(os.path.join(LATENT_DIR, lf), weights_only=True)\n","    if latent.dim() == 4 and latent.shape[0] == 1:\n","        latent = latent.squeeze(0)\n","    latents_list.append(latent)\n","latents = torch.stack(latents_list)\n","print(f\"Latents shape: {latents.shape}\")\n","\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# Pooled: 2560 β†’ 768 (standard for pooled_projections in FluxTransformer2DModel)\n","pooled_projection = nn.Linear(2560, 768, bias=True, dtype=torch.bfloat16).to(\"cuda\")\n","with torch.no_grad():\n","    # Initialize as identity on the first 768 dims, zeros elsewhere (cheap, trainable init)\n","    min_d = min(2560, 768)\n","    pooled_projection.weight.data[:, :min_d] = torch.eye(min_d, dtype=torch.bfloat16)\n","    pooled_projection.bias.data.zero_()\n","pooled_projection.train()\n","\n","def _pack_latents(x):\n","    # 2x2 patchify: (B, C, H, W) -> (B, (H/2)*(W/2), C*4), the token layout FluxPipeline\n","    # feeds the transformer. Passing unpacked 4D latents is what raised the\n","    # \"got 4 and 5\" torch.cat error above: the attention processor splits the image\n","    # stream into heads and then cannot concatenate it with the 3D text stream.\n","    b, c, h, w = x.shape\n","    x = x.view(b, c, h // 2, 2, w // 2, 2).permute(0, 2, 4, 1, 3, 5)\n","    return x.reshape(b, (h // 2) * (w // 2), c * 4)\n","\n","class FluxLoRADataset(Dataset):\n","    def __init__(self, latents, precomputed):\n","        self.latents = latents\n","        self.encoder_hs = precomputed[\"encoder_hidden_states\"]\n","        self.pooled = precomputed[\"pooled_projections\"]\n","\n","    def __len__(self): return len(self.latents)\n","\n","    def __getitem__(self, idx):\n","        return {\n","            \"latent\": self.latents[idx],\n","            \"encoder_hidden_states\": self.encoder_hs[idx],  # (seq_len, ~2560)\n","            \"pooled_raw\": self.pooled[idx]\n","        }\n","\n","def collate_fn(batch):\n","    return {\n","        \"latent\": torch.stack([item[\"latent\"] for item in batch]),\n","        \"encoder_hidden_states\": [item[\"encoder_hidden_states\"] for item in batch],\n","        \"pooled_raw\": torch.stack([item[\"pooled_raw\"] for item in batch])\n","    }\n","\n","class FluxLoRATrainer(Trainer):\n","    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n","        latents = inputs[\"latent\"].to(dtype=torch.bfloat16, device=model.device)\n","        encoder_hs_list = inputs[\"encoder_hidden_states\"]\n","        pooled_raw = inputs[\"pooled_raw\"].to(dtype=torch.bfloat16, device=model.device)\n","\n","        pooled_projections = pooled_projection(pooled_raw)\n","\n","        batch_size = latents.shape[0]\n","        timesteps = torch.rand(batch_size, device=latents.device)\n","        noise = torch.randn_like(latents)\n","        noisy_latents = (1 - timesteps.view(-1, 1, 1, 1)) * latents + timesteps.view(-1, 1, 1, 1) * noise\n","\n","        # === Careful reshape for encoder_hidden_states ===\n","        # Sequences are variable-length, so this assumes BATCH_SIZE == 1: take the single\n","        # item, add the batch dim, and tile 2560 -> 7680 (3*2560) to reach the joint width\n","        # the transformer expects. Tiling is a stopgap, not a learned projection.\n","        enc = encoder_hs_list[0].to(dtype=torch.bfloat16, device=model.device).unsqueeze(0)  # (1, seq_len, 2560)\n","        encoder_hidden_states = torch.cat([enc, enc, enc], dim=-1)  # (1, seq_len, 7680)\n","\n","        # Pack image latents into a token sequence; the loss target must be packed identically.\n","        # For 1024x1024 latents (16x128x128) this gives (128*128) // 4 = 4096 tokens.\n","        packed_noisy = _pack_latents(noisy_latents)   # (B, (H/2)*(W/2), 64)\n","        packed_target = _pack_latents(noise - latents)\n","\n","        # 2D position ids of shape (tokens, 3); all-zero float ids are placeholders that\n","        # disable spatial RoPE variation.\n","        seq_len = encoder_hidden_states.shape[1]\n","        txt_ids = torch.zeros((seq_len, 3), device=model.device, dtype=torch.float32)\n","        img_ids = torch.zeros((packed_noisy.shape[1], 3), device=model.device, dtype=torch.float32)\n","\n","        model_output = model(\n","            hidden_states=packed_noisy,\n","            timestep=timesteps * 1000,\n","            encoder_hidden_states=encoder_hidden_states,\n","            pooled_projections=pooled_projections,\n","            txt_ids=txt_ids,\n","            img_ids=img_ids,\n","            return_dict=False\n","        )[0]\n","\n","        # Flow-matching target, packed to match the model output\n","        loss = F.mse_loss(model_output, packed_target)\n","\n","        return (loss, model_output) if return_outputs else loss\n",
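"\n","# Sanity check (illustrative; _pack_latents is our own helper above, not a diffusers API):\n","# the 2x2 packing of a 16x128x128 latent must give 64*64 = 4096 tokens of width 16*4 = 64.\n","assert _pack_latents(torch.zeros(1, 16, 128, 128)).shape == (1, 4096, 64)\n",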
"\n","dataset = FluxLoRADataset(latents, precomputed)  # instantiate the dataset (was missing in this cell)\n","\n","training_args = TrainingArguments(\n","    output_dir=\"/content/flux_klein_lora\",\n","    per_device_train_batch_size=BATCH_SIZE,\n","    num_train_epochs=NUM_EPOCHS,\n","    learning_rate=LEARNING_RATE,\n","    lr_scheduler_type=\"cosine\",\n","    warmup_steps=50,\n","    bf16=True,\n","    logging_steps=10,\n","    save_strategy=\"epoch\",\n","    save_total_limit=2,\n","    report_to=\"none\",\n","    remove_unused_columns=False,\n","    dataloader_pin_memory=False,\n",")\n","\n","trainer = FluxLoRATrainer(\n","    model=transformer,\n","    args=training_args,\n","    train_dataset=dataset,\n","    data_collator=collate_fn,\n",")\n","\n","print(\"\\nπŸš€ Starting training...\")\n","trainer.train()"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"DRoYrgC-rbkt"},"outputs":[],"source":["# ============================= CELL 4.c: Save LoRA =============================\n","# @title 4.c Save Final LoRA\n","\n","import gc  # not guaranteed to be imported yet at this point in the notebook\n","\n","final_lora_dir = FINAL_LORA_DIR\n","os.makedirs(final_lora_dir, exist_ok=True)\n","transformer.save_pretrained(final_lora_dir)\n","\n","print(f\"\\nβœ… Training completed! LoRA saved to: {final_lora_dir}\")\n","torch.cuda.empty_cache()\n","gc.collect()"]},{"cell_type":"markdown","metadata":{"id":"wqiYKR-inOlo"},"source":["Original LoRA training code (kept for reference)"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","colab":{"background_save":true},"id":"fYbQfjC9RBWY"},"outputs":[],"source":["# ============================= CELL 4: LoRA Training on FLUX.2-klein-base-4B (Clean & Reliable) =============================\n","# @title 4. LoRA Training β€” FLUX.2-klein-base-4B + 768-dim Distilled Qwen\n","\n","import os\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import gc\n","from torch.utils.data import Dataset\n","from tqdm import tqdm\n","from google.colab import drive\n","from transformers import AutoModel, AutoTokenizer, Trainer, TrainingArguments, set_seed\n","from peft import LoraConfig, PeftModel, get_peft_model\n","from diffusers import FluxTransformer2DModel\n","\n","set_seed(42)\n","drive.mount('/content/drive', force_remount=True)\n","\n","print(\"=== CELL 4 START - Clean Restart ===\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# ====================== 1. Load Distilled 768-dim Encoder ======================\n","print(\"\\n[1/5] Loading distilled 768-dim Qwen encoder...\")\n","\n","tokenizer = AutoTokenizer.from_pretrained(DISTILLED_DIR)\n","\n","base_qwen = AutoModel.from_pretrained(\n","    \"Qwen/Qwen2.5-0.5B\",\n","    torch_dtype=torch.float32,\n","    device_map=\"auto\",\n","    trust_remote_code=True,\n","    low_cpu_mem_usage=True\n",")\n","\n","student_model = PeftModel.from_pretrained(base_qwen, DISTILLED_DIR)\n","student_model.eval()\n","\n","projection = nn.Linear(base_qwen.config.hidden_size, 768).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{DISTILLED_DIR}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"[DEBUG] Distilled encoder loaded (768-dim)\")\n",
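"\n","# Quick smoke test (illustrative; shapes assume mean pooling over tokens, exactly as\n","# compute_loss does below). One caption should map to a single 768-dim vector.\n","with torch.no_grad():\n","    _toks = tokenizer([\"a test caption\"], return_tensors=\"pt\").to(\"cuda\")\n","    _emb = projection(student_model(**_toks).last_hidden_state.mean(dim=1))\n","print(f\"[DEBUG] Smoke-test embedding: {_emb.shape}\")  # expected: torch.Size([1, 768])\n","\n","# ====================== 2. 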
Load Data ======================\n","print(\"\\n[2/5] Loading texts and latents...\")\n","\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","print(f\"[DEBUG] Loaded {len(texts)} texts\")\n","\n","latent_files = sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")])\n","latents = []\n","for lf in tqdm(latent_files, desc=\"Loading latents\"):\n"," latent = torch.load(os.path.join(LATENT_DIR, lf), weights_only=False)\n"," if latent.dim() == 4 and latent.shape[0] == 1:\n"," latent = latent.squeeze(0)\n"," latents.append(latent)\n","\n","latents = torch.stack(latents)\n","print(f\"[DEBUG] Latents shape: {latents.shape}\")\n","\n","# ====================== 3. Dataset ======================\n","class FluxLoRADataset(Dataset):\n"," def __init__(self, latents, texts):\n"," self.latents = latents\n"," self.texts = texts\n","\n"," def __len__(self): return len(self.latents)\n","\n"," def __getitem__(self, idx):\n"," return {\"latent\": self.latents[idx], \"text\": self.texts[idx]}\n","\n","dataset = FluxLoRADataset(latents, texts)\n","\n","def collate_fn(batch):\n"," return {\n"," \"latent\": torch.stack([item[\"latent\"] for item in batch]),\n"," \"texts\": [item[\"text\"] for item in batch]\n"," }\n","\n","# ====================== 4. Load Transformer + LoRA ======================\n","print(\"\\n[3/5] Loading FLUX.2-klein-base-4B transformer...\")\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False,\n",").to(\"cuda\")\n","\n","print(f\"[DEBUG] Transformer loaded. VRAM: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","lora_config = LoraConfig(\n"," r=LORA_RANK,\n"," lora_alpha=LORA_ALPHA,\n"," target_modules=[\n"," \"attn.to_q\", \"attn.to_k\", \"attn.to_v\", \"attn.to_out.0\",\n"," \"attn.to_qkv_mlp_proj\", \"attn.add_q_proj\", \"attn.add_k_proj\", \"attn.add_v_proj\", \"attn.to_add_out\",\n"," \"ff.linear_in\", \"ff.linear_out\", \"ff_context.linear_in\", \"ff_context.linear_out\"\n"," ],\n"," lora_dropout=0.05,\n"," bias=\"none\"\n",")\n","\n","transformer = get_peft_model(transformer, lora_config)\n","transformer.train()\n","\n","print(\"[DEBUG] LoRA applied successfully\")\n","\n","# ====================== 5. 
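Trainer with Simple Repeat Trick ======================\n","# The \"simple repeat trick\" fakes a one-token text sequence by tiling the pooled\n","# 768-dim vector along the feature axis (a stopgap, not a learned projection).\n","# Shape-wise: torch.randn(2, 768).unsqueeze(1).repeat(1, 1, 10).shape == (2, 1, 7680)\n",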
class FluxLoRATrainer(Trainer):\n","    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n","        latents = inputs[\"latent\"].to(dtype=torch.bfloat16, device=model.device)\n","        raw_texts = inputs[\"texts\"]\n","\n","        # Get 768-dim embedding\n","        text_inputs = tokenizer(raw_texts, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n","\n","        with torch.no_grad():\n","            outputs = student_model(**text_inputs)\n","            hidden = outputs.last_hidden_state.mean(dim=1)\n","            text_emb_768 = projection(hidden).to(dtype=torch.bfloat16)  # (B, 768)\n","\n","        batch_size = latents.shape[0]\n","        timesteps = torch.rand(batch_size, device=latents.device)\n","        noise = torch.randn_like(latents)\n","\n","        noisy_latents = (1 - timesteps.view(-1, 1, 1, 1)) * latents + timesteps.view(-1, 1, 1, 1) * noise\n","\n","        # Simple repeat trick for encoder_hidden_states (most stable for single-token)\n","        encoder_hidden_states = text_emb_768.unsqueeze(1).repeat(1, 1, 10)  # (B, 1, 7680)\n","\n","        # Pack 4D latents into the (B, tokens, 64) sequence the transformer expects\n","        # (the same 2x2 patchify FluxPipeline applies); feeding unpacked 4D latents is\n","        # what raised the \"got 4 and 5\" torch.cat error in cell 4.b.\n","        b, c, h, w = noisy_latents.shape\n","        packed_noisy = noisy_latents.view(b, c, h // 2, 2, w // 2, 2).permute(0, 2, 4, 1, 3, 5).reshape(b, (h // 2) * (w // 2), c * 4)\n","        packed_target = (noise - latents).view(b, c, h // 2, 2, w // 2, 2).permute(0, 2, 4, 1, 3, 5).reshape(b, (h // 2) * (w // 2), c * 4)\n","\n","        # 2D ids (no batch dimension); all-zero float ids disable spatial RoPE variation,\n","        # and img_ids must match the packed token count, not H*W\n","        txt_ids = torch.zeros((1, 3), device=latents.device, dtype=torch.float32)\n","        img_ids = torch.zeros((packed_noisy.shape[1], 3), device=latents.device, dtype=torch.float32)\n","\n","        model_output = model(\n","            hidden_states=packed_noisy,\n","            timestep=timesteps * 1000,\n","            encoder_hidden_states=encoder_hidden_states,\n","            pooled_projections=text_emb_768,\n","            txt_ids=txt_ids,\n","            img_ids=img_ids,\n","            return_dict=False\n","        )[0]\n","\n","        # Flow-matching target, packed to match the model output\n","        loss = F.mse_loss(model_output, packed_target)\n","\n","        print(f\"[DEBUG] Loss: {loss.item():.6f} | pooled: {text_emb_768.shape} | encoder: {encoder_hidden_states.shape}\")\n","        return (loss, model_output) if return_outputs else loss\n","\n","# ====================== Training ======================\n","training_args = TrainingArguments(\n","    output_dir=\"/content/flux_klein_lora\",\n","    per_device_train_batch_size=BATCH_SIZE,\n","    num_train_epochs=NUM_EPOCHS,\n","    learning_rate=LEARNING_RATE,\n","    lr_scheduler_type=\"cosine\",\n","    warmup_steps=50,\n","    bf16=True,\n","    logging_steps=10,\n","    save_strategy=\"epoch\",\n","    save_total_limit=2,\n","    report_to=\"none\",\n","    remove_unused_columns=False,\n",")\n","\n","trainer = FluxLoRATrainer(\n","    model=transformer,\n","    args=training_args,\n","    train_dataset=dataset,\n","    data_collator=collate_fn,\n",")\n","\n","print(\"\\nπŸš€ Starting LoRA training with simple repeat trick...\")\n","trainer.train()\n","\n","# ====================== Save ======================\n","final_lora_dir = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","os.makedirs(final_lora_dir, exist_ok=True)\n","transformer.save_pretrained(final_lora_dir)\n","\n","print(f\"\\nβœ… Training completed!\")\n","print(f\"   LoRA saved to: {final_lora_dir}\")\n","print(\"   You can now use this LoRA with your 768-dim distilled Qwen for inference.\")\n","\n","torch.cuda.empty_cache()\n","gc.collect()"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file