{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.12.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceType":"datasetVersion","sourceId":15970688,"datasetId":10241843,"databundleVersionId":16931172}],"dockerImageVersionId":31329,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"!pip install torch_geometric","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-04-27T13:09:35.944197Z","iopub.execute_input":"2026-04-27T13:09:35.944792Z","iopub.status.idle":"2026-04-27T13:09:43.554302Z","shell.execute_reply.started":"2026-04-27T13:09:35.944758Z","shell.execute_reply":"2026-04-27T13:09:43.553566Z"}},"outputs":[{"name":"stdout","text":"Collecting torch_geometric\n Downloading torch_geometric-2.7.0-py3-none-any.whl.metadata (63 kB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m63.7/63.7 kB\u001b[0m \u001b[31m2.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hRequirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from torch_geometric) (3.13.3)\nRequirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from torch_geometric) (2026.2.0)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch_geometric) (3.1.6)\nRequirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from torch_geometric) (2.0.2)\nRequirement already satisfied: psutil>=5.8.0 in /usr/local/lib/python3.12/dist-packages (from torch_geometric) (5.9.5)\nRequirement already satisfied: pyparsing in /usr/local/lib/python3.12/dist-packages (from torch_geometric) (3.3.2)\nRequirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from torch_geometric) (2.32.4)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (from torch_geometric) (4.67.3)\nRequirement already satisfied: xxhash in /usr/local/lib/python3.12/dist-packages (from torch_geometric) (3.6.0)\nRequirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->torch_geometric) (2.6.1)\nRequirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->torch_geometric) (1.4.0)\nRequirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->torch_geometric) (25.4.0)\nRequirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->torch_geometric) (1.8.0)\nRequirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->torch_geometric) (6.7.1)\nRequirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->torch_geometric) (0.4.1)\nRequirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->torch_geometric) (1.22.0)\nRequirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch_geometric) (3.0.3)\nRequirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->torch_geometric) (3.4.4)\nRequirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.12/dist-packages (from requests->torch_geometric) (3.11)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->torch_geometric) (2.5.0)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.12/dist-packages (from requests->torch_geometric) (2026.1.4)\nRequirement already satisfied: typing-extensions>=4.2 in /usr/local/lib/python3.12/dist-packages (from aiosignal>=1.4.0->aiohttp->torch_geometric) (4.15.0)\nDownloading torch_geometric-2.7.0-py3-none-any.whl (1.3 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m23.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hInstalling collected packages: torch_geometric\nSuccessfully installed torch_geometric-2.7.0\n","output_type":"stream"}],"execution_count":3},{"cell_type":"code","source":"import shutil\nimport os\n\nsource_dir = '/kaggle/input/datasets/mitkir/shapenet/shapenetcore_partanno_segmentation_benchmark_v0_normal'\ntarget_dir = '/kaggle/working/data/ShapeNetPart/raw'\nif False:\n # Hedef dizin yoksa oluştur (güvenlik için)\n os.makedirs(target_dir, exist_ok=True)\n \n for item in os.listdir(source_dir):\n s = os.path.join(source_dir, item)\n d = os.path.join(target_dir, item)\n \n if os.path.isdir(s):\n # Eğer klasörse kopyala (zaten varsa hata vermemesi için dirs_exist_ok=True)\n shutil.copytree(s, d, dirs_exist_ok=True)\n else:\n # Eğer dosyaysa kopyala\n shutil.copy2(s, d)\n \n\nprint(\"Kopyalama tamamlandı.\")\n\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-04-22T19:42:43.020607Z","iopub.execute_input":"2026-04-22T19:42:43.021417Z","iopub.status.idle":"2026-04-22T19:45:54.830398Z","shell.execute_reply.started":"2026-04-22T19:42:43.021376Z","shell.execute_reply":"2026-04-22T19:45:54.829658Z"}},"outputs":[{"name":"stdout","text":"Kopyalama tamamlandı.\n","output_type":"stream"}],"execution_count":17},{"cell_type":"code","source":"import torch\n\n# Sistemdeki mevcut PyTorch ve CUDA sürümlerini tespit et\ntorch_version = torch.__version__.split('+')[0]\ncuda_version = torch.version.cuda.replace('.', '') # Örn: '12.1' -> '121'\n\n# Özel tekerlek (wheel) linkini dinamik olarak oluştur\nurl = f\"https://data.pyg.org/whl/torch-{torch_version}+cu{cuda_version}.html\"\n\nprint(f\"Sistem Tespiti: PyTorch {torch_version} | CUDA {cuda_version}\")\nprint(f\"İndirme Linki: {url}\\n\")\nprint(\"Kurulum başlatılıyor...\\n\")\n\n# Kurulumu dinamik link ile tetikle\n!pip install torch_cluster -f {url}","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-04-27T13:09:54.487289Z","iopub.execute_input":"2026-04-27T13:09:54.487759Z","iopub.status.idle":"2026-04-27T13:10:06.184412Z","shell.execute_reply.started":"2026-04-27T13:09:54.487727Z","shell.execute_reply":"2026-04-27T13:10:06.183662Z"}},"outputs":[{"name":"stdout","text":"Sistem Tespiti: PyTorch 2.10.0 | CUDA 128\nİndirme Linki: https://data.pyg.org/whl/torch-2.10.0+cu128.html\n\nKurulum başlatılıyor...\n\nLooking in links: https://data.pyg.org/whl/torch-2.10.0+cu128.html\nCollecting torch_cluster\n Downloading https://data.pyg.org/whl/torch-2.10.0%2Bcu128/torch_cluster-1.6.3%2Bpt210cu128-cp312-cp312-linux_x86_64.whl (3.5 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.5/3.5 MB\u001b[0m \u001b[31m24.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hRequirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from torch_cluster) (1.16.3)\nRequirement already satisfied: numpy<2.6,>=1.25.2 in /usr/local/lib/python3.12/dist-packages (from scipy->torch_cluster) (2.0.2)\nInstalling collected packages: torch_cluster\nSuccessfully installed torch_cluster-1.6.3+pt210cu128\n","output_type":"stream"}],"execution_count":4},{"cell_type":"code","source":"import os\nimport time\nimport math\nimport random\nfrom dataclasses import dataclass\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch_geometric.data import Data\nfrom torch_geometric.datasets import ShapeNet\nfrom torch_geometric.loader import DataLoader\nfrom torch_geometric.nn import MessagePassing\nfrom torch_geometric.utils import to_dense_batch\nfrom torch_cluster import fps, radius_graph, knn\nimport torch.compiler\nfrom torch.amp import autocast, GradScaler\n\nimport triton\nimport triton.language as tl\n\n# Dynamo Graph Break uyarılarını gidermek ve skaler çıktıları yakalamak için (KRİTİK)\ntorch._dynamo.config.capture_scalar_outputs = True\n\n# -----------------------------------------------------------------------------\n# TRITON CLIFFORD KERNEL (Özel C++ Seviyesi Optimizasyon)\n# -----------------------------------------------------------------------------\n@triton.jit\ndef _clifford_prod_fwd_kernel(a_ptr, b_ptr, out_ptr, n_elements, BLOCK_SIZE: tl.constexpr):\n pid = tl.program_id(0)\n offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)\n mask = offsets < n_elements\n \n a0 = tl.load(a_ptr + offsets * 8 + 0, mask=mask, other=0.0)\n a1 = tl.load(a_ptr + offsets * 8 + 1, mask=mask, other=0.0)\n a2 = tl.load(a_ptr + offsets * 8 + 2, mask=mask, other=0.0)\n a3 = tl.load(a_ptr + offsets * 8 + 3, mask=mask, other=0.0)\n a4 = tl.load(a_ptr + offsets * 8 + 4, mask=mask, other=0.0)\n a5 = tl.load(a_ptr + offsets * 8 + 5, mask=mask, other=0.0)\n a6 = tl.load(a_ptr + offsets * 8 + 6, mask=mask, other=0.0)\n a7 = tl.load(a_ptr + offsets * 8 + 7, mask=mask, other=0.0)\n\n b0 = tl.load(b_ptr + offsets * 8 + 0, mask=mask, other=0.0)\n b1 = tl.load(b_ptr + offsets * 8 + 1, mask=mask, other=0.0)\n b2 = tl.load(b_ptr + offsets * 8 + 2, mask=mask, other=0.0)\n b3 = tl.load(b_ptr + offsets * 8 + 3, mask=mask, other=0.0)\n b4 = tl.load(b_ptr + offsets * 8 + 4, mask=mask, other=0.0)\n b5 = tl.load(b_ptr + offsets * 8 + 5, mask=mask, other=0.0)\n b6 = tl.load(b_ptr + offsets * 8 + 6, mask=mask, other=0.0)\n b7 = tl.load(b_ptr + offsets * 8 + 7, mask=mask, other=0.0)\n \n res0 = a0*b0 + a1*b1 + a2*b2 + a3*b3 - a4*b4 - a5*b5 - a6*b6 - a7*b7\n res1 = a0*b1 + a1*b0 - a2*b4 + a3*b6 + a4*b2 - a5*b7 - a6*b3 - a7*b5\n res2 = a0*b2 + a1*b4 + a2*b0 - a3*b5 - a4*b1 + a5*b3 - a6*b7 - a7*b6\n res3 = a0*b3 - a1*b6 + a2*b5 + a3*b0 + a4*b7 - a5*b2 + a6*b1 - a7*b4\n res4 = a0*b4 + a1*b2 - a2*b1 + a3*b7 + a4*b0 - a5*b6 + a6*b5 - a7*b3\n res5 = a0*b5 + a1*b7 + a2*b3 - a3*b2 + a4*b6 + a5*b0 - a6*b4 - a7*b1\n res6 = a0*b6 - a1*b3 + a2*b7 + a3*b1 - a4*b5 + a5*b4 + a6*b0 - a7*b2\n res7 = a0*b7 + a1*b5 + a2*b6 + a3*b4 + a4*b3 + a5*b1 + a6*b2 + a7*b0\n \n tl.store(out_ptr + offsets * 8 + 0, res0, mask=mask)\n tl.store(out_ptr + offsets * 8 + 1, res1, mask=mask)\n tl.store(out_ptr + offsets * 8 + 2, res2, mask=mask)\n tl.store(out_ptr + offsets * 8 + 3, res3, mask=mask)\n tl.store(out_ptr + offsets * 8 + 4, res4, mask=mask)\n tl.store(out_ptr + offsets * 8 + 5, res5, mask=mask)\n tl.store(out_ptr + offsets * 8 + 6, res6, mask=mask)\n tl.store(out_ptr + offsets * 8 + 7, res7, mask=mask)\n\n\nclass CliffordProductFunc(torch.autograd.Function):\n @staticmethod\n def forward(ctx, a, b):\n a = a.contiguous()\n b = b.contiguous()\n ctx.save_for_backward(a, b)\n \n out = torch.empty_like(a)\n n_elements = a.numel() // 8\n grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)\n \n _clifford_prod_fwd_kernel[grid](a, b, out, n_elements, BLOCK_SIZE=1024)\n return out\n\n @staticmethod\n def backward(ctx, grad_output):\n a, b = ctx.saved_tensors\n grad_output = grad_output.contiguous()\n grad_a = grad_b = None\n \n rev_mask = torch.tensor([1, 1, 1, 1, -1, -1, -1, -1], device=a.device, dtype=a.dtype)\n \n if ctx.needs_input_grad[0]:\n b_rev = (b * rev_mask).contiguous()\n grad_a = triton_clifford_product(grad_output, b_rev)\n \n if ctx.needs_input_grad[1]:\n a_rev = (a * rev_mask).contiguous()\n grad_b = triton_clifford_product(a_rev, grad_output)\n \n return grad_a, grad_b\n\ndef triton_clifford_product(a, b):\n a_exp, b_exp = torch.broadcast_tensors(a, b)\n return CliffordProductFunc.apply(a_exp, b_exp)\n\n\n@torch.compiler.disable\ndef safe_fps(*args, **kwargs):\n return fps(*args, **kwargs)\n\n@torch.compiler.disable\ndef safe_radius_graph(*args, **kwargs):\n return radius_graph(*args, **kwargs)\n\n@torch.compiler.disable\ndef safe_knn(*args, **kwargs):\n return knn(*args, **kwargs)\n\n# -----------------------------------------------------------------------------\n# Constants & ShapeNet Mappings (16 Categories, 50 Parts)\n# -----------------------------------------------------------------------------\nSEG_CLASSES = {\n 'Airplane': [0, 1, 2, 3], 'Bag': [4, 5], 'Cap': [6, 7], 'Car': [8, 9, 10, 11],\n 'Chair': [12, 13, 14, 15], 'Earphone': [16, 17, 18], 'Guitar': [19, 20, 21],\n 'Knife': [22, 23], 'Lamp': [24, 25, 26, 27], 'Laptop': [28, 29],\n 'Motorbike': [30, 31, 32, 33, 34, 35], 'Mug': [36, 37], 'Pistol': [38, 39, 40],\n 'Rocket': [41, 42, 43], 'Skateboard': [44, 45, 46], 'Table': [47, 48, 49]\n}\nCATEGORY_TO_PARTS = list(SEG_CLASSES.values())\n\ndef seed_everything(seed: int = 42) -> None:\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\nclass FixedPointsDeterministic:\n def __init__(self, num: int):\n self.num = num\n def __call__(self, data: Data) -> Data:\n num_nodes = data.num_nodes\n if num_nodes >= self.num:\n choice = torch.randperm(num_nodes)[:self.num]\n else:\n extra = torch.randint(0, num_nodes, (self.num - num_nodes,))\n choice = torch.cat([torch.randperm(num_nodes), extra], dim=0)\n for key, item in data:\n if torch.is_tensor(item) and item.size(0) == num_nodes:\n data[key] = item[choice]\n return data\n\nclass NormalizeUnitSphere:\n def __call__(self, data: Data) -> Data:\n pos = data.pos\n pos = pos - pos.mean(dim=0, keepdim=True)\n scale = pos.norm(dim=1).max().clamp(min=1e-8)\n data.pos = pos / scale\n return data\n\ndef precompute_fps_indices(data: Data, ratio1: float = 0.5, ratio2: float = 0.25) -> Data:\n pos = data.pos\n batch = torch.zeros(pos.size(0), dtype=torch.long, device=pos.device)\n data.fps_idx_1 = safe_fps(pos, batch, ratio=ratio1).cpu().long()\n \n pos2 = pos[data.fps_idx_1]\n batch2 = torch.zeros(pos2.size(0), dtype=torch.long, device=pos2.device)\n data.fps_idx_2 = safe_fps(pos2, batch2, ratio=ratio2).cpu().long()\n return data\n\ndef materialize_split(root: str, split: str, num_points: int, seed: int, force_rebuild: bool = False):\n cache_file = os.path.join(root, f\"all_{split}_n{num_points}_seed{seed}_fps.pt\")\n if os.path.exists(cache_file) and not force_rebuild:\n print(f\"[{time.strftime('%H:%M:%S')}] Cache bulundu: {cache_file}\")\n return torch.load(cache_file, weights_only=False)\n\n print(f\"[{time.strftime('%H:%M:%S')}] {split} split (Tüm Kategoriler) materyalize ediliyor...\")\n seed_everything(seed)\n transform = lambda data: precompute_fps_indices(NormalizeUnitSphere()(FixedPointsDeterministic(num_points)(data)))\n\n dataset = ShapeNet(\n root=root,\n categories=['Airplane', 'Bag', 'Cap', 'Car', 'Chair', 'Earphone', 'Guitar', \n 'Knife', 'Lamp', 'Laptop', 'Motorbike', 'Mug', 'Pistol', \n 'Rocket', 'Skateboard', 'Table'], \n include_normals=False,\n split=split,\n transform=transform,\n )\n\n materialized = []\n for i in range(len(dataset)):\n item = dataset[i].clone()\n item = Data(**{k: v.clone() if torch.is_tensor(v) else v for k, v in item})\n materialized.append(item)\n if (i + 1) % 1000 == 0 or (i + 1) == len(dataset):\n print(f\" {split}: {i + 1}/{len(dataset)}\")\n\n torch.save(materialized, cache_file)\n return materialized\n\n# -----------------------------------------------------------------------------\n# EAGER GRAPH PREPARATION (Eğitim Döngüsünde JIT Öncesi Eager Modda Çalışır)\n# -----------------------------------------------------------------------------\n@torch.no_grad()\ndef prepare_batch_data(data):\n pos, batch = data.pos, data.batch\n num_nodes = torch.bincount(batch)\n offset = torch.cumsum(num_nodes, dim=0) - num_nodes\n fps_idx_1 = (data.fps_idx_1.view(num_nodes.size(0), -1) + offset.view(-1, 1)).reshape(-1)\n\n pos2, batch2 = pos[fps_idx_1], batch[fps_idx_1]\n\n num_nodes_2 = torch.bincount(batch2)\n fps_idx_2 = (data.fps_idx_2.view(num_nodes_2.size(0), -1) + (torch.cumsum(num_nodes_2, dim=0) - num_nodes_2).view(-1, 1)).reshape(-1)\n\n pos3, batch3 = pos2[fps_idx_2], batch2[fps_idx_2]\n\n edge_index_1 = safe_radius_graph(pos, r=0.15, batch=batch, max_num_neighbors=16, loop=True)\n edge_index_2 = safe_radius_graph(pos2, r=0.30, batch=batch2, max_num_neighbors=32, loop=True)\n\n assign_index_32 = safe_knn(pos3, pos2, k=3, batch_x=batch3, batch_y=batch2)\n assign_index_21 = safe_knn(pos2, pos, k=3, batch_x=batch2, batch_y=batch)\n\n max_n3 = int(torch.bincount(batch3).max().item())\n dummy_x = torch.zeros(pos3.size(0), 1, device=pos.device)\n _, x_dense_mask = to_dense_batch(dummy_x, batch3, max_num_nodes=max_n3)\n\n return (fps_idx_1, fps_idx_2, pos2, batch2, pos3, batch3,\n edge_index_1, edge_index_2, assign_index_32, assign_index_21, x_dense_mask)\n\n# -----------------------------------------------------------------------------\n# Model Components\n# -----------------------------------------------------------------------------\n\nclass CliffordDiracLayer(MessagePassing):\n def __init__(self, channels: int):\n super().__init__(aggr=\"add\", node_dim=0)\n self.channels = channels\n self.weight = nn.Linear(channels, channels, bias=False)\n self.distance_mlp = nn.Sequential(\n nn.Linear(1, channels),\n nn.SiLU(),\n nn.Linear(channels, channels),\n )\n self.resonance_mlp = nn.Sequential(\n nn.Linear(7, 16),\n nn.GELU(),\n nn.Linear(16, 1),\n )\n\n def forward(self, x, edge_index, v_ij, dist, edge_mask):\n x_proj = self.weight(x.transpose(1, 2)).transpose(1, 2)\n dist_weight = self.distance_mlp(dist)\n \n msg = self.propagate(edge_index, x=x_proj, v_ij=v_ij, dist_weight=dist_weight, edge_mask=edge_mask)\n gate = self.resonance_gate(x_proj, msg)\n return gate * msg\n\n def message(self, x_j, v_ij, dist_weight, edge_mask):\n v_8d = F.pad(v_ij, (1, 4)) \n v_8d_exp = v_8d.unsqueeze(1).expand_as(x_j)\n \n geom_msg = triton_clifford_product(v_8d_exp, x_j)\n msg = geom_msg * dist_weight.unsqueeze(-1)\n return msg * edge_mask.view(-1, 1, 1).float()\n\n def resonance_gate(self, x_state, msg_state):\n x_norm = F.normalize(x_state, dim=-1)\n msg_norm = F.normalize(msg_state, dim=-1)\n \n gp = triton_clifford_product(x_norm, msg_norm)\n\n scalar_align = gp[..., 0:1]\n vector_align = torch.norm(gp[..., 1:4], dim=-1, keepdim=True)\n bivector_align = torch.norm(gp[..., 4:7], dim=-1, keepdim=True)\n pseudoscalar_align = torch.abs(gp[..., 7:8])\n \n state_norm = torch.norm(x_state, dim=-1, keepdim=True)\n msg_norm_mag = torch.norm(msg_state, dim=-1, keepdim=True)\n delta_norm = torch.norm(msg_state - x_state, dim=-1, keepdim=True)\n\n feats = torch.cat([\n scalar_align, vector_align, bivector_align, pseudoscalar_align,\n state_norm, msg_norm_mag, delta_norm,\n ], dim=-1)\n \n return torch.sigmoid(self.resonance_mlp(feats))\n\nclass CliffordSelfAttention(nn.Module):\n def __init__(self, channels: int):\n super().__init__()\n self.channels = channels\n self.W_q = nn.Linear(channels, channels, bias=False)\n self.W_k = nn.Linear(channels, channels, bias=False)\n self.W_v = nn.Linear(channels, channels, bias=False)\n self.score_net = nn.Sequential(\n nn.Linear(8, 16),\n nn.GELU(),\n nn.Linear(16, 1),\n )\n\n def forward(self, x_dense, mask):\n B, N, C, _ = x_dense.shape\n q = self.W_q(x_dense.transpose(2, 3)).transpose(2, 3)\n k = self.W_k(x_dense.transpose(2, 3)).transpose(2, 3)\n v = self.W_v(x_dense.transpose(2, 3)).transpose(2, 3)\n \n q_expanded = q.unsqueeze(2)\n k_expanded = k.unsqueeze(1)\n \n geom_prod = triton_clifford_product(q_expanded, k_expanded)\n scores = self.score_net(geom_prod.mean(dim=3)).squeeze(-1)\n \n scores = scores.masked_fill(~(mask.unsqueeze(1) & mask.unsqueeze(2)), -10000.0)\n attn = F.softmax(scores / math.sqrt(C), dim=-1)\n return (attn.view(B, N, N, 1, 1) * v.unsqueeze(1)).sum(dim=2) + x_dense\n\n\n# -----------------------------------------------------------------------------\n# Main Architecture (~0.15 M PARAMETREYE DÜŞÜRÜLDÜ)\n# -----------------------------------------------------------------------------\nclass HierarchicalFPSCliffordNet(nn.Module):\n def __init__(self, base_channels: int = 48, num_part_classes: int = 50, num_categories: int = 16):\n super().__init__()\n self.C = base_channels\n self.layer1 = CliffordDiracLayer(base_channels)\n self.layer2 = CliffordDiracLayer(base_channels * 2)\n self.lin1 = nn.Linear(base_channels * 8, (base_channels * 2) * 8)\n self.lin2 = nn.Linear((base_channels * 2) * 8, (base_channels * 4) * 8)\n self.manager = CliffordSelfAttention(base_channels * 4)\n self.cat_emb = nn.Embedding(num_categories, 64)\n\n combined_dim = (base_channels * 4 + base_channels * 2 + base_channels) * 8 + 64\n \n # YENİ HEAD: (64, 64) Bottleneck mimarisi. Modeli doğrudan 0.15M parametreye düşürür.\n self.head = nn.Sequential(\n nn.Linear(combined_dim, 64), nn.BatchNorm1d(64), nn.GELU(), nn.Dropout(0.4),\n nn.Linear(64, 64), nn.BatchNorm1d(64), nn.GELU(), nn.Dropout(0.2),\n nn.Linear(64, num_part_classes)\n )\n\n def forward(self, pos, batch, category, fps_idx_1, fps_idx_2, pos2, batch2, pos3, batch3, \n edge_index_1, edge_index_2, assign_index_32, assign_index_21, x_dense_mask):\n \n x0 = torch.zeros(pos.size(0), self.C, 8, device=pos.device)\n x0[..., 1:4] = pos.unsqueeze(1).expand(-1, self.C, -1)\n\n # ---------------------------------------------------------\n # 1. KATMAN\n # ---------------------------------------------------------\n row1, col1 = edge_index_1[1], edge_index_1[0]\n diff1 = pos[row1] - pos[col1]\n d1 = diff1.norm(dim=-1, keepdim=True).clamp(min=1e-8)\n dummy_mask1 = torch.ones(edge_index_1.size(1), dtype=torch.bool, device=pos.device)\n \n x1 = x0 + self.layer1(x0, edge_index_1, diff1 / d1, d1, dummy_mask1)\n f1 = (x1 / (x1.norm(dim=-1, keepdim=True).mean(dim=1, keepdim=True) + 1e-6)).reshape(x1.size(0), -1)\n\n # ---------------------------------------------------------\n # 2. KATMAN\n # ---------------------------------------------------------\n x2_in = self.lin1(x1[fps_idx_1].reshape(fps_idx_1.numel(), -1)).reshape(-1, self.C * 2, 8)\n \n row2, col2 = edge_index_2[1], edge_index_2[0]\n diff2 = pos2[row2] - pos2[col2]\n d2 = diff2.norm(dim=-1, keepdim=True).clamp(min=1e-8)\n dummy_mask2 = torch.ones(edge_index_2.size(1), dtype=torch.bool, device=pos.device)\n\n x2 = x2_in + self.layer2(x2_in, edge_index_2, diff2 / d2, d2, dummy_mask2)\n f2 = (x2 / (x2.norm(dim=-1, keepdim=True).mean(dim=1, keepdim=True) + 1e-6)).reshape(x2.size(0), -1)\n\n # ---------------------------------------------------------\n # Global Manager\n # ---------------------------------------------------------\n x3_in = self.lin2(x2[fps_idx_2].reshape(fps_idx_2.numel(), -1)).reshape(-1, self.C * 4, 8)\n\n x_dense, _ = to_dense_batch(x3_in.reshape(x3_in.size(0), -1), batch3, max_num_nodes=x_dense_mask.size(1))\n x3 = self.manager(x_dense.view(x_dense.size(0), x_dense.size(1), self.C * 4, 8), x_dense_mask)\n f3 = x3[x_dense_mask].reshape(-1, self.C * 4 * 8)\n\n # ---------------------------------------------------------\n # YUKARI ÖRNEKLEME (Interpolation)\n # ---------------------------------------------------------\n row_32, col_32 = assign_index_32[0], assign_index_32[1]\n out_f3_to_pos2 = torch.zeros(pos2.size(0), f3.size(1), device=pos.device)\n out_f3_to_pos2.scatter_add_(0, row_32.unsqueeze(1).expand(-1, f3.size(1)), f3[col_32])\n out_f3_to_pos2 = out_f3_to_pos2 / 3.0\n \n f2_combined = torch.cat([out_f3_to_pos2, f2], dim=-1)\n \n row_21, col_21 = assign_index_21[0], assign_index_21[1]\n f2_up = torch.zeros(pos.size(0), f2_combined.size(1), device=pos.device)\n f2_up.scatter_add_(0, row_21.unsqueeze(1).expand(-1, f2_combined.size(1)), f2_combined[col_21])\n f2_up = f2_up / 3.0\n \n cat_features = self.cat_emb(category)[batch] \n f1_final = torch.cat([f2_up, f1, cat_features], dim=-1)\n\n return self.head(f1_final)\n\n# -----------------------------------------------------------------------------\n# Evaluation & Metric Tracking\n# -----------------------------------------------------------------------------\ndef get_masked_logits(logits: torch.Tensor, category: torch.Tensor, batch: torch.Tensor) -> torch.Tensor:\n mask = torch.zeros_like(logits, dtype=torch.bool)\n for i in range(logits.size(0)):\n cat_idx = category[batch[i]].item()\n valid_parts = CATEGORY_TO_PARTS[cat_idx]\n mask[i, valid_parts] = True\n logits = logits.masked_fill(~mask, -10000.0)\n return logits\n\ndef compute_shape_miou(pred: torch.Tensor, target: torch.Tensor, valid_parts: list) -> float:\n ious = []\n for cls in valid_parts:\n pred_mask, target_mask = (pred == cls), (target == cls)\n union = (pred_mask | target_mask).sum().item()\n if union == 0: \n ious.append(1.0)\n else:\n ious.append((pred_mask & target_mask).sum().item() / union)\n return float(np.mean(ious))\n\n@torch.no_grad()\ndef evaluate(model, loader, device):\n model.eval()\n total_loss = 0.0\n criterion = nn.CrossEntropyLoss()\n \n shape_ious = []\n category_ious = defaultdict(list)\n\n for data in loader:\n data = data.to(device)\n \n (fps_idx_1, fps_idx_2, pos2, batch2, pos3, batch3, \n edge_index_1, edge_index_2, assign_index_32, assign_index_21, x_dense_mask) = prepare_batch_data(data)\n\n logits = model(data.pos, data.batch, data.category, fps_idx_1, fps_idx_2, pos2, batch2, pos3, batch3,\n edge_index_1, edge_index_2, assign_index_32, assign_index_21, x_dense_mask)\n \n loss = criterion(logits, data.y)\n total_loss += loss.item() * data.num_graphs\n\n masked_logits = get_masked_logits(logits.clone(), data.category, data.batch)\n pred = masked_logits.argmax(dim=-1)\n\n for i in range(data.num_graphs):\n mask = data.batch == i\n cat_idx = data.category[i].item()\n valid_parts = CATEGORY_TO_PARTS[cat_idx]\n \n iou = compute_shape_miou(pred[mask], data.y[mask], valid_parts)\n shape_ious.append(iou)\n category_ious[cat_idx].append(iou)\n\n cat_names = [\n 'Airplane', 'Bag', 'Cap', 'Car', 'Chair', 'Earphone', 'Guitar', 'Knife', \n 'Lamp', 'Laptop', 'Motorbike', 'Mug', 'Pistol', 'Rocket', 'Skateboard', 'Table'\n ]\n\n inst_miou = float(np.mean(shape_ious))\n existing_cats = sorted(category_ious.keys())\n class_miou = float(np.mean([np.mean(category_ious[c]) for c in existing_cats]))\n\n print(\"\\n\" + \"=\"*50)\n print(f\"{'Kategori':<15} | {'mIoU':<10} | {'Örnek Sayısı'}\")\n print(\"-\" * 50)\n for c_idx in existing_cats:\n c_name = cat_names[c_idx] if c_idx < len(cat_names) else f\"Cat_{c_idx}\"\n c_mean = np.mean(category_ious[c_idx]) * 100\n c_count = len(category_ious[c_idx])\n print(f\"{c_name:<15} | {c_mean:<10.2f} | {c_count}\")\n \n print(\"-\" * 50)\n print(f\"SOTA mIoU (Inst.) : {inst_miou * 100:.2f}\")\n print(f\"SOTA mIoU (Class) : {class_miou * 100:.2f}\")\n print(\"=\"*50 + \"\\n\")\n\n return total_loss / len(loader.dataset), inst_miou, class_miou\n\nclass FocalLoss(nn.Module):\n def __init__(self, gamma=2.0, alpha=None, label_smoothing=0.1):\n super().__init__()\n self.gamma = gamma\n self.alpha = alpha\n self.label_smoothing = label_smoothing\n\n def forward(self, inputs, targets):\n ce_loss = F.cross_entropy(inputs, targets, reduction='none', label_smoothing=self.label_smoothing)\n pt = torch.exp(-ce_loss)\n focal_loss = ((1 - pt) ** self.gamma) * ce_loss\n \n if self.alpha is not None:\n alpha_t = self.alpha.gather(0, targets.data.view(-1))\n focal_loss = focal_loss * alpha_t\n\n return focal_loss.mean()\n\n# -----------------------------------------------------------------------------\n# Training Loop\n# -----------------------------------------------------------------------------\n@dataclass\nclass Config:\n root: str = \"./data/ShapeNetPart\"\n num_points: int = 1024\n batch_size: int = 64\n lr: float = 1e-3\n weight_decay: float = 1e-4\n epochs: int = 100\n base_channels: int = 12\n seed: int = 42\n force_rebuild_cache: bool = False\n save_path: str = \"./best_all_categories_clifford-min.pt\"\n\ndef main():\n cfg = Config()\n seed_everything(cfg.seed)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n train_ds = materialize_split(cfg.root, \"trainval\", cfg.num_points, cfg.seed, cfg.force_rebuild_cache)\n test_ds = materialize_split(cfg.root, \"test\", cfg.num_points, cfg.seed + 2, cfg.force_rebuild_cache)\n\n train_loader = DataLoader(train_ds, batch_size=cfg.batch_size, shuffle=True, \n num_workers=4, pin_memory=True, persistent_workers=True, drop_last=True)\n test_loader = DataLoader(test_ds, batch_size=cfg.batch_size, shuffle=False, \n num_workers=4, pin_memory=True, persistent_workers=True)\n\n model = HierarchicalFPSCliffordNet(base_channels=cfg.base_channels, num_part_classes=50, num_categories=16).to(device)\n\n # --- PARAMETRE SAYISI LOGLAMA EKLENTİSİ ---\n total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(\"=\" * 50)\n print(f\"[{time.strftime('%H:%M:%S')}] Model Mimarisi Bilgisi:\")\n print(f\"Toplam Eğitilebilir Parametre: {total_params:,} ({total_params / 1e6:.4f} M)\")\n print(\"=\" * 50)\n # ------------------------------------------\n\n # --- AĞIRLIKLARI AKILLI YÜKLEME (TRANSFER LEARNING) BLOĞU ---\n if os.path.exists(cfg.save_path):\n try:\n print(f\"[{time.strftime('%H:%M:%S')}] Mevcut ağırlıklar akıllı transfer modunda yükleniyor: {cfg.save_path}\")\n checkpoint = torch.load(cfg.save_path, map_location=device, weights_only=True)\n model_state = model.state_dict()\n \n clean_state_dict = {}\n for key, value in checkpoint.items():\n clean_key = key.replace(\"module.\", \"\", 1) if key.startswith(\"module.\") else key\n clean_key = key.replace(\"_orig_mod.\", \"\") if key.startswith(\"_orig_mod.\") else key\n \n # Sadece boyutu eşleşen katmanlar yüklenir (Gövde sağlam kalır, Head atlanır)\n if clean_key in model_state and model_state[clean_key].shape == value.shape:\n clean_state_dict[clean_key] = value\n else:\n print(f\"Atlanıyor: {clean_key} (Boyutlar uyuşmuyor, sıfırdan öğrenilecek)\")\n \n model.load_state_dict(clean_state_dict, strict=False)\n print(\"Gövde ağırlıkları başarıyla yüklendi! Yeni MLP Head eğitilmeye başlanıyor.\")\n except Exception as e:\n print(f\"Ağırlıklar yüklenirken hata oluştu: {e}\")\n print(\"Eğitimi tamamen sıfırdan başlatıyorum...\")\n\n try:\n model = torch.compile(model)\n except Exception as e:\n print(f\"torch.compile hatası: {e}\")\n\n optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=3)\n criterion = FocalLoss(gamma=2.0, label_smoothing=0.0)\n\n scaler = GradScaler('cuda')\n best_inst_miou = -1.0 \n\n for epoch in range(1, cfg.epochs + 1):\n t0 = time.time()\n model.train()\n total_train_loss = 0.0\n\n for data in train_loader:\n data = data.to(device)\n optimizer.zero_grad(set_to_none=True) \n \n (fps_idx_1, fps_idx_2, pos2, batch2, pos3, batch3, \n edge_index_1, edge_index_2, assign_index_32, assign_index_21, x_dense_mask) = prepare_batch_data(data)\n\n with autocast('cuda'):\n raw_logits = model(data.pos, data.batch, data.category, \n fps_idx_1, fps_idx_2, pos2, batch2, pos3, batch3,\n edge_index_1, edge_index_2, assign_index_32, assign_index_21, x_dense_mask)\n \n masked_logits = get_masked_logits(raw_logits, data.category, data.batch)\n loss = criterion(masked_logits, data.y)\n \n scaler.scale(loss).backward()\n \n scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n \n scaler.step(optimizer)\n scaler.update()\n \n total_train_loss += loss.item() * data.num_graphs\n\n train_loss = total_train_loss / len(train_loader.dataset)\n \n val_loss, val_inst_miou, val_class_miou = evaluate(model, test_loader, device)\n \n scheduler.step(val_inst_miou)\n current_lr = optimizer.param_groups[0]['lr']\n\n if val_inst_miou > best_inst_miou:\n best_inst_miou = val_inst_miou\n torch.save(model.state_dict(), cfg.save_path)\n mark = \"*\"\n else:\n mark = \" \"\n\n print(f\"Epoch {epoch:02d} | time {time.time()-t0:.1f}s | lr: {current_lr:.2e} | loss: {train_loss:.4f} | val_loss: {val_loss:.4f} | \"\n f\"Val Inst. mIoU: {val_inst_miou*100:.2f} | Val Cls. mIoU: {val_class_miou*100:.2f} {mark}\")\n \nif __name__ == \"__main__\":\n main()","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-04-27T13:12:01.924967Z","iopub.execute_input":"2026-04-27T13:12:01.925987Z"}},"outputs":[{"name":"stdout","text":"[13:12:27] Cache bulundu: ./data/ShapeNetPart/all_trainval_n1024_seed42_fps.pt\n[13:12:31] Cache bulundu: ./data/ShapeNetPart/all_test_n1024_seed44_fps.pt\n==================================================\n[13:12:32] Model Mimarisi Bilgisi:\nToplam Eğitilebilir Parametre: 157,505 (0.1575 M)\n==================================================\n[13:12:32] Mevcut ağırlıklar akıllı transfer modunda yükleniyor: ./best_all_categories_clifford-min.pt\nGövde ağırlıkları başarıyla yüklendi! Yeni MLP Head eğitilmeye başlanıyor.\n","output_type":"stream"},{"name":"stderr","text":"W0427 13:12:50.198000 55 torch/_inductor/utils.py:1679] [0/0_1] Not enough SMs to use max_autotune_gemm mode\n","output_type":"stream"},{"name":"stdout","text":"\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 77.06 | 341\nBag | 75.83 | 14\nCap | 77.02 | 11\nCar | 66.40 | 158\nChair | 87.62 | 704\nEarphone | 68.80 | 14\nGuitar | 87.78 | 159\nKnife | 82.55 | 80\nLamp | 76.41 | 286\nLaptop | 94.20 | 83\nMotorbike | 58.08 | 51\nMug | 85.85 | 38\nPistol | 78.37 | 44\nRocket | 49.13 | 12\nSkateboard | 65.99 | 31\nTable | 80.31 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 80.71\nSOTA mIoU (Class) : 75.71\n==================================================\n\nEpoch 01 | time 1659.1s | lr: 1.00e-03 | loss: 0.0711 | val_loss: 2.4779 | Val Inst. mIoU: 80.71 | Val Cls. mIoU: 75.71 *\n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 76.84 | 341\nBag | 74.12 | 14\nCap | 77.03 | 11\nCar | 64.57 | 158\nChair | 87.57 | 704\nEarphone | 67.66 | 14\nGuitar | 88.28 | 159\nKnife | 83.87 | 80\nLamp | 76.82 | 286\nLaptop | 94.17 | 83\nMotorbike | 57.65 | 51\nMug | 80.65 | 38\nPistol | 77.70 | 44\nRocket | 51.18 | 12\nSkateboard | 65.50 | 31\nTable | 80.08 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 80.52\nSOTA mIoU (Class) : 75.23\n==================================================\n\nEpoch 02 | time 1573.0s | lr: 1.00e-03 | loss: 0.0688 | val_loss: 2.4645 | Val Inst. mIoU: 80.52 | Val Cls. mIoU: 75.23 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 77.48 | 341\nBag | 68.29 | 14\nCap | 78.77 | 11\nCar | 65.15 | 158\nChair | 87.09 | 704\nEarphone | 68.98 | 14\nGuitar | 88.58 | 159\nKnife | 83.37 | 80\nLamp | 78.59 | 286\nLaptop | 94.14 | 83\nMotorbike | 54.76 | 51\nMug | 87.58 | 38\nPistol | 77.50 | 44\nRocket | 51.06 | 12\nSkateboard | 67.96 | 31\nTable | 80.67 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 80.91\nSOTA mIoU (Class) : 75.62\n==================================================\n\nEpoch 03 | time 1595.2s | lr: 1.00e-03 | loss: 0.0686 | val_loss: 2.4811 | Val Inst. mIoU: 80.91 | Val Cls. mIoU: 75.62 *\n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 77.45 | 341\nBag | 71.46 | 14\nCap | 79.60 | 11\nCar | 66.93 | 158\nChair | 87.81 | 704\nEarphone | 69.99 | 14\nGuitar | 88.08 | 159\nKnife | 84.15 | 80\nLamp | 78.02 | 286\nLaptop | 94.30 | 83\nMotorbike | 54.61 | 51\nMug | 84.88 | 38\nPistol | 78.86 | 44\nRocket | 49.70 | 12\nSkateboard | 66.66 | 31\nTable | 80.48 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.05\nSOTA mIoU (Class) : 75.81\n==================================================\n\nEpoch 04 | time 1583.5s | lr: 1.00e-03 | loss: 0.0679 | val_loss: 2.4898 | Val Inst. mIoU: 81.05 | Val Cls. mIoU: 75.81 *\n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 77.62 | 341\nBag | 72.99 | 14\nCap | 78.56 | 11\nCar | 66.22 | 158\nChair | 87.76 | 704\nEarphone | 67.68 | 14\nGuitar | 88.80 | 159\nKnife | 82.66 | 80\nLamp | 77.35 | 286\nLaptop | 94.25 | 83\nMotorbike | 55.64 | 51\nMug | 86.06 | 38\nPistol | 78.31 | 44\nRocket | 48.00 | 12\nSkateboard | 66.97 | 31\nTable | 80.27 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 80.90\nSOTA mIoU (Class) : 75.57\n==================================================\n\nEpoch 05 | time 1590.6s | lr: 1.00e-03 | loss: 0.0669 | val_loss: 2.4852 | Val Inst. mIoU: 80.90 | Val Cls. mIoU: 75.57 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 77.17 | 341\nBag | 76.67 | 14\nCap | 78.84 | 11\nCar | 64.93 | 158\nChair | 87.12 | 704\nEarphone | 69.19 | 14\nGuitar | 88.02 | 159\nKnife | 83.92 | 80\nLamp | 77.42 | 286\nLaptop | 94.41 | 83\nMotorbike | 54.77 | 51\nMug | 86.78 | 38\nPistol | 79.28 | 44\nRocket | 50.38 | 12\nSkateboard | 67.47 | 31\nTable | 80.50 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 80.74\nSOTA mIoU (Class) : 76.05\n==================================================\n\nEpoch 06 | time 1583.1s | lr: 1.00e-03 | loss: 0.0670 | val_loss: 2.5236 | Val Inst. mIoU: 80.74 | Val Cls. mIoU: 76.05 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 78.15 | 341\nBag | 74.94 | 14\nCap | 74.06 | 11\nCar | 65.95 | 158\nChair | 87.73 | 704\nEarphone | 67.61 | 14\nGuitar | 88.09 | 159\nKnife | 83.52 | 80\nLamp | 78.27 | 286\nLaptop | 94.43 | 83\nMotorbike | 58.20 | 51\nMug | 82.27 | 38\nPistol | 79.54 | 44\nRocket | 49.13 | 12\nSkateboard | 67.86 | 31\nTable | 80.11 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 80.99\nSOTA mIoU (Class) : 75.62\n==================================================\n\nEpoch 07 | time 1590.6s | lr: 1.00e-03 | loss: 0.0667 | val_loss: 2.4820 | Val Inst. mIoU: 80.99 | Val Cls. mIoU: 75.62 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 78.85 | 341\nBag | 74.10 | 14\nCap | 76.69 | 11\nCar | 65.59 | 158\nChair | 87.63 | 704\nEarphone | 70.22 | 14\nGuitar | 88.00 | 159\nKnife | 81.64 | 80\nLamp | 76.52 | 286\nLaptop | 94.22 | 83\nMotorbike | 57.00 | 51\nMug | 83.54 | 38\nPistol | 78.73 | 44\nRocket | 45.39 | 12\nSkateboard | 66.74 | 31\nTable | 79.61 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 80.62\nSOTA mIoU (Class) : 75.28\n==================================================\n\nEpoch 08 | time 1584.6s | lr: 5.00e-04 | loss: 0.0656 | val_loss: 2.4681 | Val Inst. mIoU: 80.62 | Val Cls. mIoU: 75.28 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 78.11 | 341\nBag | 74.43 | 14\nCap | 77.79 | 11\nCar | 67.01 | 158\nChair | 87.69 | 704\nEarphone | 70.80 | 14\nGuitar | 88.51 | 159\nKnife | 83.53 | 80\nLamp | 78.04 | 286\nLaptop | 94.52 | 83\nMotorbike | 59.40 | 51\nMug | 86.06 | 38\nPistol | 78.87 | 44\nRocket | 51.04 | 12\nSkateboard | 68.46 | 31\nTable | 80.25 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.19\nSOTA mIoU (Class) : 76.53\n==================================================\n\nEpoch 09 | time 1580.3s | lr: 5.00e-04 | loss: 0.0634 | val_loss: 2.4507 | Val Inst. mIoU: 81.19 | Val Cls. mIoU: 76.53 *\n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 77.22 | 341\nBag | 74.28 | 14\nCap | 77.58 | 11\nCar | 66.72 | 158\nChair | 87.54 | 704\nEarphone | 71.22 | 14\nGuitar | 89.08 | 159\nKnife | 83.75 | 80\nLamp | 77.81 | 286\nLaptop | 94.48 | 83\nMotorbike | 58.64 | 51\nMug | 86.48 | 38\nPistol | 78.76 | 44\nRocket | 49.22 | 12\nSkateboard | 67.36 | 31\nTable | 80.50 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.08\nSOTA mIoU (Class) : 76.29\n==================================================\n\nEpoch 10 | time 1593.6s | lr: 5.00e-04 | loss: 0.0624 | val_loss: 2.4479 | Val Inst. mIoU: 81.08 | Val Cls. mIoU: 76.29 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 78.00 | 341\nBag | 75.02 | 14\nCap | 78.02 | 11\nCar | 67.82 | 158\nChair | 87.65 | 704\nEarphone | 69.55 | 14\nGuitar | 88.76 | 159\nKnife | 83.61 | 80\nLamp | 78.49 | 286\nLaptop | 94.33 | 83\nMotorbike | 59.14 | 51\nMug | 85.98 | 38\nPistol | 78.45 | 44\nRocket | 51.09 | 12\nSkateboard | 67.62 | 31\nTable | 79.94 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.14\nSOTA mIoU (Class) : 76.47\n==================================================\n\nEpoch 11 | time 1588.1s | lr: 5.00e-04 | loss: 0.0624 | val_loss: 2.4521 | Val Inst. mIoU: 81.14 | Val Cls. mIoU: 76.47 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 77.81 | 341\nBag | 73.47 | 14\nCap | 77.18 | 11\nCar | 66.32 | 158\nChair | 87.52 | 704\nEarphone | 70.73 | 14\nGuitar | 88.74 | 159\nKnife | 83.86 | 80\nLamp | 77.86 | 286\nLaptop | 94.38 | 83\nMotorbike | 59.09 | 51\nMug | 86.91 | 38\nPistol | 78.89 | 44\nRocket | 50.80 | 12\nSkateboard | 69.23 | 31\nTable | 80.54 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.16\nSOTA mIoU (Class) : 76.46\n==================================================\n\nEpoch 12 | time 1597.6s | lr: 5.00e-04 | loss: 0.0622 | val_loss: 2.4530 | Val Inst. mIoU: 81.16 | Val Cls. mIoU: 76.46 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 77.55 | 341\nBag | 74.78 | 14\nCap | 77.29 | 11\nCar | 66.78 | 158\nChair | 87.33 | 704\nEarphone | 69.79 | 14\nGuitar | 88.88 | 159\nKnife | 81.72 | 80\nLamp | 77.68 | 286\nLaptop | 94.52 | 83\nMotorbike | 59.11 | 51\nMug | 85.48 | 38\nPistol | 77.78 | 44\nRocket | 50.08 | 12\nSkateboard | 67.79 | 31\nTable | 79.92 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 80.81\nSOTA mIoU (Class) : 76.03\n==================================================\n\nEpoch 13 | time 1591.2s | lr: 2.50e-04 | loss: 0.0620 | val_loss: 2.4480 | Val Inst. mIoU: 80.81 | Val Cls. mIoU: 76.03 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 78.33 | 341\nBag | 73.56 | 14\nCap | 77.54 | 11\nCar | 67.30 | 158\nChair | 87.83 | 704\nEarphone | 70.69 | 14\nGuitar | 88.93 | 159\nKnife | 84.25 | 80\nLamp | 78.15 | 286\nLaptop | 94.48 | 83\nMotorbike | 59.12 | 51\nMug | 86.23 | 38\nPistol | 78.83 | 44\nRocket | 51.62 | 12\nSkateboard | 68.85 | 31\nTable | 80.46 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.37\nSOTA mIoU (Class) : 76.64\n==================================================\n\nEpoch 14 | time 1580.8s | lr: 2.50e-04 | loss: 0.0606 | val_loss: 2.4288 | Val Inst. mIoU: 81.37 | Val Cls. mIoU: 76.64 *\n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 78.21 | 341\nBag | 73.20 | 14\nCap | 77.01 | 11\nCar | 67.68 | 158\nChair | 87.84 | 704\nEarphone | 70.93 | 14\nGuitar | 88.56 | 159\nKnife | 83.74 | 80\nLamp | 77.19 | 286\nLaptop | 94.37 | 83\nMotorbike | 58.63 | 51\nMug | 87.64 | 38\nPistol | 78.78 | 44\nRocket | 52.05 | 12\nSkateboard | 68.68 | 31\nTable | 80.07 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.14\nSOTA mIoU (Class) : 76.54\n==================================================\n\nEpoch 15 | time 1578.1s | lr: 2.50e-04 | loss: 0.0605 | val_loss: 2.4176 | Val Inst. mIoU: 81.14 | Val Cls. mIoU: 76.54 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 77.96 | 341\nBag | 74.31 | 14\nCap | 75.87 | 11\nCar | 66.90 | 158\nChair | 87.70 | 704\nEarphone | 66.18 | 14\nGuitar | 88.51 | 159\nKnife | 84.00 | 80\nLamp | 77.72 | 286\nLaptop | 94.46 | 83\nMotorbike | 58.19 | 51\nMug | 86.19 | 38\nPistol | 78.48 | 44\nRocket | 51.20 | 12\nSkateboard | 68.76 | 31\nTable | 80.38 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.13\nSOTA mIoU (Class) : 76.05\n==================================================\n\nEpoch 16 | time 1561.8s | lr: 2.50e-04 | loss: 0.0603 | val_loss: 2.4308 | Val Inst. mIoU: 81.13 | Val Cls. mIoU: 76.05 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 77.89 | 341\nBag | 71.67 | 14\nCap | 77.05 | 11\nCar | 67.12 | 158\nChair | 87.81 | 704\nEarphone | 69.09 | 14\nGuitar | 88.74 | 159\nKnife | 84.15 | 80\nLamp | 78.19 | 286\nLaptop | 94.52 | 83\nMotorbike | 59.05 | 51\nMug | 86.53 | 38\nPistol | 78.75 | 44\nRocket | 52.10 | 12\nSkateboard | 69.37 | 31\nTable | 80.30 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.24\nSOTA mIoU (Class) : 76.40\n==================================================\n\nEpoch 17 | time 1549.7s | lr: 2.50e-04 | loss: 0.0602 | val_loss: 2.4141 | Val Inst. mIoU: 81.24 | Val Cls. mIoU: 76.40 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 78.28 | 341\nBag | 74.27 | 14\nCap | 76.85 | 11\nCar | 67.06 | 158\nChair | 87.76 | 704\nEarphone | 68.82 | 14\nGuitar | 88.15 | 159\nKnife | 84.16 | 80\nLamp | 77.81 | 286\nLaptop | 94.48 | 83\nMotorbike | 59.62 | 51\nMug | 87.06 | 38\nPistol | 78.90 | 44\nRocket | 52.01 | 12\nSkateboard | 69.30 | 31\nTable | 80.62 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.32\nSOTA mIoU (Class) : 76.57\n==================================================\n\nEpoch 18 | time 1553.9s | lr: 1.25e-04 | loss: 0.0601 | val_loss: 2.4132 | Val Inst. mIoU: 81.32 | Val Cls. mIoU: 76.57 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 78.15 | 341\nBag | 73.20 | 14\nCap | 76.48 | 11\nCar | 67.77 | 158\nChair | 87.81 | 704\nEarphone | 69.60 | 14\nGuitar | 88.89 | 159\nKnife | 84.14 | 80\nLamp | 77.77 | 286\nLaptop | 94.47 | 83\nMotorbike | 58.68 | 51\nMug | 86.62 | 38\nPistol | 79.21 | 44\nRocket | 51.47 | 12\nSkateboard | 69.16 | 31\nTable | 80.45 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.32\nSOTA mIoU (Class) : 76.49\n==================================================\n\nEpoch 19 | time 1594.2s | lr: 1.25e-04 | loss: 0.0592 | val_loss: 2.4206 | Val Inst. mIoU: 81.32 | Val Cls. mIoU: 76.49 \n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 78.41 | 341\nBag | 72.57 | 14\nCap | 76.57 | 11\nCar | 67.88 | 158\nChair | 87.97 | 704\nEarphone | 67.97 | 14\nGuitar | 88.87 | 159\nKnife | 84.08 | 80\nLamp | 77.77 | 286\nLaptop | 94.54 | 83\nMotorbike | 58.94 | 51\nMug | 86.97 | 38\nPistol | 79.20 | 44\nRocket | 51.15 | 12\nSkateboard | 68.95 | 31\nTable | 80.47 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.40\nSOTA mIoU (Class) : 76.39\n==================================================\n\nEpoch 20 | time 1590.4s | lr: 1.25e-04 | loss: 0.0591 | val_loss: 2.4084 | Val Inst. mIoU: 81.40 | Val Cls. mIoU: 76.39 *\n\n==================================================\nKategori | mIoU | Örnek Sayısı\n--------------------------------------------------\nAirplane | 78.24 | 341\nBag | 73.53 | 14\nCap | 76.26 | 11\nCar | 67.16 | 158\nChair | 87.83 | 704\nEarphone | 67.88 | 14\nGuitar | 88.73 | 159\nKnife | 84.17 | 80\nLamp | 77.60 | 286\nLaptop | 94.48 | 83\nMotorbike | 58.79 | 51\nMug | 86.42 | 38\nPistol | 78.85 | 44\nRocket | 52.09 | 12\nSkateboard | 69.25 | 31\nTable | 80.17 | 848\n--------------------------------------------------\nSOTA mIoU (Inst.) : 81.19\nSOTA mIoU (Class) : 76.34\n==================================================\n\nEpoch 21 | time 1603.5s | lr: 1.25e-04 | loss: 0.0590 | val_loss: 2.4116 | Val Inst. mIoU: 81.19 | Val Cls. mIoU: 76.34 \n","output_type":"stream"}],"execution_count":null}]}