{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "GfIYHqSbw5i1", "outputId": "ac83b86d-d4cb-41f3-da04-18bfe1130c3b" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\n", "Failed to download (trying next):\n", "HTTP Error 403: Forbidden\n", "\n", "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz\n", "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz to ./data/MNIST/raw/train-images-idx3-ubyte.gz\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "100%|██████████| 9912422/9912422 [00:03<00:00, 2700722.18it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Extracting ./data/MNIST/raw/train-images-idx3-ubyte.gz to ./data/MNIST/raw\n", "\n", "Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\n", "Failed to download (trying next):\n", "HTTP Error 403: Forbidden\n", "\n", "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz\n", "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz to ./data/MNIST/raw/train-labels-idx1-ubyte.gz\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "100%|██████████| 28881/28881 [00:00<00:00, 497356.67it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Extracting ./data/MNIST/raw/train-labels-idx1-ubyte.gz to ./data/MNIST/raw\n", "\n", "Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\n", "Failed to download (trying next):\n", "HTTP Error 403: Forbidden\n", "\n", "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz\n", 
"Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz to ./data/MNIST/raw/t10k-images-idx3-ubyte.gz\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "100%|██████████| 1648877/1648877 [00:00<00:00, 4524040.32it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Extracting ./data/MNIST/raw/t10k-images-idx3-ubyte.gz to ./data/MNIST/raw\n", "\n", "Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\n", "Failed to download (trying next):\n", "HTTP Error 403: Forbidden\n", "\n", "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz\n", "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz to ./data/MNIST/raw/t10k-labels-idx1-ubyte.gz\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "100%|██████████| 4542/4542 [00:00<00:00, 3718627.52it/s]" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Extracting ./data/MNIST/raw/t10k-labels-idx1-ubyte.gz to ./data/MNIST/raw\n", "\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "\n" ] } ], "source": [ "import torch\n", "import torch.nn as nn\n", "import torch.optim as optim\n", "from torchvision import datasets, transforms\n", "\n", "# Convert PIL images to float tensors scaled to [0, 1]\n", "mnist_transform = transforms.Compose([transforms.ToTensor()])\n", "\n", "# Download (when missing) and load the MNIST training split,\n", "# then wrap it in a shuffled mini-batch loader for training\n", "mnist_train = datasets.MNIST(root='./data', train=True, transform=mnist_transform, download=True)\n", "train_loader = torch.utils.data.DataLoader(dataset=mnist_train, batch_size=64, shuffle=True)" ] }, { "cell_type": "markdown", "source": [ "### Latihan:\n", "#### Implementasi dari awal:\n", "\n", "1. Coba implementasikan neural network sederhana dengan beberapa lapisan hidden dan latih pada dataset sederhana seperti MNIST atau CIFAR-10.\n", "2. Eksperimen dengan fungsi aktivasi yang berbeda, jumlah neuron di setiap lapisan, dan jenis optimizer.\n", "\n", "#### Visualisasi:\n", "\n", "1. 
Coba visualisasikan bagaimana bobot dan bias dari model berubah seiring dengan proses pelatihan.\n", "2. Visualisasikan loss dan akurasi seiring waktu untuk memahami bagaimana model belajar.\n", "\n", "#### Perbedaan Lapisan dan Arsitektur:\n", "\n", "1. Coba modifikasi arsitektur jaringan (menambah lapisan, mengubah jumlah neuron, dll.) dan lihat bagaimana hal ini mempengaruhi performa model." ], "metadata": { "id": "7HttM3uSyHDc" } }, { "cell_type": "markdown", "source": [ "#### Implementasi Awal" ], "metadata": { "id": "KrbSxsRjyWbm" } }, { "cell_type": "code", "source": [ "## MLP whose hidden-layer widths halve until they reach 64 neurons\n", "class CustomSizeLayerNN(nn.Module):\n", "    def __init__(self, layer_size):\n", "        # layer_size: width of the first hidden layer. Hidden widths are\n", "        # halved until <= 64, then a final layer maps to the 10 classes.\n", "        super(CustomSizeLayerNN, self).__init__()\n", "        self.layers = nn.ModuleList()\n", "        # Always add the input layer explicitly. The previous loop only\n", "        # created it when layer_size > 64, so for layer_size <= 64 the\n", "        # network had no 784-input layer and forward() crashed.\n", "        self.layers.append(nn.Linear(28*28, layer_size))\n", "        width = layer_size\n", "        while width > 64:\n", "            self.layers.append(nn.Linear(width, width // 2))\n", "            width //= 2\n", "        self.layers.append(nn.Linear(width, 10))\n", "\n", "    def forward(self, x):\n", "        # Flatten (N, 1, 28, 28) images into (N, 784) vectors\n", "        x = x.view(-1, 28*28)\n", "        for i, layer in enumerate(self.layers):\n", "            if i < len(self.layers) - 1:\n", "                ## Swap the activation function here to experiment\n", "                x = torch.relu(layer(x))\n", "                # x = torch.sigmoid(layer(x))\n", "                # x = torch.tanh(layer(x))\n", "            else:\n", "                # Final layer returns raw logits; CrossEntropyLoss\n", "                # applies log-softmax internally\n", "                x = layer(x)\n", "        return x\n", "\n", "# Initialize model, loss function, and optimizer\n", "model = CustomSizeLayerNN(layer_size=128)\n", "criterion = nn.CrossEntropyLoss()\n", "# Swap the optimizer here to experiment\n", "# optimizer = optim.SGD(model.parameters(), lr=0.01)\n", "optimizer = optim.Adam(model.parameters(), lr=0.01)\n", "\n", "# Training loop\n", "for epoch in range(10):  # 10 epochs\n", "    for images, labels in train_loader:\n", "        optimizer.zero_grad()\n", "        output = model(images)\n", "        loss = criterion(output, labels)\n", "        loss.backward()\n", "        optimizer.step()\n", "    # NOTE: this is the loss of the last batch only, not an epoch average\n", "    print(f'Epoch {epoch+1}, Loss: {loss.item()}')" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "LU_a_JjtyZ_2", "outputId": "7d70e235-db9e-4169-e719-b188b34a52dd" }, "execution_count": 20, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Epoch 1, Loss: 0.20903132855892181\n", "Epoch 2, Loss: 0.2800661027431488\n", "Epoch 3, Loss: 0.04195380210876465\n", "Epoch 4, Loss: 0.010990972630679607\n", "Epoch 5, Loss: 0.16205546259880066\n", "Epoch 6, Loss: 0.004210921470075846\n", "Epoch 7, Loss: 0.14686189591884613\n", "Epoch 8, Loss: 0.0029904020484536886\n", "Epoch 9, Loss: 0.00271606189198792\n", "Epoch 10, Loss: 0.005243723280727863\n" ] } ] } ] }