| {"question_id": 0, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "np_array = np.array([1, 2, 3])\n#result =", "pytorch_sol_code": "result = torch.from_numpy(np_array)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result.numpy(), np_array)"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "np_array = np.array([1, 2, 3])\n#result =", "tensorflow_sol_code": "result = tf.convert_to_tensor(np_array)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result.numpy(), np_array)"]}} |
| {"question_id": 1, "pytorch_library": "import torch\n", "pytorch_start_code": "\n#result =", "pytorch_sol_code": "result = torch.tensor(3.43)", "pytorch_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert np.isclose(result.item(), 3.43)"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n#result =", "tensorflow_sol_code": "result = tf.constant(3.43)", "tensorflow_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert np.isclose(result.numpy(), 3.43)"]}} |
| {"question_id": 2, "pytorch_library": "import torch", "pytorch_start_code": "\n#result =", "pytorch_sol_code": "result = torch.arange(1, 10).view(3, 3)", "pytorch_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert np.allclose(result.numpy(), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n#result =", "tensorflow_sol_code": "result = tf.reshape(tf.range(1, 10), (3, 3))", "tensorflow_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert np.array_equal(result, np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))"]}} |
| {"question_id": 3, "pytorch_library": "import torch", "pytorch_start_code": "\n#result =", "pytorch_sol_code": "input_tensor = torch.arange(1, 6)\nresult = torch.diag(input_tensor)", "pytorch_test_code": {"setup_code": "import numpy as np", "test_cases": ["expected_result = np.diag(np.arange(1, 6))\nassert np.allclose(result.numpy(), expected_result), f'Expected {expected_result}, but got {result.numpy()}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n#result =", "tensorflow_sol_code": "input_tensor = tf.range(1, 6)\nresult = tf.linalg.diag(input_tensor)", "tensorflow_test_code": {"setup_code": "import numpy as np", "test_cases": ["expected_result = np.diag(np.arange(1, 6))\nassert np.allclose(result.numpy(), expected_result), f'Expected {expected_result}, but got {result.numpy()}'"]}} |
| {"question_id": 4, "pytorch_library": "import torch", "pytorch_start_code": "\n#result =", "pytorch_sol_code": "result = torch.eye(4)", "pytorch_test_code": {"setup_code": "import numpy as np", "test_cases": ["expected_result = np.eye(4)\nassert np.allclose(result.numpy(), expected_result), f'Expected {expected_result}, but got {result.numpy()}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n#result =", "tensorflow_sol_code": "result = tf.eye(4)", "tensorflow_test_code": {"setup_code": "import numpy as np", "test_cases": ["expected_result = np.eye(4)\nassert np.allclose(result.numpy(), expected_result), f'Expected {expected_result}, but got {result.numpy()}'"]}} |
| {"question_id": 5, "pytorch_library": "import torch", "pytorch_start_code": "tensor1 = torch.tensor([1, 2, 3])\ntensor2 = torch.tensor([4, 5, 6])\n#result =", "pytorch_sol_code": "result = tensor1 + tensor2", "pytorch_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert np.allclose(result.numpy(), np.array([5, 7, 9]))"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "tensor1 = tf.constant([1, 2, 3])\ntensor2 = tf.constant([4, 5, 6])\n#result =", "tensorflow_sol_code": "result = tensor1 + tensor2", "tensorflow_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert np.allclose(result.numpy(), np.array([5, 7, 9]))"]}} |
| {"question_id": 6, "pytorch_library": "import torch", "pytorch_start_code": "tensor1 = torch.tensor([1, 2, 3])\ntensor2 = torch.tensor([4, 5, 6])\n#result =", "pytorch_sol_code": "result = tensor1 - tensor2", "pytorch_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert np.allclose(result.numpy(), np.array([-3, -3, -3]))"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "tensor1 = tf.constant([1, 2, 3])\ntensor2 = tf.constant([4, 5, 6])\n#result =", "tensorflow_sol_code": "result = tensor1 - tensor2", "tensorflow_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert np.allclose(result.numpy(), np.array([-3, -3, -3]))"]}} |
| {"question_id": 7, "pytorch_library": "import torch", "pytorch_start_code": "tensor1 = torch.tensor([[1, 2], [3, 4]])\ntensor2 = torch.tensor([[5, 6], [7, 8]])\n#result =", "pytorch_sol_code": "result = torch.mm(tensor1, tensor2)", "pytorch_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert np.allclose(result.numpy(), np.array([[19, 22], [43, 50]]))"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "tensor1 = tf.constant([[1, 2], [3, 4]])\ntensor2 = tf.constant([[5, 6], [7, 8]])\n#result =", "tensorflow_sol_code": "result = tf.matmul(tensor1, tensor2)", "tensorflow_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert np.allclose(result.numpy(), np.array([[19, 22], [43, 50]]))"]}} |
| {"question_id": 8, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "\n#result =", "pytorch_sol_code": "result = torch.zeros(5, 6)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert torch.all(result == 0).item()"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\n#result =", "tensorflow_sol_code": "result = tf.zeros([5, 6])", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert tf.reduce_all(result == 0).numpy()"]}} |
| {"question_id": 9, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "tensor = torch.tensor([[1, 2, 3], [4, 5, 6]])\n#result =", "pytorch_sol_code": "result = tensor.shape", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert result == (2, 3)"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n#result =", "tensorflow_sol_code": "result = tensor.shape", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert tuple(result.as_list()) == (2, 3)"]}} |
| {"question_id": 10, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "tensor = torch.tensor([[1, 2, 3], [4, 5, 6]])\n#result =", "pytorch_sol_code": "result = tensor.dim()", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert result == 2"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n#result =", "tensorflow_sol_code": "result = tf.rank(tensor)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert result.numpy() == 2"]}} |
| {"question_id": 11, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "tensor = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n#result =", "pytorch_sol_code": "result = tensor[1:, 1:]", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result.numpy(), np.array([[5, 6], [8, 9]]))"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "tensor = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n#result =", "tensorflow_sol_code": "result = tensor[1:, 1:]", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result.numpy(), np.array([[5, 6], [8, 9]]))"]}} |
| {"question_id": 12, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "tensor = torch.tensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\n#result =", "pytorch_sol_code": "result = tensor.numpy()", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(result, np.ndarray) and result.shape == (2, 2, 3)"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\ntensor = tf.constant([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\n#result =", "tensorflow_sol_code": "result = tensor.numpy()", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert isinstance(result, np.ndarray) and result.shape == (2, 2, 3)"]}} |
| {"question_id": 13, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "tensor = torch.tensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\n#result =", "pytorch_sol_code": "result = tensor.view(2, 6)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert result.shape == (2, 6)"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\ntensor = tf.constant([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\n#result =", "tensorflow_sol_code": "result = tf.reshape(tensor, (2, 6))", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert result.shape == (2, 6)"]}} |
| {"question_id": 14, "pytorch_library": "import torch\nfrom torch.autograd import Variable\nimport numpy as np", "pytorch_start_code": "\n#result =", "pytorch_sol_code": "result = Variable(torch.randn(4, 6), requires_grad=True)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert result.shape == (4, 6) and isinstance(result, torch.Tensor) and result.requires_grad"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\n#result =", "tensorflow_sol_code": "result = tf.Variable(tf.random.normal([4, 6]))", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert result.shape == (4, 6) and isinstance(result, tf.Variable)"]}} |
| {"question_id": 15, "pytorch_library": "import torch\nfrom torch.autograd import Variable\nimport numpy as np", "pytorch_start_code": "\nx = Variable(torch.tensor(3.0), requires_grad=True)\n#result =", "pytorch_sol_code": "y = x ** 2\ny.backward()\nresult = x.grad", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert np.isclose(result.item(), 6.0)"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\nx = tf.Variable(3.0)\n#result =", "tensorflow_sol_code": "with tf.GradientTape() as tape:\n y = x ** 2\nresult = tape.gradient(y, x)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.isclose(result.numpy(), 6.0)"]}} |
| {"question_id": 16, "pytorch_library": "import torch\nfrom torch.autograd import Variable\nimport numpy as np", "pytorch_start_code": "\nx = Variable(torch.tensor(10.0), requires_grad=True)\n#result =", "pytorch_sol_code": "y = (x - 5) ** 2\ny.backward()\nwith torch.no_grad():\n x -= 0.1 * x.grad\nresult = x", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert np.isclose(result.item(), 9.0)"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\nx = tf.Variable(10.0)\n#result =", "tensorflow_sol_code": "with tf.GradientTape() as tape:\n tape.watch(x)\n y = (x - 5) ** 2\ngrad = tape.gradient(y, x)\nx.assign(x - 0.1 * grad)\nresult = x", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.isclose(result.numpy(), 9.0)"]}} |
| {"question_id": 17, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "\na = torch.tensor([2., 3.], requires_grad=True)\nb = torch.tensor([6., 4.], requires_grad=True)\n#result =", "pytorch_sol_code": "Q = 3 * a ** 3 - b ** 2\nQ.backward(torch.tensor([1., 1.]))\ndQ_da = a.grad\ndQ_db = b.grad\nresult = [dQ_da, dQ_db]", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result[0].numpy(), [36., 81.]) and np.allclose(result[1].numpy(), [-12., -8.])"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\na = tf.Variable([2., 3.], dtype=tf.float32)\nb = tf.Variable([6., 4.], dtype=tf.float32)\n#result =", "tensorflow_sol_code": "with tf.GradientTape() as tape:\n tape.watch([a, b])\n Q = 3 * a ** 3 - b ** 2\ngrads = tape.gradient(Q, [a, b])\nresult = grads", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result[0].numpy(), [36., 81.]) and np.allclose(result[1].numpy(), [-12., -8.])"]}} |
| {"question_id": 18, "pytorch_library": "import torch\nfrom torchvision import datasets, transforms\nimport numpy as np", "pytorch_start_code": "#train_dataset =", "pytorch_sol_code": "train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor())", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(train_dataset, torch.utils.data.Dataset)\nassert len(train_dataset) == 60000"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np\nimport tensorflow_datasets as tfds", "tensorflow_start_code": "\n#train_dataset =", "tensorflow_sol_code": "train_dataset = tfds.load('mnist', split='train', shuffle_files=True)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert isinstance(train_dataset, tf.data.Dataset)\nassert len(list(train_dataset)) == 60000"]}} |
| {"question_id": 19, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "\ntensor = torch.tensor([-1.0, 0, 1.0, 5.0], dtype=torch.float32)\n#result =", "pytorch_sol_code": "result = torch.relu(tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result.numpy(), [0, 0, 1.0, 5.0], atol=1e-5)"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\ntensor = tf.constant([-1.0, 0, 1.0, 5.0], dtype=tf.float32)\n#result =", "tensorflow_sol_code": "result = tf.nn.relu(tensor)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result.numpy(), [0, 0, 1.0, 5.0], atol=1e-5)"]}} |
| {"question_id": 20, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "\ntensor = torch.tensor([-1.0, 0, 1.0, 5.0, 6.5], dtype=torch.float32)\n#result =", "pytorch_sol_code": "result = torch.nn.functional.relu6(tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result.numpy(), [0, 0, 1.0, 5.0, 6.0], atol=1e-5)"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\ntensor = tf.constant([-1.0, 0, 1.0, 5.0, 6.5], dtype=tf.float32)\n#result =", "tensorflow_sol_code": "result = tf.nn.relu6(tensor)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result.numpy(), [0, 0, 1.0, 5.0, 6.0], atol=1e-5)"]}} |
| {"question_id": 21, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "\ntensor = torch.tensor([-1.0, 0, 1.0, 5.0], dtype=torch.float32)\n#result =", "pytorch_sol_code": "result = torch.sigmoid(tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result.numpy(), [0.26894143, 0.5, 0.7310586, 0.9933072 ], atol=1e-5)"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\ntensor = tf.constant([-1.0, 0, 1.0, 5.0], dtype=tf.float32)\n#result =", "tensorflow_sol_code": "result = tf.nn.sigmoid(tensor)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(result.numpy(), [0.26894143, 0.5, 0.7310586, 0.9933072 ], atol=1e-5)"]}} |
| {"question_id": 22, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\n#class NeuralNetwork(nn.Module):\n#model = NeuralNetwork()", "pytorch_sol_code": "class NeuralNetwork(nn.Module):\n def __init__(self):\n super().__init__()\n self.flatten = nn.Flatten()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(28 * 28, 512),\n nn.ReLU(),\n nn.Linear(512, 512),\n nn.ReLU(),\n nn.Linear(512, 10),\n )\n\n def forward(self, x):\n x = self.flatten(x)\n logits = self.linear_relu_stack(x)\n return logits\n\n\nmodel = NeuralNetwork()", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(model, nn.Module) and len(list(model.children())) == 2\nassert isinstance(model.linear_relu_stack[0], nn.Linear)\nassert model.linear_relu_stack[0].out_features == 512 and model.linear_relu_stack[2].out_features == 512 and model.linear_relu_stack[4].out_features == 10\n"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\n#model =", "tensorflow_sol_code": "model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(512, activation=\"relu\"),\n tf.keras.layers.Dense(512, activation=\"relu\"),\n tf.keras.layers.Dense(10),\n ]\n)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert isinstance(model, tf.keras.Sequential) and len(model.layers) == 4\n", "assert isinstance(model.layers[1], tf.keras.layers.Dense)\n", "assert model.layers[1].units == 512 and model.layers[2].units == 512 and model.layers[3].units == 10\n"]}} |
| {"question_id": 23, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.optim as optim", "pytorch_start_code": "class NeuralNetwork(nn.Module):\n def __init__(self):\n super().__init__()\n self.flatten = nn.Flatten()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(28 * 28, 512),\n nn.ReLU(),\n nn.Linear(512, 512),\n nn.ReLU(),\n nn.Linear(512, 10),\n )\n\n def forward(self, x):\n x = self.flatten(x)\n logits = self.linear_relu_stack(x)\n return logits\n\n\nmodel = NeuralNetwork()\n# optimizer", "pytorch_sol_code": "optimizer = optim.SGD(model.parameters(), lr=0.01)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, optim.SGD) and optimizer.param_groups[0]['lr'] == 0.01\n"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(512, activation=\"relu\"),\n tf.keras.layers.Dense(512, activation=\"relu\"),\n tf.keras.layers.Dense(10),\n ]\n)\n#optimizer", "tensorflow_sol_code": "optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, tf.keras.optimizers.SGD) and np.isclose(optimizer.learning_rate.numpy(), 0.01)\n"]}} |
| {"question_id": 24, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "# loss_fn = ", "pytorch_sol_code": "loss_fn = nn.CrossEntropyLoss()", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(loss_fn, nn.CrossEntropyLoss) and loss_fn.reduction == 'mean'\n"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "# loss_fn =", "tensorflow_sol_code": "loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert isinstance(loss_fn, tf.keras.losses.SparseCategoricalCrossentropy) and loss_fn.from_logits == True\n"]}} |
| {"question_id": 25, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.optim as optim", "pytorch_start_code": "class NeuralNetwork(nn.Module):\n def __init__(self):\n super().__init__()\n self.flatten = nn.Flatten()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(28 * 28, 512),\n nn.ReLU(),\n nn.Linear(512, 512),\n nn.ReLU(),\n nn.Linear(512, 10),\n )\n\n def forward(self, x):\n x = self.flatten(x)\n logits = self.linear_relu_stack(x)\n return logits\n\n\nmodel = NeuralNetwork()\n# optimizer", "pytorch_sol_code": "optimizer = optim.Adam(model.parameters(), lr=0.001)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, optim.Adam) and optimizer.param_groups[0]['lr'] == 0.001\n"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(512, activation=\"relu\"),\n tf.keras.layers.Dense(512, activation=\"relu\"),\n tf.keras.layers.Dense(10),\n ]\n)\n#optimizer =", "tensorflow_sol_code": "optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, tf.keras.optimizers.Adam) and np.isclose(optimizer.learning_rate.numpy(), 0.001)\n"]}} |
| {"question_id": 26, "pytorch_library": "import torch", "pytorch_start_code": "tensor = torch.tensor([[1, 2, 3], [4, 5, 6]])\n#result =", "pytorch_sol_code": "result = torch.max(tensor).item()", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert result == 6\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n#result =", "tensorflow_sol_code": "result = tf.reduce_max(tensor).numpy()", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert result == 6\n"]}} |
| {"question_id": 27, "pytorch_library": "import torch", "pytorch_start_code": "tensor = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n#result =", "pytorch_sol_code": "result = torch.mean(tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert result.item() == 3.5\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n#result =", "tensorflow_sol_code": "result = tf.reduce_mean(tensor).numpy()", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert result == 3.5\n"]}} |
| {"question_id": 28, "pytorch_library": "import torch", "pytorch_start_code": "tensor = torch.tensor([[1, 2, 3], [4, 5, 6]])\n#result =", "pytorch_sol_code": "result = torch.prod(tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert result.item() == 720\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n#result =", "tensorflow_sol_code": "result = tf.reduce_prod(tensor).numpy()", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert result == 720\n"]}} |
| {"question_id": 29, "pytorch_library": "import torch", "pytorch_start_code": "tensor = torch.tensor([1, 2, 1, 2, 1, 2])\n#result =", "pytorch_sol_code": "result = torch.unique(tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert torch.equal(result, torch.tensor([1, 2]))\n"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "tensor = tf.constant([1, 2, 1, 2, 1, 2])\n#result =", "tensorflow_sol_code": "result = tf.unique(tensor).y.numpy()", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.array_equal(result, [1, 2])\n"]}} |
| {"question_id": 30, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "a = torch.tensor([1, 0, 1])\nb = torch.tensor([1, 1, 0])\n#result =", "pytorch_sol_code": "result = torch.bitwise_xor(a, b)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert torch.equal(result, torch.tensor([0, 1, 1]))\n"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "a = tf.constant([1, 0, 1])\nb = tf.constant([1, 1, 0])\n#result =", "tensorflow_sol_code": "result = tf.bitwise.bitwise_xor(a, b).numpy()", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.array_equal(result, [0, 1, 1])\n"]}} |
| {"question_id": 31, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "angles = torch.tensor([1, 3.2, 4.5])\n#result =", "pytorch_sol_code": "sines = torch.sin(angles)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert torch.allclose(sines, torch.tensor([ 0.8415, -0.0584, -0.9775]), atol=1e-4)\n"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "angles = tf.constant([1, 3.2, 4.5])\n#result =", "tensorflow_sol_code": "sines = tf.sin(angles)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(sines.numpy(), [ 0.8415, -0.0584, -0.9775], atol=1e-4)\n"]}} |
| {"question_id": 32, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "import torch\n# Define the model and save it to the variable 'model'.\n#model = TinyModel()", "pytorch_sol_code": "\nclass TinyModel(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n self.linear1 = torch.nn.Linear(100, 200)\n self.activation = torch.nn.ReLU()\n self.linear2 = torch.nn.Linear(200, 10)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, x):\n x = self.linear1(x)\n x = self.activation(x)\n x = self.linear2(x)\n x = self.softmax(x)\n return x\n\n\nmodel = TinyModel()\n\n", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(model, nn.Module) and model.linear1.out_features == 200\n", "assert model.linear2.out_features == 10 and isinstance(model.activation, nn.ReLU) and isinstance(model.softmax, nn.Softmax)"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "# Define the model and save it to the variable 'model'.\n#model = ", "tensorflow_sol_code": "model = tf.keras.models.Sequential([\n tf.keras.layers.Dense(200, input_shape=(100,), activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert model.layers[0].output.shape == (None, 200)\n", "assert model.layers[0].activation.__name__ == 'relu' and model.layers[1].output.shape == (None, 10) and model.layers[1].activation.__name__ == 'softmax'"]}} |
| {"question_id": 33, "pytorch_library": "import torch", "pytorch_start_code": "\nclass TinyModel(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n self.linear1 = torch.nn.Linear(100, 200)\n self.activation = torch.nn.ReLU()\n self.linear2 = torch.nn.Linear(200, 10)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, x):\n x = self.linear1(x)\n x = self.activation(x)\n x = self.linear2(x)\n x = self.softmax(x)\n return x\n\n\nmodel = TinyModel()\n\n\n#total_params =", "pytorch_sol_code": "total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert total_params == 22210, f'Expected 22210 parameters, got {total_params}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "model = tf.keras.models.Sequential([\n tf.keras.layers.Dense(200, input_shape=(100,), activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n#total_params = ", "tensorflow_sol_code": "total_params = model.count_params()", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert total_params == 22210, f'Expected 22210 parameters, got {total_params}'\n"]}} |
| {"question_id": 34, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\nclass TinyModel(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n self.linear1 = torch.nn.Linear(100, 200)\n self.activation = torch.nn.ReLU()\n self.linear2 = torch.nn.Linear(200, 10)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, x):\n x = self.linear1(x)\n x = self.activation(x)\n x = self.linear2(x)\n x = self.softmax(x)\n return x\n\n\nmodel = TinyModel()\n\n\n#first_layer_params =", "pytorch_sol_code": "first_layer_params = model.linear1.weight.numel() + model.linear1.bias.numel()", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert first_layer_params == 20200, f'Expected 20200 parameters in the first layer, got {first_layer_params}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "model = tf.keras.models.Sequential([\n tf.keras.layers.Dense(200, input_shape=(100,), activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n#first_layer_params = ", "tensorflow_sol_code": "first_layer_params = model.layers[0].count_params()", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert first_layer_params == 20200, f'Expected 20200 parameters in the first layer, got {first_layer_params}'\n"]}} |
| {"question_id": 35, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "tensor = torch.tensor([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=torch.float32)\n#output =", "pytorch_sol_code": "pool = nn.MaxPool2d(2, stride=2)\noutput = pool(tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert torch.equal(output, torch.tensor([[[[6, 8], [14, 16]]]]))\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.constant([[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]], dtype=tf.float32)\n#output =", "tensorflow_sol_code": "output = tf.nn.max_pool2d(tensor, ksize=2, strides=2, padding='VALID')", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert tf.reduce_all(tf.equal(output, tf.constant([[[[6], [8]], [[14], [16]]]], dtype=tf.float32))).numpy(), 'Output did not match expected'\n"]}} |
| {"question_id": 36, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "tensor = torch.tensor([[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 3.0, 4.0, 5.0, 6.0]], dtype=torch.float32) # Increased batch size\n#output =", "pytorch_sol_code": "bn_layer = nn.BatchNorm1d(num_features=5)\noutput = bn_layer(tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert torch.allclose(output, torch.tensor([[-1.0000, -1.0000, -1.0000, -1.0000, -1.0000], [ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]), atol=0.0001), 'Output did not match expected values.'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.constant([[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 3.0, 4.0, 5.0, 6.0]], dtype=tf.float32) # Already has batch dimension\n#output =", "tensorflow_sol_code": "bn_layer = tf.keras.layers.BatchNormalization(axis=1)\noutput = bn_layer(tensor, training=True)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert tf.experimental.numpy.allclose(output, tf.constant([[-0.9980061, -0.99800587, -0.99800587, -0.99800587, -0.99800587], [0.99800587, 0.99800634, 0.99800587, 0.99800587, 0.99800587]], dtype=tf.float32), atol=1e-5), 'Output did not match expected values.'\n"]}} |
| {"question_id": 37, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(0)\ntensor = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], requires_grad=True)\n#output =", "pytorch_sol_code": "dropout = nn.Dropout(p=0.5)\noutput = dropout(tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["expected_output = torch.tensor([0., 0., 6., 0., 0.])\nassert torch.equal(output, expected_output), 'Output does not match expected tensor'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0])\n#You have to set the seed to 0 in dropout\n#output =", "tensorflow_sol_code": "dropout = tf.keras.layers.Dropout(rate=0.5,seed=0)\noutput = dropout(tensor, training=True)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["expected_output = tf.constant([0. ,4., 6. ,8., 0.], dtype=tf.float32)\nassert tf.reduce_all(tf.equal(output, expected_output)).numpy(), 'Output does not match expected tensor'\n"]}} |
| {"question_id": 38, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.optim as optim", "pytorch_start_code": "\nclass TinyModel(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n self.linear1 = torch.nn.Linear(100, 200)\n self.activation = torch.nn.ReLU()\n self.linear2 = torch.nn.Linear(200, 10)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, x):\n x = self.linear1(x)\n x = self.activation(x)\n x = self.linear2(x)\n x = self.softmax(x)\n return x\n\n\nmodel = TinyModel()\n\n\n#optimizer = ", "pytorch_sol_code": "optimizer = optim.SGD(model.parameters(), lr=0.0001)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, optim.SGD) and optimizer.param_groups[0]['lr'] == 0.0001, f'Incorrect optimizer configuration'\n"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Dense(200, input_shape=(100,), activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n#optimizer = ", "tensorflow_sol_code": "optimizer = tf.keras.optimizers.SGD(learning_rate=0.0001)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, tf.keras.optimizers.SGD) and np.isclose(optimizer.learning_rate.numpy(), 0.0001), f'Incorrect optimizer configuration; expected learning rate 0.0001, got {optimizer.learning_rate.numpy()}'\n"]}} |
| {"question_id": 39, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "input_tensor = torch.tensor([2.7, 4.2, 3.6, 9.8], requires_grad=True)\ntarget_tensor = torch.tensor([1., 3., 5., 7.])\n#loss = ", "pytorch_sol_code": "mse_loss = nn.MSELoss()\nloss = mse_loss(input_tensor, target_tensor)", "pytorch_test_code": {"setup_code": "expected_loss = 3.5325\n", "test_cases": ["assert torch.isclose(loss, torch.tensor(expected_loss)), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "input_tensor = tf.constant([2.7, 4.2, 3.6, 9.8])\ntarget_tensor = tf.constant([1., 3., 5., 7.])\n#loss = ", "tensorflow_sol_code": "mse_loss = tf.keras.losses.MeanSquaredError()\nloss = mse_loss(target_tensor, input_tensor)", "tensorflow_test_code": {"setup_code": "expected_loss = 3.5325\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-6), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 40, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.optim as optim", "pytorch_start_code": "\ntorch.manual_seed(0)\nclass TinyModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear1 = nn.Linear(100, 200)\n self.activation = nn.ReLU()\n self.linear2 = nn.Linear(200, 10)\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, x):\n x = self.linear1(x)\n x = self.activation(x)\n x = self.linear2(x)\n x = self.softmax(x)\n return x\n\nmodel = TinyModel()\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.01)\ninput_tensor = torch.randn(10, 100) # Batch size of 10\ntarget = torch.randn(10, 10) # Random target values for MSE calculation\n# loss =\n ", "pytorch_sol_code": "\noptimizer.zero_grad()\noutput = model(input_tensor)\nloss = criterion(output, target)\nloss.backward()\noptimizer.step()\n ", "pytorch_test_code": {"setup_code": "expected_loss = 1.2815496921539307\n", "test_cases": ["assert torch.isclose(loss,torch.tensor(expected_loss))\n"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\ntf.random.set_seed(0)\nnp.random.seed(0)\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Dense(200, input_shape=(100,), activation='relu', kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0)),\n tf.keras.layers.Dense(10, activation='softmax',kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))\n])\nmodel.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),\n loss='mean_squared_error')\ninput_tensor = tf.random.normal((10, 100),seed=0) # Batch size of 10\ntarget = tf.random.normal((10, 10),seed=0) # Random target values for MSE calculation\n# loss = \n ", "tensorflow_sol_code": "\nloss = model.train_on_batch(input_tensor, target)\n ", "tensorflow_test_code": {"setup_code": "expected_loss = 0.9032668\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss)\n"]}} |
| {"question_id": 41, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(0)\ninput_tensor = torch.randn(4, requires_grad=True)\ntarget_tensor = torch.randn(4)\n#loss = ", "pytorch_sol_code": "mae_loss = nn.L1Loss()\nloss = mae_loss(input_tensor, target_tensor)", "pytorch_test_code": {"setup_code": "expected_loss = 1.6456040143966675\n", "test_cases": ["assert torch.isclose(loss, torch.tensor(expected_loss), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tf.random.set_seed(0)\ninput_tensor = tf.random.normal([4],seed=0)\ntarget_tensor = tf.random.normal([4],seed=0)\n#loss = ", "tensorflow_sol_code": "mae_loss = tf.keras.losses.MeanAbsoluteError()\nloss = mae_loss(target_tensor, input_tensor)", "tensorflow_test_code": {"setup_code": "expected_loss = 1.902283787727356\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 42, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(0)\ninput_tensor =torch.randn(7, requires_grad=True)\ntarget_tensor = torch.randn(7, requires_grad=True)\n#loss = ", "pytorch_sol_code": "hinge_loss = nn.HingeEmbeddingLoss()\nloss = hinge_loss(input_tensor.float(), target_tensor.float())", "pytorch_test_code": {"setup_code": "expected_loss = 1.0772851705551147\n", "test_cases": ["assert torch.isclose(loss, torch.tensor(expected_loss), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tf.random.set_seed(0)\ninput_tensor = tf.random.normal([7],seed=0)\ntarget_tensor = tf.random.normal([7],seed=0)\n#loss =", "tensorflow_sol_code": "hinge_loss = tf.keras.losses.Hinge()\nloss = hinge_loss(target_tensor, input_tensor)", "tensorflow_test_code": {"setup_code": "expected_loss = 1.2223261594772339\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 43, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(0)\ninput_tensor = torch.randn(5, requires_grad=True)\ntarget_tensor = torch.randn(5)\n#loss = ", "pytorch_sol_code": "huber_loss = nn.HuberLoss()\nloss = huber_loss(input_tensor, target_tensor)", "pytorch_test_code": {"setup_code": "expected_loss = 1.2437692880630493\n", "test_cases": ["assert torch.isclose(loss, torch.tensor(expected_loss), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tf.random.set_seed(0)\ninput_tensor = tf.random.normal([5],seed=0)\ntarget_tensor = tf.random.normal([5],seed=0)\n#loss = ", "tensorflow_sol_code": "huber_loss = tf.keras.losses.Huber()\nloss = huber_loss(target_tensor, input_tensor)", "tensorflow_test_code": {"setup_code": "expected_loss = 0.7624791860580444\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 44, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\n# model =\n ", "pytorch_sol_code": "\nmodel = nn.Sequential(\n nn.Conv2d(1, 20, 5),\n nn.ReLU(),\n nn.Conv2d(20, 64, 5),\n nn.ReLU()\n)\n ", "pytorch_test_code": {"setup_code": "\n", "test_cases": ["assert isinstance(model, nn.Sequential), 'Model is not an instance of nn.Sequential'\n", "assert len(model) == 4, 'Model does not contain the correct number of layers'\n", "assert isinstance(model[0], nn.Conv2d) and model[0].in_channels == 1 and model[0].out_channels == 20, 'First layer specifications are incorrect'\n", "assert isinstance(model[1], nn.ReLU), 'Second layer should be ReLU activation'\nassert isinstance(model[2], nn.Conv2d) and model[2].in_channels == 20 and model[2].out_channels == 64, 'Third layer specifications are incorrect'\n", "assert isinstance(model[3], nn.ReLU), 'Fourth layer should be ReLU activation'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n# model =\n ", "tensorflow_sol_code": "\nmodel = tf.keras.Sequential([\n tf.keras.layers.Conv2D(20, (5, 5), input_shape=(None, None, 1), padding='valid'),\n tf.keras.layers.ReLU(),\n tf.keras.layers.Conv2D(64, (5, 5), padding='valid'),\n tf.keras.layers.ReLU()\n])\n ", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert isinstance(model, tf.keras.Sequential), 'Model is not an instance of tf.keras.Sequential'\n", "assert len(model.layers) == 4, 'Model does not contain the correct number of layers'\n", "assert isinstance(model.layers[0], tf.keras.layers.Conv2D) and model.layers[0].filters == 20 and model.layers[0].kernel_size == (5, 5), 'First layer specifications are incorrect'\n", "assert isinstance(model.layers[1], tf.keras.layers.ReLU), 'Second layer should be ReLU activation'\nassert isinstance(model.layers[2], tf.keras.layers.Conv2D) and model.layers[2].filters == 64 and model.layers[2].kernel_size == (5, 5), 'Third layer specifications are incorrect'\nassert isinstance(model.layers[3], tf.keras.layers.ReLU), 'Fourth layer should be ReLU activation'\n"]}} |
| {"question_id": 45, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\ntorch.manual_seed(0)\nclass TinyModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear1 = nn.Linear(100, 200)\n self.activation = nn.ReLU()\n self.linear2 = nn.Linear(200, 10)\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, x):\n x = self.linear1(x)\n x = self.activation(x)\n x = self.linear2(x)\n x = self.softmax(x)\n return x\n\nmodel = TinyModel()\n# set device\n ", "pytorch_sol_code": "\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel.to(device)\n ", "pytorch_test_code": {"setup_code": "\n", "test_cases": ["assert next(model.parameters()).is_cuda == True, 'Model is not on CUDA device; it is on {}'.format(next(model.parameters()).device)\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\ntf.config.set_soft_device_placement(True) # Enable automatic device placement\ntf.debugging.set_log_device_placement(True) # Log device placement for debugging\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Dense(200, input_shape=(100,), activation='relu', kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0)),\n tf.keras.layers.Dense(10, activation='softmax',kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))\n])\n# set device\n ", "tensorflow_sol_code": "\ndevice = '/gpu:0' if tf.config.list_physical_devices('GPU') else '/cpu:0'\nwith tf.device(device):\n model = tf.keras.models.Sequential([\n tf.keras.layers.Dense(200, input_shape=(100,), activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n ])\n ", "tensorflow_test_code": {"setup_code": "\ndummy_input = tf.random.normal([1, 100])\noutput = model(dummy_input)\n", "test_cases": ["gpu_available = tf.config.list_physical_devices('GPU')\nop_device = output.device\nassert ('gpu' in op_device.lower() and gpu_available) or ('cpu' in op_device.lower() and not gpu_available), 'Weight {} not on 
device {}'.format(op_device, 'GPU' if gpu_available else 'CPU')\n"]}} |
| {"question_id": 46, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\nmodel = nn.Sequential(\n nn.Conv2d(1, 20, 5),\n nn.ReLU(),\n nn.Conv2d(20, 64, 5),\n nn.ReLU()\n)\n# Save the model with the name 'seq_model.pth'\n ", "pytorch_sol_code": "torch.save(model, 'seq_model.pth')", "pytorch_test_code": {"setup_code": "\n", "test_cases": ["import os\nassert os.path.exists('seq_model.pth'), 'Model file not found after save operation'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nmodel = tf.keras.Sequential([\n tf.keras.layers.Conv2D(20, (5, 5), input_shape=(None, None, 1), padding='valid'),\n tf.keras.layers.ReLU(),\n tf.keras.layers.Conv2D(64, (5, 5), padding='valid'),\n tf.keras.layers.ReLU()\n])\n# Save the model with the name 'seq_model.keras'\n ", "tensorflow_sol_code": "model.save('seq_model.keras')", "tensorflow_test_code": {"setup_code": "", "test_cases": ["import os\nassert os.path.exists('seq_model.keras'), 'Model file not found after save operation'"]}} |
| {"question_id": 47, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\nmodel = nn.Sequential(\n nn.Conv2d(1, 20, 5),\n nn.ReLU(),\n nn.Conv2d(20, 64, 5),\n nn.ReLU()\n)\ntorch.save(model, 'seq_model.pth')\n# Load the model\n ", "pytorch_sol_code": "loaded_model = torch.load('seq_model.pth')", "pytorch_test_code": {"setup_code": "\n", "test_cases": ["\nassert isinstance(loaded_model, nn.Sequential), 'Loaded model is not an instance of nn.Sequential'\n", "assert len(loaded_model) == 4, 'Model does not contain the correct number of layers'\n", "assert isinstance(loaded_model[0], nn.Conv2d) and loaded_model[0].in_channels == 1 and loaded_model[0].out_channels == 20, 'First Conv2d layer parameters are incorrect'\n", "assert isinstance(loaded_model[1], nn.ReLU), 'Second layer should be ReLU activation'\nassert isinstance(loaded_model[2], nn.Conv2d) and loaded_model[2].in_channels == 20 and loaded_model[2].out_channels == 64, 'Third Conv2d layer parameters are incorrect'\n", "assert isinstance(loaded_model[3], nn.ReLU), 'Fourth layer should be ReLU activation'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nmodel = tf.keras.Sequential([\n tf.keras.layers.Conv2D(20, (5, 5), input_shape=(None, None, 1), padding='valid'),\n tf.keras.layers.ReLU(),\n tf.keras.layers.Conv2D(64, (5, 5), padding='valid'),\n tf.keras.layers.ReLU()\n])\nmodel.save('seq_model.keras')\n# Load the model\n ", "tensorflow_sol_code": "loaded_model = tf.keras.models.load_model('seq_model.keras')", "tensorflow_test_code": {"setup_code": "", "test_cases": ["\nassert isinstance(loaded_model, tf.keras.Sequential), 'Loaded model is not an instance of tf.keras.Sequential'\n", "assert len(loaded_model.layers) == 4, 'Model does not contain the correct number of layers'\n", "assert isinstance(loaded_model.layers[0], tf.keras.layers.Conv2D) and loaded_model.layers[0].filters == 20 and loaded_model.layers[0].kernel_size == (5, 5), 'First Conv2D 
layer parameters are incorrect'\n", "assert isinstance(loaded_model.layers[1], tf.keras.layers.ReLU), 'Second layer should be ReLU activation'\nassert isinstance(loaded_model.layers[2], tf.keras.layers.Conv2D) and loaded_model.layers[2].filters == 64 and loaded_model.layers[2].kernel_size == (5, 5), 'Third Conv2D layer parameters are incorrect'\n", "assert isinstance(loaded_model.layers[3], tf.keras.layers.ReLU), 'Fourth layer should be ReLU activation'\n "]}} |
| {"question_id": 48, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\n# Define the model using Sequential\n# model =\n ", "pytorch_sol_code": "\nmodel = nn.Sequential(\n nn.Embedding(num_embeddings=1000, embedding_dim=64),\n nn.LSTM(64, 128, batch_first=True),\n nn.Linear(128, 10)\n)\n ", "pytorch_test_code": {"setup_code": "\n", "test_cases": ["\nassert isinstance(model[0], nn.Embedding) and model[0].num_embeddings == 1000 and model[0].embedding_dim == 64, 'Embedding layer configuration error'\n", "assert isinstance(model[1], nn.LSTM) and model[1].hidden_size == 128, 'LSTM layer configuration error'\n", "assert isinstance(model[2], nn.Linear) and model[2].out_features == 10, 'Dense layer configuration error'\n"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers", "tensorflow_start_code": "\n# Define the model using Sequential\n# model = \n ", "tensorflow_sol_code": "\nmodel = tf.keras.Sequential([\n layers.Embedding(input_dim=1000, output_dim=64),\n layers.LSTM(128),\n layers.Dense(10)\n])\n ", "tensorflow_test_code": {"setup_code": "", "test_cases": ["\nassert isinstance(model.layers[0], layers.Embedding) and model.layers[0].input_dim == 1000 and model.layers[0].output_dim == 64, 'Embedding layer configuration error'\n ", "assert isinstance(model.layers[1], layers.LSTM) and model.layers[1].units == 128, 'LSTM layer configuration error'\n", "assert isinstance(model.layers[2], layers.Dense) and model.layers[2].units == 10, 'Dense layer configuration error'\n"]}} |
| {"question_id": 49, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\n# Define the model using Sequential\n# model =\n ", "pytorch_sol_code": "\nmodel = nn.Sequential(\n nn.LSTM(input_size=10, hidden_size=64, batch_first=True, bidirectional=True),\n nn.LSTM(input_size=128, hidden_size=32, batch_first=True, bidirectional=True), # Input size doubles due to bidirectionality\n nn.Linear(64, 10) # Output from the second LSTM is doubled due to bidirectionality\n)\n ", "pytorch_test_code": {"setup_code": "\n", "test_cases": ["\nassert isinstance(model[0], nn.LSTM) and model[0].hidden_size == 64 and model[0].bidirectional, 'First LSTM layer configuration error'\n", "assert isinstance(model[1], nn.LSTM) and model[1].hidden_size == 32 and model[1].bidirectional, 'Second LSTM layer configuration error'\nassert isinstance(model[2], nn.Linear) and model[2].out_features == 10, 'Dense layer configuration error'\n "]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers", "tensorflow_start_code": "\n# Define the model using Sequential\n# model = \n ", "tensorflow_sol_code": "\nmodel = tf.keras.Sequential([\n layers.Bidirectional(layers.LSTM(64, return_sequences=True), input_shape=(5, 10)),\n layers.Bidirectional(layers.LSTM(32)),\n layers.Dense(10)\n])\n ", "tensorflow_test_code": {"setup_code": "\ndummy_input = tf.random.normal([32, 5, 10])\nfirst_output = model.layers[0](dummy_input)\nsecond_output = model.layers[1](first_output)\n", "test_cases": ["assert isinstance(model.layers[0], layers.Bidirectional) and first_output.shape == (32, 5, 128), 'First Bidirectional LSTM layer configuration error'\nassert isinstance(model.layers[1], layers.Bidirectional) and second_output.shape == (32,64), 'Second Bidirectional LSTM layer configuration error'\n", "assert isinstance(model.layers[2], layers.Dense) and model.layers[2].units == 10, 'Dense layer configuration error'\n"]}} |
| {"question_id": 50, "pytorch_library": "import torch\nimport torch.nn.functional as F\nimport numpy as np", "pytorch_start_code": "\ntorch.manual_seed(0)\ntensor1 = torch.randn(10, requires_grad=True)\ntensor2 = torch.randn(10, requires_grad=True)\n# Calculate cosine similarity\n# cosine_similarity =\n ", "pytorch_sol_code": "\ncosine_similarity = F.cosine_similarity(tensor1, tensor2, dim=0)\n ", "pytorch_test_code": {"setup_code": "\nexpected_value = 0.41493287682533264\n", "test_cases": ["assert np.isclose(cosine_similarity.item(), expected_value, atol=1e-5), 'Cosine similarity calculation does not match expected value'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\ntf.random.set_seed(0)\ntensor1 = tf.random.normal([10],seed=0)\ntensor2 = tf.random.normal([10],seed=0)\n# Calculate cosine similarity\n# cosine_similarity =\n ", "tensorflow_sol_code": "\ncosine_similarity = tf.keras.losses.cosine_similarity(tensor1, tensor2)\n ", "tensorflow_test_code": {"setup_code": "\nexpected_value = -0.25341374\n", "test_cases": ["assert tf.experimental.numpy.isclose(cosine_similarity.numpy(), expected_value, atol=1e-5), 'Cosine similarity calculation does not match expected value'\n"]}} |
| {"question_id": 51, "pytorch_library": "import torch\nimport torch.nn.functional as F\nimport numpy as np", "pytorch_start_code": "\ntorch.manual_seed(0)\ntensor1 = torch.randn(10, requires_grad=True)\ntensor2 = torch.randn(10, requires_grad=True)\n# Calculate Euclidean distance\n# euclidean_distance =\n ", "pytorch_sol_code": "\neuclidean_distance = torch.dist(tensor1, tensor2)\n ", "pytorch_test_code": {"setup_code": "\nexpected_value = 3.3985581398010254\n", "test_cases": ["assert np.isclose(euclidean_distance.item(), expected_value, atol=1e-5), 'Euclidean distance calculation does not match expected value'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\ntf.random.set_seed(0)\ntensor1 = tf.random.normal([10], seed=0)\ntensor2 = tf.random.normal([10], seed=0)\n# Calculate Euclidean distance\n# euclidean_distance =\n ", "tensorflow_sol_code": "\neuclidean_distance = tf.norm(tensor1 - tensor2)\n ", "tensorflow_test_code": {"setup_code": "\nexpected_value = 4.275403\n", "test_cases": ["assert tf.experimental.numpy.isclose(euclidean_distance.numpy(), expected_value, atol=1e-5), 'Euclidean distance calculation does not match expected value'"]}} |
| {"question_id": 52, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(1)\nword_to_ix = {\"hello\": 0, \"world\": 1}\nembeds = nn.Embedding(2, 5) # 2 words in vocab, 5 dimensional embeddings\nlookup_tensor = torch.tensor([word_to_ix[\"hello\"]], dtype=torch.long)\n# hello_embed = ", "pytorch_sol_code": "hello_embed = embeds(lookup_tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert hello_embed.shape == (1, 5), 'Shape of hello_embed tensor is incorrect'", "expected_values = torch.tensor([[ 0.6614, 0.2669, 0.0617, 0.6213, -0.4519]])\nassert torch.allclose(hello_embed, expected_values, atol=1e-4), 'Values of hello_embed tensor are incorrect'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "import tensorflow as tf\nimport tensorflow.keras as keras\ntf.random.set_seed(1)\nword_to_ix = {\"hello\": 0, \"world\": 1}\nembeds = tf.keras.layers.Embedding(input_dim=2, output_dim=5, embeddings_initializer=keras.initializers.RandomNormal(seed=1))\nlookup_tensor = tf.constant([word_to_ix[\"hello\"]])\n# hello_embed = ", "tensorflow_sol_code": "hello_embed = embeds(lookup_tensor)", "tensorflow_test_code": {"setup_code": "tf.random.set_seed(1)", "test_cases": ["assert hello_embed.shape == (1, 5), 'Shape of hello_embed tensor is incorrect'", "expected_values = tf.constant([[0.00633252, -0.02465083, 0.03155954, -0.03944233, 0.02841545]], dtype=tf.float32)\nassert tf.reduce_all(tf.abs(hello_embed - expected_values) < 1e-4), 'Values of hello_embed tensor are incorrect'"]}} |
| {"question_id": 53, "pytorch_library": "import torch\nimport math\n", "pytorch_start_code": "\n# def scaled_dot_product_attention(Q, K, V, mask=None):\n# result =\n", "pytorch_sol_code": "\ndef scaled_dot_product_attention(Q, K, V, mask=None):\n attn_scores = torch.matmul(Q, K.transpose(-2, -1)) / math.sqrt(Q.size(-1))\n if mask is not None:\n attn_scores = attn_scores.masked_fill(mask == 0, -1e9)\n attn_probs = torch.softmax(attn_scores, dim=-1)\n output = torch.matmul(attn_probs, V)\n return output\n\n# result = scaled_dot_product_attention(Q, K, V, mask)\n", "pytorch_test_code": {"setup_code": "import math\nQ = torch.rand(5, 10, 20)\nK = torch.rand(5, 10, 20)\nV = torch.rand(5, 10, 20)\nmask = torch.randint(0, 2, (5, 10, 10))\n", "test_cases": ["assert scaled_dot_product_attention(Q, K, V, mask).shape == torch.Size([5, 10, 20])", "assert scaled_dot_product_attention(Q, K, V).shape == torch.Size([5, 10, 20])"]}, "tensorflow_library": "import tensorflow as tf\nimport math", "tensorflow_start_code": "# def scaled_dot_product_attention(Q, K, V, mask=None):\n# result = ", "tensorflow_sol_code": "def scaled_dot_product_attention(Q, K, V, mask=None):\n matmul_qk = tf.matmul(Q, K, transpose_b=True)\n depth = tf.cast(tf.shape(K)[-1], tf.float32)\n logits = matmul_qk / tf.math.sqrt(depth)\n if mask is not None:\n mask = tf.cast(mask, tf.float32)\n logits += (mask * -1e9)\n attention_weights = tf.nn.softmax(logits, axis=-1)\n output = tf.matmul(attention_weights, V)\n return output\n\n# result = scaled_dot_product_attention(Q, K, V, mask)", "tensorflow_test_code": {"setup_code": "import tensorflow as tf\nimport math\nQ = tf.random.uniform((5, 10, 20))\nK = tf.random.uniform((5, 10, 20))\nV = tf.random.uniform((5, 10, 20))\nmask = tf.random.uniform((5, 10, 10), maxval=2, dtype=tf.int32)", "test_cases": ["assert scaled_dot_product_attention(Q, K, V, mask).shape == (5, 10, 20)", "assert scaled_dot_product_attention(Q, K, V).shape == (5, 10, 20)"]}} |
| {"question_id": 54, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\n# def split_heads(num_heads, d_k, x):\n", "pytorch_sol_code": "def split_heads(num_heads, d_k, x):\n batch_size, seq_length, d_model = x.size()\n return x.view(batch_size, seq_length, num_heads, d_k).transpose(1, 2)\n# result = split_heads(num_heads, d_k, x)", "pytorch_test_code": {"setup_code": "\nimport torch\nnum_heads = 2\nd_k = 64\nx = torch.rand(32, 10, 128)\n", "test_cases": ["assert split_heads(num_heads, d_k, x).shape == (32, 2, 10, 64)"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "# def split_heads(num_heads, d_k, x):", "tensorflow_sol_code": "def split_heads(num_heads, d_k, x):\n batch_size, seq_length, d_model = x.shape\n x = tf.reshape(x, (batch_size, seq_length, num_heads, d_k))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n# result = split_heads(num_heads, d_k, x)", "tensorflow_test_code": {"setup_code": "import tensorflow as tf\nnum_heads = 2\nd_k = 64\nx = tf.random.uniform((32, 10, 128))", "test_cases": ["assert split_heads(num_heads, d_k, x).shape == (32, 2, 10, 64)"]}} |
| {"question_id": 55, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\n# Define the combine_heads function\n# def combine_heads(d_model, x):\n# return\n", "pytorch_sol_code": "def combine_heads(d_model, x):\n batch_size, _, seq_length, d_k = x.size()\n return x.transpose(1, 2).contiguous().view(batch_size, seq_length, d_model)", "pytorch_test_code": {"setup_code": "", "test_cases": ["x = torch.randn(2, 8, 10, 64)\nd_model = 8 * 64\nresult = combine_heads(d_model, x)\nassert result.shape == (2, 10, 512)", "x = torch.randn(3, 4, 5, 32)\nd_model = 4 * 32\nresult = combine_heads(d_model, x)\nassert result.shape == (3, 5, 128)", "x = torch.randn(1, 2, 3, 16)\nd_model = 2 * 16\nresult = combine_heads(d_model, x)\nassert result.shape == (1, 3, 32)"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "# Define the combine_heads function\n# def combine_heads(d_model, x):\n# return", "tensorflow_sol_code": "def combine_heads(d_model, x):\n batch_size, num_heads, seq_length, depth = tf.shape(x)\n x = tf.transpose(x, perm=[0, 2, 1, 3])\n return tf.reshape(x, (batch_size, seq_length, d_model))", "tensorflow_test_code": {"setup_code": "", "test_cases": ["x = tf.random.normal((2, 8, 10, 64))\nd_model = 8 * 64\nresult = combine_heads(d_model, x)\nassert result.shape == (2, 10, 512)", "x = tf.random.normal((3, 4, 5, 32))\nd_model = 4 * 32\nresult = combine_heads(d_model, x)\nassert result.shape == (3, 5, 128)", "x = tf.random.normal((1, 2, 3, 16))\nd_model = 2 * 16\nresult = combine_heads(d_model, x)\nassert result.shape == (1, 3, 32)"]}} |
| {"question_id": 56, "pytorch_library": "import torch\nimport torch.nn as nn\nimport math", "pytorch_start_code": "\nclass MultiHeadAttention(nn.Module):\n def __init__(self, d_model, num_heads):\n super().__init__()\n assert d_model % num_heads == 0, \"d_model must be divisible by num_heads\"\n\n self.d_model = d_model\n self.num_heads = num_heads\n self.d_k = d_model // num_heads\n\n self.W_q = nn.Linear(d_model, d_model)\n self.W_k = nn.Linear(d_model, d_model)\n self.W_v = nn.Linear(d_model, d_model)\n self.W_o = nn.Linear(d_model, d_model)\n\n def scaled_dot_product_attention(self, Q, K, V, mask=None):\n attn_scores = torch.matmul(\n Q, K.transpose(-2, -1)) / math.sqrt(self.d_k)\n if mask is not None:\n attn_scores = attn_scores.masked_fill(mask == 0, -1e9)\n attn_probs = torch.softmax(attn_scores, dim=-1)\n output = torch.matmul(attn_probs, V)\n return output\n\n def split_heads(self, x):\n batch_size, seq_length, d_model = x.size()\n return x.view(batch_size, seq_length, self.num_heads, self.d_k).transpose(1, 2)\n\n def combine_heads(self, x):\n batch_size, _, seq_length, d_k = x.size()\n return x.transpose(1, 2).contiguous().view(batch_size, seq_length, self.d_model)\n\n # def forward(self, Q, K, V, mask=None):\n\n# model = MultiHeadAttention(d_model, num_heads)\n", "pytorch_sol_code": "\nclass MultiHeadAttention(nn.Module):\n def __init__(self, d_model, num_heads):\n super().__init__()\n assert d_model % num_heads == 0, \"d_model must be divisible by num_heads\"\n\n self.d_model = d_model\n self.num_heads = num_heads\n self.d_k = d_model // num_heads\n\n self.W_q = nn.Linear(d_model, d_model)\n self.W_k = nn.Linear(d_model, d_model)\n self.W_v = nn.Linear(d_model, d_model)\n self.W_o = nn.Linear(d_model, d_model)\n\n def scaled_dot_product_attention(self, Q, K, V, mask=None):\n attn_scores = torch.matmul(\n Q, K.transpose(-2, -1)) / math.sqrt(self.d_k)\n if mask is not None:\n attn_scores = attn_scores.masked_fill(mask == 0, -1e9)\n attn_probs = 
torch.softmax(attn_scores, dim=-1)\n output = torch.matmul(attn_probs, V)\n return output\n\n def split_heads(self, x):\n batch_size, seq_length, d_model = x.size()\n return x.view(batch_size, seq_length, self.num_heads, self.d_k).transpose(1, 2)\n\n def combine_heads(self, x):\n batch_size, _, seq_length, d_k = x.size()\n return x.transpose(1, 2).contiguous().view(batch_size, seq_length, self.d_model)\n\n def forward(self, Q, K, V, mask=None):\n Q = self.split_heads(self.W_q(Q))\n K = self.split_heads(self.W_k(K))\n V = self.split_heads(self.W_v(V))\n \n attn_output = self.scaled_dot_product_attention(Q, K, V, mask)\n output = self.W_o(self.combine_heads(attn_output))\n return output\n\n# model = MultiHeadAttention(d_model, num_heads)\n", "pytorch_test_code": {"setup_code": "\nd_model = 512\nnum_heads = 8\nmodel = MultiHeadAttention(d_model, num_heads)\n", "test_cases": ["assert isinstance(model, nn.Module)", "Q, K, V = torch.rand(5, 10, 512), torch.rand(5, 10, 512), torch.rand(5, 10, 512)\noutput = model(Q, K, V)\nassert output.shape == (5, 10, 512)", "mask = torch.zeros(5, 10, 10).unsqueeze(1).repeat(1, 8, 1, 1)\noutput = model(Q, K, V, mask)\nassert output.shape == (5, 10, 512)"]}, "tensorflow_library": "import tensorflow as tf\nimport math", "tensorflow_start_code": "\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads):\n super(MultiHeadAttention, self).__init__()\n assert d_model % num_heads == 0, \"d_model must be divisible by num_heads\"\n \n self.num_heads = num_heads\n self.d_model = d_model\n self.d_k = d_model // num_heads\n \n self.W_q = tf.keras.layers.Dense(d_model)\n self.W_k = tf.keras.layers.Dense(d_model)\n self.W_v = tf.keras.layers.Dense(d_model)\n self.W_o = tf.keras.layers.Dense(d_model)\n \n def scaled_dot_product_attention(self, Q, K, V, mask=None):\n matmul_qk = tf.matmul(Q, K, transpose_b=True)\n depth = tf.cast(self.d_k, tf.float32)\n logits = matmul_qk / tf.math.sqrt(depth)\n if mask is not 
None:\n logits += (mask * -1e9)\n attention_weights = tf.nn.softmax(logits, axis=-1)\n output = tf.matmul(attention_weights, V)\n return output\n \n def split_heads(self, x):\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.d_k))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n \n def combine_heads(self, x):\n batch_size = tf.shape(x)[0]\n x = tf.transpose(x, perm=[0, 2, 1, 3])\n return tf.reshape(x, (batch_size, -1, self.d_model))\n \n # def call(self, Q, K, V, mask=None):\n", "tensorflow_sol_code": "\nclass MultiHeadAttention(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads):\n super().__init__()\n assert d_model % num_heads == 0, \"d_model must be divisible by num_heads\"\n \n self.num_heads = num_heads\n self.d_model = d_model\n self.d_k = d_model // num_heads\n \n self.W_q = tf.keras.layers.Dense(d_model)\n self.W_k = tf.keras.layers.Dense(d_model)\n self.W_v = tf.keras.layers.Dense(d_model)\n self.W_o = tf.keras.layers.Dense(d_model)\n \n def scaled_dot_product_attention(self, Q, K, V, mask=None):\n matmul_qk = tf.matmul(Q, K, transpose_b=True)\n depth = tf.cast(self.d_k, tf.float32)\n logits = matmul_qk / tf.math.sqrt(depth)\n if mask is not None:\n logits += (mask * -1e9)\n attention_weights = tf.nn.softmax(logits, axis=-1)\n output = tf.matmul(attention_weights, V)\n return output\n \n def split_heads(self, x):\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.d_k))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n \n def combine_heads(self, x):\n batch_size = tf.shape(x)[0]\n x = tf.transpose(x, perm=[0, 2, 1, 3])\n return tf.reshape(x, (batch_size, -1, self.d_model))\n \n def call(self, Q, K, V, mask=None):\n Q = self.split_heads(self.W_q(Q))\n K = self.split_heads(self.W_k(K))\n V = self.split_heads(self.W_v(V))\n \n attn_output = self.scaled_dot_product_attention(Q, K, V, mask)\n attn_output = self.combine_heads(attn_output)\n output = self.W_o(attn_output)\n return 
output", "tensorflow_test_code": {"setup_code": "\nd_model = 512\nnum_heads = 8\nmodel = MultiHeadAttention(d_model, num_heads)", "test_cases": ["assert isinstance(model, tf.keras.layers.Layer)", "Q, K, V = tf.random.uniform((5, 10, 512)), tf.random.uniform((5, 10, 512)), tf.random.uniform((5, 10, 512))\noutput = model(Q, K, V)\nassert output.shape == (5, 10, 512)", "mask = tf.zeros((5, 10, 10))\nmask = tf.expand_dims(mask, 1)\nmask = tf.tile(mask, [1, 8, 1, 1])\noutput = model(Q, K, V, mask)\nassert output.shape == (5, 10, 512)"]}} |
| {"question_id": 57, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "class PositionWiseFeedForward(nn.Module):\n    def __init__(self, d_model, d_ff):\n        super(PositionWiseFeedForward, self).__init__()\n        self.fc1 = nn.Linear(d_model, d_ff)\n        self.fc2 = nn.Linear(d_ff, d_model)\n        self.relu = nn.ReLU()\n\n    # def forward(self, x):\n# result =", "pytorch_sol_code": "\nclass PositionWiseFeedForward(nn.Module):\n    def __init__(self, d_model, d_ff):\n        super().__init__()\n        self.fc1 = nn.Linear(d_model, d_ff)\n        self.fc2 = nn.Linear(d_ff, d_model)\n        self.relu = nn.ReLU()\n\n    def forward(self, x):\n        return self.fc2(self.relu(self.fc1(x)))\n\n# model = PositionWiseFeedForward(d_model, d_ff)\n", "pytorch_test_code": {"setup_code": "\nimport torch.nn as nn\nd_model = 512\nd_ff = 2048\nx = torch.rand(10, d_model)\nmodel = PositionWiseFeedForward(d_model, d_ff)\n", "test_cases": ["assert model.fc1.in_features == d_model and model.fc1.out_features == d_ff, 'First linear layer configuration error'", "assert model.fc2.in_features == d_ff and model.fc2.out_features == d_model, 'Second linear layer configuration error'", "assert model.forward(x).shape == (10, d_model), 'Forward function output shape error'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nclass PositionWiseFeedForward(tf.keras.layers.Layer):\n    def __init__(self, d_model, d_ff):\n        super().__init__()\n        self.fc1 = tf.keras.layers.Dense(d_ff, activation='relu')\n        self.fc2 = tf.keras.layers.Dense(d_model)\n\n# def call(self, x):\n# model =\n\n", "tensorflow_sol_code": "\nclass PositionWiseFeedForward(tf.keras.layers.Layer):\n    def __init__(self, d_model, d_ff):\n        super().__init__()\n        self.fc1 = tf.keras.layers.Dense(d_ff, activation='relu')\n        self.fc2 = tf.keras.layers.Dense(d_model)\n\n    def call(self, x):\n        x = self.fc1(x)\n        return self.fc2(x)\n\n# model = PositionWiseFeedForward(d_model, d_ff)\n", "tensorflow_test_code": {"setup_code": "import tensorflow as tf\nd_model = 512\nd_ff = 2048\nx = tf.random.uniform((10, d_model))\nmodel = PositionWiseFeedForward(d_model, d_ff)", "test_cases": ["assert model.fc1.units == d_ff and model.fc1.activation == tf.keras.activations.relu, 'First Dense layer configuration error'", "assert model.fc2.units == d_model, 'Second Dense layer configuration error'\n", "assert model.call(x).shape == (10, d_model), 'Call function output shape error'"]}} |
| {"question_id": 58, "pytorch_library": "import torch\nimport torch.nn as nn\nimport math", "pytorch_start_code": "\nclass PositionalEncoding(nn.Module):\n    def __init__(self, d_model, max_seq_length):\n        super().__init__()\n        # Initialize the positional encoding layer\n    \n    def forward(self, x):\n        # Apply positional encoding to x\n        # result = ...\n        pass\n", "pytorch_sol_code": "\nclass PositionalEncoding(nn.Module):\n    def __init__(self, d_model, max_seq_length):\n        super().__init__()\n        pe = torch.zeros(max_seq_length, d_model)\n        position = torch.arange(0, max_seq_length, dtype=torch.float).unsqueeze(1)\n        div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model))\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n        self.register_buffer('pe', pe.unsqueeze(0))\n    \n    def forward(self, x):\n        return x + self.pe[:, :x.size(1)]\n\n# result = PositionalEncoding(d_model, max_seq_length)\n", "pytorch_test_code": {"setup_code": "d_model = 512\nmax_seq_length = 100\nx = torch.randn(32, 100, 512)", "test_cases": ["pos_encoding = PositionalEncoding(d_model, max_seq_length)\nresult = pos_encoding(x)\nassert result.shape == x.shape, 'Output shape should match input shape'", "expected_result = x + pos_encoding.pe[:, :x.size(1)]\nassert torch.allclose(result, expected_result,atol=1e-6), \"The positional encodings are not added correctly.\"", "assert torch.all(result[:, :, 1::2] != x[:, :, 1::2]), 'Cosine encoding not applied correctly'"]}, "tensorflow_library": "import tensorflow as tf\nimport math", "tensorflow_start_code": "\nclass PositionalEncoding(tf.keras.layers.Layer):\n    def __init__(self, d_model, max_seq_length):\n        super().__init__()\n        # Initialize the positional encoding layer\n    \n    def call(self, x):\n        # Apply positional encoding to x\n        # result = ...\n        pass\n\n", "tensorflow_sol_code": "\nclass PositionalEncoding(tf.keras.layers.Layer):\n    def __init__(self, d_model, max_seq_length):\n        super().__init__()\n        position = tf.range(0, max_seq_length, dtype=tf.float32)[..., tf.newaxis]\n        i = tf.range(0, d_model, 2, dtype=tf.float32)\n        div_term = tf.exp(i * -(math.log(10000.0) / d_model))\n        pe = tf.concat([tf.sin(position * div_term), tf.cos(position * div_term)], axis=-1)\n        pe = pe[tf.newaxis, ...]\n        self.pe = tf.cast(pe, tf.float32)\n    \n    def call(self, x):\n        return x + self.pe[:, :tf.shape(x)[1], :]\n\n# result = PositionalEncoding(d_model, max_seq_length)\n", "tensorflow_test_code": {"setup_code": "d_model = 512\nmax_seq_length = 100\nx = tf.random.uniform((32, 100, 512))", "test_cases": ["pos_encoding = PositionalEncoding(d_model, max_seq_length)\nresult = pos_encoding(x)\nassert result.shape == x.shape, 'Output shape should match input shape'", "assert not tf.reduce_all(tf.equal(result[:, :, 0::2], x[:, :, 0::2])), 'Sine encoding not applied correctly'", "assert not tf.reduce_all(tf.equal(result[:, :, 1::2], x[:, :, 1::2])), 'Cosine encoding not applied correctly'"]}} |
| {"question_id": 59, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class SelfAttention(nn.Module):\n    def __init__(self, input_dim):\n        super().__init__()\n    \n    def forward(self, x): # x.shape (batch_size, seq_length, input_dim)\n        #weighted = ...\n        #return weighted\n        pass\n# model = SelfAttention(input_dim)", "pytorch_sol_code": "\nclass SelfAttention(nn.Module):\n    def __init__(self, input_dim):\n        super().__init__()\n        self.input_dim = input_dim\n        self.query = nn.Linear(input_dim, input_dim) # [batch_size, seq_length, input_dim]\n        self.key = nn.Linear(input_dim, input_dim) # [batch_size, seq_length, input_dim]\n        self.value = nn.Linear(input_dim, input_dim)\n        self.softmax = nn.Softmax(dim=2)\n    \n    def forward(self, x): # x.shape (batch_size, seq_length, input_dim)\n        queries = self.query(x)\n        keys = self.key(x)\n        values = self.value(x)\n\n        scores = torch.bmm(queries, keys.transpose(1, 2))/(self.input_dim**0.5)\n        attention = self.softmax(scores)\n        weighted = torch.bmm(attention, values)\n        return weighted\n\n# model = SelfAttention(input_dim)\n", "pytorch_test_code": {"setup_code": "import torch\ninput_dim = 64\nmodel = SelfAttention(input_dim)\nx = torch.rand(10, 20, input_dim)", "test_cases": ["assert model(x).shape == torch.Size([10, 20, input_dim]), 'Output shape is incorrect'", "assert isinstance(model.query, nn.Linear) and model.query.in_features == input_dim, 'Query layer configuration error'", "assert isinstance(model.key, nn.Linear) and model.key.in_features == input_dim, 'Key layer configuration error'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "class SelfAttention(tf.keras.layers.Layer):\n    def __init__(self, input_dim):\n        super().__init__()\n    \n    def call(self, x): # x.shape (batch_size, seq_length, input_dim)\n        #weighted = ...\n        #return weighted\n        pass\n# model = SelfAttention(input_dim)", "tensorflow_sol_code": "\nclass SelfAttention(tf.keras.layers.Layer):\n    def __init__(self, input_dim):\n        super().__init__()\n        self.input_dim = input_dim\n        self.query = tf.keras.layers.Dense(input_dim)\n        self.key = tf.keras.layers.Dense(input_dim)\n        self.value = tf.keras.layers.Dense(input_dim)\n    \n    def call(self, x): # x.shape (batch_size, seq_length, input_dim)\n        queries = self.query(x)\n        keys = self.key(x)\n        values = self.value(x)\n\n        scores = tf.matmul(queries, keys, transpose_b=True) / (self.input_dim ** 0.5)\n        attention = tf.nn.softmax(scores, axis=-1)\n        weighted = tf.matmul(attention, values)\n        return weighted\n\n# model = SelfAttention(input_dim)\n", "tensorflow_test_code": {"setup_code": "import tensorflow as tf\ninput_dim = 64\nmodel = SelfAttention(input_dim)\nx = tf.random.uniform((10, 20, input_dim))", "test_cases": ["assert model(x).shape == (10, 20, input_dim), 'Output shape is incorrect'", "assert isinstance(model.query, tf.keras.layers.Dense) and model.query.units == input_dim, 'Query layer configuration error'", "assert isinstance(model.key, tf.keras.layers.Dense) and model.key.units == input_dim, 'Key layer configuration error'"]}} |
| {"question_id": 60, "pytorch_library": "import torch", "pytorch_start_code": "def softmax(x):\n pass", "pytorch_sol_code": "def softmax(x):\n exp_x = torch.exp(x - torch.max(x))\n return exp_x / torch.sum(exp_x, dim=-1, keepdim=True)", "pytorch_test_code": {"setup_code": "x = torch.tensor([1.0, 2.0, 3.0]).float()", "test_cases": ["assert torch.allclose(softmax(x), torch.tensor([0.0900, 0.2447, 0.6652]), atol=1e-4), 'Test failed: Softmax output is not as expected'", "large_values = torch.tensor([1000.0, 1000.0, 1000.0]).float()\nassert torch.allclose(softmax(large_values), torch.tensor([0.3333, 0.3333, 0.3333]), atol=1e-4), 'Test failed: Softmax fails with large numbers'", "mixed_values = torch.tensor([0.0, -1.0, -3.0]).float()\nassert torch.allclose(softmax(mixed_values), torch.tensor([0.7054, 0.2595, 0.0351]), atol=1e-4), 'Test failed: Softmax fails with negative and zero values'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "def softmax(x):\n pass", "tensorflow_sol_code": "def softmax(x):\n exp_x = tf.exp(x - tf.reduce_max(x))\n return exp_x / tf.reduce_sum(exp_x, axis=-1, keepdims=True)", "tensorflow_test_code": {"setup_code": "x = tf.constant([1.0, 2.0, 3.0], dtype=tf.float32)", "test_cases": ["assert tf.experimental.numpy.allclose(softmax(x), tf.constant([0.09003057, 0.24472848, 0.66524094], dtype=tf.float32), atol=1e-4), 'Test failed: Softmax output is not as expected'", "large_values = tf.constant([1000.0, 1000.0, 1000.0], dtype=tf.float32)\nassert tf.experimental.numpy.allclose(softmax(large_values), tf.constant([0.3333, 0.3333, 0.3333], dtype=tf.float32), atol=1e-4), 'Test failed: Softmax fails with large numbers'", "mixed_values = tf.constant([0.0, -1.0, -3.0], dtype=tf.float32)\nassert tf.experimental.numpy.allclose(softmax(mixed_values), tf.constant([0.70538455,0.25949648,0.03511903], dtype=tf.float32), atol=1e-4), 'Test failed: Softmax fails with negative and zero values'"]}} |
| {"question_id": 61, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "\nx_values = [i for i in range(11)]\n# x_train = ....\ny_values = [2*i + 1 for i in x_values]\n# y_train = ...\n", "pytorch_sol_code": "x_values = [i for i in range(11)]\nx_train = np.array(x_values, dtype=np.float32)\nx_train = x_train.reshape(-1, 1)\nx_train = torch.from_numpy(x_train)\ny_values = [2*i + 1 for i in x_values]\ny_train = np.array(y_values, dtype=np.float32)\ny_train = y_train.reshape(-1, 1)\ny_train = torch.from_numpy(y_train)\n\n# dataset = (x_train, y_train)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert x_train.shape == (11, 1), 'The shape of x_train should be (11, 1)'", "assert y_train.shape == (11, 1), 'The shape of y_train should be (11, 1)'", "assert torch.equal(x_train[5], torch.tensor([5.0])), 'The fifth element of x_train should be tensor([5.0])'"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\nx_values = [i for i in range(11)]\n# x_train = ....\ny_values = [2*i + 1 for i in x_values]\n# y_train = ...\n\n", "tensorflow_sol_code": "x_values = [i for i in range(11)]\nx_train = np.array(x_values, dtype=np.float32)\nx_train = x_train.reshape(-1, 1)\nx_train = tf.convert_to_tensor(x_train)\ny_values = [2*i + 1 for i in x_values]\ny_train = np.array(y_values, dtype=np.float32)\ny_train = y_train.reshape(-1, 1)\ny_train = tf.convert_to_tensor(y_train)\n\n# dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert x_train.shape == (11, 1), 'The shape of x_train should be (11, 1)'", "assert y_train.shape == (11, 1), 'The shape of y_train should be (11, 1)'", "assert tf.reduce_all(tf.equal(x_train[5], tf.constant([5.0]))), 'The fifth element of x_train should be tf.constant([5.0])'"]}} |
| {"question_id": 62, "pytorch_library": "import torch\nfrom torch.autograd import Variable", "pytorch_start_code": "\nclass LinearRegression(torch.nn.Module):\n    def __init__(self, inputSize, outputSize):\n        super().__init__()\n        # self.linear = \n    def forward(self, x):\n        pass\n# model = LinearRegression(inputSize, outputSize)\n", "pytorch_sol_code": "class LinearRegression(torch.nn.Module):\n    def __init__(self, inputSize, outputSize):\n        super().__init__()\n        self.linear = torch.nn.Linear(inputSize, outputSize)\n\n    def forward(self, x):\n        out = self.linear(x)\n        return out\n\n# model = LinearRegression(inputSize, outputSize)", "pytorch_test_code": {"setup_code": "inputSize = 2\noutputSize = 1\nmodel = LinearRegression(inputSize, outputSize)\ninput_tensor = torch.tensor([[1.0, 2.0]])", "test_cases": ["assert model.linear.in_features == inputSize, 'Incorrect input size'", "assert model.linear.out_features == outputSize, 'Incorrect output size'", "assert model(input_tensor).shape == (1, outputSize), 'Incorrect output shape from forward method'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nclass LinearRegression(tf.keras.Model):\n    def __init__(self, inputSize, outputSize):\n        super().__init__()\n        # self.linear = \n    def call(self, x):\n        pass\n# model = LinearRegression(inputSize, outputSize)\n\n", "tensorflow_sol_code": "class LinearRegression(tf.keras.Model):\n    def __init__(self, inputSize, outputSize):\n        super().__init__()\n        self.linear = tf.keras.layers.Dense(outputSize, input_shape=(inputSize,))\n\n    def call(self, x):\n        return self.linear(x)\n\n# model = LinearRegression(inputSize, outputSize)", "tensorflow_test_code": {"setup_code": "inputSize = 2\noutputSize = 1\nmodel = LinearRegression(inputSize, outputSize)\ninput_tensor = tf.constant([[1.0, 2.0]])\noutput_tensor = model(input_tensor)", "test_cases": ["\noutput_tensor = model(input_tensor)\nassert output_tensor.shape == (1, 1), 'Output shape should be (1, 1)'\nassert model.linear.weights[0].shape == (2, 1), 'Weight shape should be (2, 1)'\n", "assert model.linear.units == outputSize, 'Incorrect output size'", "assert model(input_tensor).shape == (1, outputSize), 'Incorrect output shape from call method'"]}} |
| {"question_id": 63, "pytorch_library": "import torch\nfrom torch.autograd import Variable", "pytorch_start_code": "class linearRegression(torch.nn.Module):\n def __init__(self, inputSize, outputSize):\n super().__init__()\n self.linear = torch.nn.Linear(inputSize, outputSize)\n\n def forward(self, x):\n out = self.linear(x)\n return out\ninputDim = 1 # takes variable 'x' \noutputDim = 1 # takes variable 'y'\nmodel = linearRegression(inputDim, outputDim)\n# learningRate = ...\n# optimizer = ...", "pytorch_sol_code": "learningRate = 0.01\noptimizer = torch.optim.SGD(model.parameters(), lr=learningRate)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, torch.optim.SGD), 'Optimizer should be an instance of torch.optim.SGD'", "assert optimizer.param_groups[0]['lr'] == 0.01, 'Learning rate should be 0.01'", "assert len(optimizer.param_groups[0]['params']) == 2, 'Optimizer should have parameters for both weight and bias'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\ninputDim = 1\noutputDim = 1\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(outputDim, input_shape=(inputDim,))\n])\n# learningRate = ...\n# optimizer = ...\n\n", "tensorflow_sol_code": "learningRate = 0.01\noptimizer = tf.keras.optimizers.SGD(learning_rate=learningRate)", "tensorflow_test_code": {"setup_code": "inputDim = 1\noutputDim = 1", "test_cases": ["assert isinstance(optimizer, tf.keras.optimizers.SGD), 'Optimizer should be an instance of tf.keras.optimizers.SGD'", "assert optimizer.learning_rate == 0.01, 'Learning rate should be 0.01'", "assert hasattr(model.layers[0], 'kernel'), 'Model should have at least one layer with weights'"]}} |
| {"question_id": 64, "pytorch_library": "import torch\nimport numpy as np\nfrom torch.autograd import Variable", "pytorch_start_code": "x_values = [i for i in range(11)]\nx_train = np.array(x_values, dtype=np.float32)\nx_train = x_train.reshape(-1, 1)\nx_train = torch.from_numpy(x_train)\ny_values = [2*i + 1 for i in x_values]\ny_train = np.array(y_values, dtype=np.float32)\ny_train = y_train.reshape(-1, 1)\ny_train = torch.from_numpy(y_train)\nclass linearRegression(torch.nn.Module):\n    def __init__(self, inputSize, outputSize):\n        super().__init__()\n        self.linear = torch.nn.Linear(inputSize, outputSize)\n\n    def forward(self, x):\n        out = self.linear(x)\n        return out\ninputDim = 1\noutputDim = 1\nmodel = linearRegression(inputDim, outputDim)\nlearningRate = 0.01\noptimizer = torch.optim.SGD(model.parameters(), lr=learningRate)\nepochs = 10\nlosses = []\nfor epoch in range(epochs):\n    pass\n# losses = ", "pytorch_sol_code": "criterion = torch.nn.MSELoss()\nfor epoch in range(epochs):\n    inputs = Variable(x_train)\n    labels = Variable(y_train)\n    optimizer.zero_grad()\n    outputs = model(inputs)\n    loss = criterion(outputs, labels)\n    loss.backward()\n    optimizer.step()\n    losses.append(loss.item())\n# losses = losses", "pytorch_test_code": {"setup_code": "import torch\nimport numpy as np\nfrom torch.autograd import Variable\nclass linearRegression(torch.nn.Module):\n    def __init__(self, inputSize, outputSize):\n        super().__init__()\n        self.linear = torch.nn.Linear(inputSize, outputSize)\n\n    def forward(self, x):\n        out = self.linear(x)\n        return out\ninputDim = 1\noutputDim = 1\nmodel = linearRegression(inputDim, outputDim)\nlearningRate = 0.01\noptimizer = torch.optim.SGD(model.parameters(), lr=learningRate)\ncriterion = torch.nn.MSELoss()", "test_cases": ["x_values = [i for i in range(11)]\nx_train = np.array(x_values, dtype=np.float32)\nx_train = x_train.reshape(-1, 1)\nx_train = torch.from_numpy(x_train)\ny_values = [2*i + 1 for i in x_values]\ny_train = np.array(y_values, dtype=np.float32)\ny_train = y_train.reshape(-1, 1)\ny_train = torch.from_numpy(y_train)\nepochs = 10\nlosses = []\nfor epoch in range(epochs):\n    inputs = Variable(x_train)\n    labels = Variable(y_train)\n    optimizer.zero_grad()\n    outputs = model(inputs)\n    loss = criterion(outputs, labels)\n    loss.backward()\n    optimizer.step()\n    losses.append(loss.item())\nassert len(losses) == 10, 'The losses list should contain 10 elements for 10 epochs'", "assert losses[0] > losses[-1], 'The loss should decrease over epochs'", "assert isinstance(losses[0], float), 'Losses should be recorded as float values'"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "x_values = [i for i in range(11)]\nx_train = np.array(x_values, dtype=np.float32)\nx_train = x_train.reshape(-1, 1)\ny_values = [2*i + 1 for i in x_values]\ny_train = np.array(y_values, dtype=np.float32)\ny_train = y_train.reshape(-1, 1)\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(units=1, input_shape=[1])\n])\nmodel.compile(optimizer='sgd', loss='mean_squared_error')\nepochs = 10\nlosses = []\nfor epoch in range(epochs):\n    pass\n# losses = ", "tensorflow_sol_code": "for epoch in range(epochs):\n    history = model.fit(x_train, y_train, epochs=1, verbose=0)\n    losses.append(history.history['loss'][0])\n# losses = losses", "tensorflow_test_code": {"setup_code": "import tensorflow as tf\nimport numpy as np\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(units=1, input_shape=[1])\n])\nmodel.compile(optimizer='sgd', loss='mean_squared_error')", "test_cases": ["x_values = [i for i in range(11)]\nx_train = np.array(x_values, dtype=np.float32)\nx_train = x_train.reshape(-1, 1)\ny_values = [2*i + 1 for i in x_values]\ny_train = np.array(y_values, dtype=np.float32)\ny_train = y_train.reshape(-1, 1)\nepochs = 10\nlosses = []\nfor epoch in range(epochs):\n    history = model.fit(x_train, y_train, epochs=1, verbose=0)\n    losses.append(history.history['loss'][0])\nassert len(losses) == 10, 'The losses list should contain 10 elements for 10 epochs'", "assert losses[0] > losses[-1], 'The loss should decrease over epochs'", "assert isinstance(losses[0], float), 'Losses should be recorded as float values'"]}} |
| {"question_id": 65, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "x_values = [i for i in range(11)]\ny_values = [1 if i > 5 else 0 for i in x_values]", "pytorch_sol_code": "x_train = torch.tensor(x_values, dtype=torch.float32).view(-1, 1)\ny_train = torch.tensor(y_values, dtype=torch.float32).view(-1, 1)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert x_train.shape == (11, 1), 'The shape of x_train should be (11, 1)'", "assert y_train.shape == (11, 1), 'The shape of y_train should be (11, 1)'", "assert y_train.sum() == 5, 'There should be five positive examples'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "x_values = [i for i in range(11)]\ny_values = [1 if i > 5 else 0 for i in x_values]", "tensorflow_sol_code": "x_train = tf.constant(x_values, dtype=tf.float32, shape=[11, 1])\ny_train = tf.constant(y_values, dtype=tf.float32, shape=[11, 1])", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert x_train.shape == (11, 1), 'The shape of x_train should be (11, 1)'", "assert y_train.shape == (11, 1), 'The shape of y_train should be (11, 1)'", "assert tf.reduce_sum(y_train) == 5, 'There should be five positive examples'"]}} |
| {"question_id": 66, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "class LogisticRegression(nn.Module):\n def __init__(self, inputSize, outputSize):\n super().__init__()\n # self.linear = ...\n # self.sigmoid = ...", "pytorch_sol_code": "class LogisticRegression(nn.Module):\n def __init__(self, inputSize, outputSize):\n super().__init__()\n self.linear = nn.Linear(inputSize, outputSize)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n return self.sigmoid(self.linear(x))", "pytorch_test_code": {"setup_code": "model = LogisticRegression(1, 1)\ninput_tensor = torch.tensor([[1.0]])", "test_cases": ["assert isinstance(model.linear, nn.Linear), 'The model should contain a linear layer'", "assert isinstance(model.sigmoid, nn.Sigmoid), 'The model should contain a sigmoid activation'", "assert model(input_tensor).shape == (1, 1), 'Output shape should be (1, 1)'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "class LogisticRegression(tf.keras.Model):\n def __init__(self, inputSize, outputSize):\n super().__init__()\n # self.linear = ...\n # self.activation = ...", "tensorflow_sol_code": "\nclass LogisticRegression(tf.keras.Model):\n def __init__(self, inputSize, outputSize):\n super().__init__()\n self.linear = tf.keras.layers.Dense(outputSize, activation='sigmoid', input_shape=(inputSize,))\n \n def call(self, x):\n return self.linear(x)\n", "tensorflow_test_code": {"setup_code": "model = LogisticRegression(1, 1)\ninput_tensor = tf.constant([[1.0]])", "test_cases": ["assert model(input_tensor).shape == (1, 1), 'Output shape should be (1, 1)'", "assert isinstance(model.linear, tf.keras.layers.Dense), 'The model should contain a Dense layer'"]}} |
| {"question_id": 67, "pytorch_library": "import torch\nfrom torch.optim import Adam\nimport torch.nn as nn", "pytorch_start_code": "class LogisticRegression(nn.Module):\n def __init__(self, inputSize, outputSize):\n super().__init__()\n self.linear = nn.Linear(inputSize, outputSize)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n return self.sigmoid(self.linear(x))\nmodel = LogisticRegression(1, 1)\n# optimizer = ...", "pytorch_sol_code": "optimizer = Adam(model.parameters(), lr=0.01)", "pytorch_test_code": {"setup_code": "model = LogisticRegression(1, 1)", "test_cases": ["assert isinstance(optimizer, Adam), 'Optimizer should be an instance of torch.optim.Adam'", "assert optimizer.param_groups[0]['lr'] == 0.01, 'Learning rate should be 0.01'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation='sigmoid', input_shape=(1,))])\n# optimizer = ...", "tensorflow_sol_code": "optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)", "tensorflow_test_code": {"setup_code": "model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation='sigmoid', input_shape=(1,))])", "test_cases": ["assert isinstance(optimizer, tf.keras.optimizers.Adam), 'Optimizer should be an instance of tf.keras.optimizers.Adam'", "assert optimizer.learning_rate == 0.01, 'Learning rate should be 0.01'"]}} |
| {"question_id": 68, "pytorch_library": "import torch\nimport torch.nn as nn\nfrom torch.optim import Adam", "pytorch_start_code": "x_train = torch.tensor([[1], [2], [3], [4], [5]], dtype=torch.float32)\ny_train = torch.tensor([[0], [0], [1], [1], [1]], dtype=torch.float32)\nclass LogisticRegression(nn.Module):\n    def __init__(self, inputSize, outputSize):\n        super().__init__()\n        self.linear = nn.Linear(inputSize, outputSize)\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n        return self.sigmoid(self.linear(x))\nmodel = LogisticRegression(1, 1)\noptimizer = Adam(model.parameters(), lr=0.01)\nepochs = 10\nlosses = []\n# training loop here", "pytorch_sol_code": "criterion = nn.BCELoss()\nfor epoch in range(epochs):\n    optimizer.zero_grad()\n    outputs = model(x_train)\n    loss = criterion(outputs, y_train)\n    loss.backward()\n    optimizer.step()\n    losses.append(loss.item())", "pytorch_test_code": {"setup_code": "model = LogisticRegression(1, 1)", "test_cases": ["assert len(losses) == 10, 'The losses list should contain 10 elements for 10 epochs'", "assert losses[0] > losses[-1], 'The loss should decrease over epochs'", "assert isinstance(optimizer, Adam), 'Optimizer should be an instance of torch.optim.Adam'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "x_train = tf.constant([[1], [2], [3], [4], [5]], dtype=tf.float32)\ny_train = tf.constant([[0], [0], [1], [1], [1]], dtype=tf.float32)\nmodel = tf.keras.Sequential([tf.keras.layers.Dense(1, activation='sigmoid', input_shape=(1,))])\nmodel.compile(optimizer='Adam', loss='binary_crossentropy')\nepochs = 10\nlosses = []\n# training loop here", "tensorflow_sol_code": "for epoch in range(epochs):\n    history = model.fit(x_train, y_train, epochs=1, verbose=0)\n    losses.append(history.history['loss'][0])", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert len(losses) == 10, 'The losses list should contain 10 elements for 10 epochs'", "assert losses[0] > losses[-1], 'The loss should decrease over epochs'", "assert type(model.optimizer) == tf.keras.optimizers.Adam, 'Optimizer should be an instance of tf.keras.optimizers.Adam'"]}} |
| {"question_id": 69, "pytorch_library": "import torch", "pytorch_start_code": "values = torch.tensor([0.1, -0.5, 0.7], dtype=torch.float32)\n#result =", "pytorch_sol_code": "atanh_values = torch.atanh(values)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert torch.allclose(atanh_values, torch.tensor([0.1003, -0.5493, 0.8673]), atol=1e-4)", "atanh_values = torch.atanh(torch.tensor([-0.3, 0.2, 0.9], dtype=torch.float32))\nassert torch.allclose(atanh_values, torch.tensor([-0.3095, 0.2027, 1.4722]), atol=1e-4)", "atanh_values = torch.atanh(torch.tensor([0.0, 0.99, -0.99], dtype=torch.float32))\nassert torch.allclose(atanh_values, torch.tensor([0.0, 2.6467, -2.6467]), atol=1e-4)"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "values = tf.constant([0.1, -0.5, 0.7], dtype=tf.float32)\n#result =", "tensorflow_sol_code": "atanh_values = tf.math.atanh(values)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert np.allclose(atanh_values.numpy(), [0.1003, -0.5493, 0.8673], atol=1e-4)", "atanh_values = tf.math.atanh(tf.constant([-0.3, 0.2, 0.9], dtype=tf.float32))\nassert np.allclose(atanh_values.numpy(), [-0.3095, 0.2027, 1.4722], atol=1e-4)", "atanh_values = tf.math.atanh(tf.constant([0.0, 0.99, -0.99], dtype=tf.float32))\nassert np.allclose(atanh_values.numpy(), [0.0, 2.6467, -2.6467], atol=1e-4)"]}} |
| {"question_id": 70, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class RNN(nn.Module):\n    def __init__(self, input_size, hidden_size, output_size):\n        super().__init__()\n\n\n    def forward(self, input, hidden):\n        pass\n\n    def initHidden(self):\n        pass\n\n# n_hidden = 128\n# rnn = RNN(input_size, n_hidden, output_size)", "pytorch_sol_code": "class RNN(nn.Module):\n    def __init__(self, input_size, hidden_size, output_size):\n        super().__init__()\n\n        self.hidden_size = hidden_size\n\n        self.i2h = nn.Linear(input_size, hidden_size)\n        self.h2h = nn.Linear(hidden_size, hidden_size)\n        self.h2o = nn.Linear(hidden_size, output_size)\n        self.softmax = nn.LogSoftmax(dim=1)\n\n    def forward(self, input, hidden):\n        hidden = F.tanh(self.i2h(input) + self.h2h(hidden))\n        output = self.h2o(hidden)\n        output = self.softmax(output)\n        return output, hidden\n\n    def initHidden(self):\n        return torch.zeros(1, self.hidden_size)\n\n# n_hidden = 128\n# rnn = RNN(input_size, n_hidden, output_size)", "pytorch_test_code": {"setup_code": "input_size = 10\nn_hidden = 128\noutput_size = 2\nrnn = RNN(input_size, n_hidden, output_size)\ninput = torch.randn(1, input_size)\nhidden = torch.zeros(1, n_hidden)", "test_cases": ["output, hidden = rnn(input, hidden)\nassert output.shape == (1, output_size), 'Output shape is incorrect'", "assert hidden.shape == (1, n_hidden), 'Hidden state shape is incorrect'", "assert isinstance(rnn.i2h, nn.Linear) and isinstance(rnn.h2h, nn.Linear) and isinstance(rnn.h2o, nn.Linear), 'Model layers are not correctly implemented'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "class RNN(tf.keras.Model):\n    def __init__(self, input_size, hidden_size, output_size):\n        super().__init__()\n\n\n    def call(self, inputs, hidden):\n        pass\n\n    def initHidden(self):\n        pass\n\n# n_hidden = 128\n# rnn = RNN(input_size, n_hidden, output_size)", "tensorflow_sol_code": "class RNN(tf.keras.Model):\n    def __init__(self, input_size, hidden_size, output_size):\n        super().__init__()\n\n        self.hidden_size = hidden_size\n        self.i2h = tf.keras.layers.Dense(hidden_size, activation='tanh')\n        self.h2h = tf.keras.layers.Dense(hidden_size, activation='tanh')\n        self.h2o = tf.keras.layers.Dense(output_size)\n        self.softmax = tf.keras.layers.Activation('softmax')\n\n    def call(self, inputs, hidden):\n        hidden = self.i2h(inputs) + self.h2h(hidden)\n        hidden = tf.nn.tanh(hidden)\n        output = self.h2o(hidden)\n        output = self.softmax(output)\n        return output, hidden\n\n    def initHidden(self):\n        return tf.zeros((1, self.hidden_size))\n\n# n_hidden = 128\n# rnn = RNN(input_size, n_hidden, output_size)", "tensorflow_test_code": {"setup_code": "input_size = 10\nn_hidden = 128\noutput_size = 2\nrnn = RNN(input_size, n_hidden, output_size)\ninputs = tf.random.normal([1, input_size])\nhidden = tf.zeros([1, n_hidden])", "test_cases": ["output, hidden = rnn(inputs, hidden)\nassert output.shape == (1, output_size), 'Output shape is incorrect'", "assert hidden.shape == (1, n_hidden), 'Hidden state shape is incorrect'", "assert isinstance(rnn.i2h, tf.keras.layers.Dense) and isinstance(rnn.h2h, tf.keras.layers.Dense) and isinstance(rnn.h2o, tf.keras.layers.Dense), 'Model layers are not correctly implemented'"]}} |
| {"question_id": 71, "pytorch_library": "import torch", "pytorch_start_code": "def accuracy(y_pred, y_true):\n pass", "pytorch_sol_code": "def accuracy(y_pred, y_true):\n correct = torch.sum(y_pred == y_true)\n total = y_true.size(0)\n return correct.float() / total", "pytorch_test_code": {"setup_code": "y_pred = torch.tensor([0, 2, 1, 3])\ny_true = torch.tensor([0, 1, 2, 3])", "test_cases": ["assert accuracy(y_pred, y_true) == 0.5, 'Test failed: Accuracy should be 0.5'", "assert accuracy(torch.tensor([0, 1, 2, 3]), torch.tensor([0, 1, 2, 3])) == 1, 'Test failed: Accuracy should be 1 for all correct predictions'", "assert accuracy(torch.tensor([0, 0, 0, 0]), torch.tensor([0, 1, 2, 3])) == 0.25, 'Test failed: Accuracy should be 0.25'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "def accuracy(y_pred, y_true):\n pass", "tensorflow_sol_code": "def accuracy(y_pred, y_true):\n correct = tf.reduce_sum(tf.cast(tf.equal(y_pred, y_true), dtype=tf.float32))\n total = tf.cast(tf.size(y_true), dtype=tf.float32)\n return correct / total", "tensorflow_test_code": {"setup_code": "y_pred = tf.constant([0, 2, 1, 3])\ny_true = tf.constant([0, 1, 2, 3])", "test_cases": ["assert accuracy(y_pred, y_true).numpy() == 0.5, 'Test failed: Accuracy should be 0.5'", "assert accuracy(tf.constant([0, 1, 2, 3]), tf.constant([0, 1, 2, 3])).numpy() == 1, 'Test failed: Accuracy should be 1 for all correct predictions'", "assert accuracy(tf.constant([0, 0, 0, 0]), tf.constant([0, 1, 2, 3])).numpy() == 0.25, 'Test failed: Accuracy should be 0.25'"]}} |
| {"question_id": 72, "pytorch_library": "import torch", "pytorch_start_code": "def precision(y_pred, y_true):\n pass", "pytorch_sol_code": "def precision(y_pred, y_true):\n true_positive = torch.sum((y_pred == 1) & (y_true == 1))\n predicted_positive = torch.sum(y_pred == 1)\n return true_positive.float() / predicted_positive", "pytorch_test_code": {"setup_code": "y_pred = torch.tensor([1, 0, 1, 0])\ny_true = torch.tensor([1, 0, 0, 1])", "test_cases": ["assert precision(y_pred, y_true) == 0.5, 'Test failed: Precision should be 0.5'", "assert precision(torch.tensor([1, 1, 1, 1]), torch.tensor([1, 1, 1, 1])) == 1, 'Test failed: Precision should be 1'", "assert precision(torch.tensor([1, 1, 1, 1]), torch.tensor([0, 0, 0, 0])) == 0, 'Test failed: Precision should be 0'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "def precision(y_pred, y_true):\n pass", "tensorflow_sol_code": "def precision(y_pred, y_true):\n true_positive = tf.reduce_sum(tf.cast(tf.logical_and(y_pred == 1, y_true == 1), dtype=tf.float32))\n predicted_positive = tf.reduce_sum(tf.cast(y_pred == 1, dtype=tf.float32))\n return true_positive / predicted_positive", "tensorflow_test_code": {"setup_code": "y_pred = tf.constant([1, 0, 1, 0])\ny_true = tf.constant([1, 0, 0, 1])", "test_cases": ["assert precision(y_pred, y_true).numpy() == 0.5, 'Test failed: Precision should be 0.5'", "assert precision(tf.constant([1, 1, 1, 1]), tf.constant([1, 1, 1, 1])).numpy() == 1, 'Test failed: Precision should be 1'", "assert precision(tf.constant([1, 1, 1, 1]), tf.constant([0, 0, 0, 0])).numpy() == 0, 'Test failed: Precision should be 0'"]}} |
| {"question_id": 73, "pytorch_library": "import torch", "pytorch_start_code": "def recall(y_pred, y_true):\n pass", "pytorch_sol_code": "def recall(y_pred, y_true):\n true_positive = torch.sum((y_pred == 1) & (y_true == 1))\n actual_positive = torch.sum(y_true == 1)\n return true_positive.float() / actual_positive", "pytorch_test_code": {"setup_code": "y_pred = torch.tensor([1, 0, 1, 0])\ny_true = torch.tensor([1, 0, 0, 1])", "test_cases": ["assert recall(y_pred, y_true) == 0.5, 'Test failed: Recall should be 0.5'", "assert recall(torch.tensor([1, 1, 1, 1]), torch.tensor([1, 1, 1, 1])) == 1, 'Test failed: Recall should be 1'", "assert recall(torch.tensor([0, 0, 0, 0]), torch.tensor([1, 1, 1, 1])) == 0, 'Test failed: Recall should be 0'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "def recall(y_pred, y_true):\n pass", "tensorflow_sol_code": "def recall(y_pred, y_true):\n true_positive = tf.reduce_sum(tf.cast(tf.logical_and(y_pred == 1, y_true == 1), dtype=tf.float32))\n actual_positive = tf.reduce_sum(tf.cast(y_true == 1, dtype=tf.float32))\n return true_positive / actual_positive", "tensorflow_test_code": {"setup_code": "y_pred = tf.constant([1, 0, 1, 0])\ny_true = tf.constant([1, 0, 0, 1])", "test_cases": ["assert recall(y_pred, y_true).numpy() == 0.5, 'Test failed: Recall should be 0.5'", "assert recall(tf.constant([1, 1, 1, 1]), tf.constant([1, 1, 1, 1])).numpy() == 1, 'Test failed: Recall should be 1'", "assert recall(tf.constant([0, 0, 0, 0]), tf.constant([1, 1, 1, 1])).numpy() == 0, 'Test failed: Recall should be 0'"]}} |
| {"question_id": 74, "pytorch_library": "import torch", "pytorch_start_code": "def f1_score(y_pred, y_true):\n pass", "pytorch_sol_code": "def f1_score(y_pred, y_true):\n true_positive = torch.sum((y_pred == 1) & (y_true == 1))\n predicted_positive = torch.sum(y_pred == 1)\n actual_positive = torch.sum(y_true == 1)\n precision = true_positive.float() / predicted_positive if predicted_positive else 0\n recall = true_positive.float() / actual_positive if actual_positive else 0\n return 2 * (precision * recall) / (precision + recall) if (precision + recall) else 0", "pytorch_test_code": {"setup_code": "y_pred = torch.tensor([1, 0, 1, 0])\ny_true = torch.tensor([1, 1, 1, 0])", "test_cases": ["assert torch.isclose(f1_score(y_pred, y_true), torch.tensor(0.8), atol=1e-6), 'Test failed: F1 score calculation is incorrect'", "assert f1_score(torch.tensor([0, 1, 0, 0, 1, 0]), torch.tensor([0, 1, 1, 0, 0, 1])) == 0.4, 'Test failed: F1 score should be 1 for perfect precision and recall'", "assert f1_score(torch.tensor([0, 0, 0, 0]), torch.tensor([1, 1, 1, 1])) == 0, 'Test failed: F1 score should be 0 when there are no true positives'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "def f1_score(y_pred, y_true):\n pass", "tensorflow_sol_code": "def f1_score(y_pred, y_true):\n true_positive = tf.reduce_sum(tf.cast(tf.logical_and(y_pred == 1, y_true == 1), dtype=tf.float32))\n predicted_positive = tf.reduce_sum(tf.cast(y_pred == 1, dtype=tf.float32))\n actual_positive = tf.reduce_sum(tf.cast(y_true == 1, dtype=tf.float32))\n precision = true_positive / predicted_positive if predicted_positive != 0 else 0\n recall = true_positive / actual_positive if actual_positive != 0 else 0\n return 2 * (precision * recall) / (precision + recall) if (precision + recall) != 0 else 0", "tensorflow_test_code": {"setup_code": "y_pred = tf.constant([1, 0, 1, 0])\ny_true = tf.constant([1, 1, 1, 0])", "test_cases": ["assert tf.experimental.numpy.isclose(f1_score(y_pred, 
y_true), 0.8, atol=1e-6).numpy(), 'Test failed: F1 score calculation is incorrect'", "assert tf.experimental.numpy.isclose(f1_score(tf.constant([0, 1, 0, 0, 1, 0]), tf.constant([0, 1, 1, 0, 0, 1])), 0.4, atol=1e-6), 'Test failed: F1 score should be 1 for perfect precision and recall'", "assert f1_score(tf.constant([0, 0, 0, 0]), tf.constant([1, 1, 1, 1])) == 0, 'Test failed: F1 score should be 0 when there are no true positives'"]}} |
| {"question_id": 75, "pytorch_library": "import torch", "pytorch_start_code": "tensor = torch.rand(4, 4, 3, 1)\n# tensor_squeezed = ", "pytorch_sol_code": "tensor_squeezed = torch.squeeze(tensor)", "pytorch_test_code": {"setup_code": "import torch\ntensor = torch.rand(4, 4, 3, 1)", "test_cases": ["assert tensor_squeezed.shape == (4, 4, 3), 'Shape after squeeze should be (4, 4, 3)'", "assert tensor_squeezed.dim() == 3, 'Dimension after squeeze should be 3'", "assert not any(d == 1 for d in tensor_squeezed.shape), 'No dimension should be of size 1'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.random.uniform((4, 4, 3, 1))\n# tensor_squeezed = ", "tensorflow_sol_code": "tensor_squeezed = tf.squeeze(tensor)", "tensorflow_test_code": {"setup_code": "import tensorflow as tf\ntensor = tf.random.uniform((4, 4, 3, 1))", "test_cases": ["assert tensor_squeezed.shape == (4, 4, 3), 'Shape after squeeze should be (4, 4, 3)'", "assert len(tensor_squeezed.shape) == 3, 'Dimension after squeeze should be 3'", "assert all(dim != 1 for dim in tensor_squeezed.shape), 'No dimension should be of size 1'"]}} |
| {"question_id": 76, "pytorch_library": "import torch", "pytorch_start_code": "matrix = torch.zeros(4, 4)\n# Fill the matrix according to the conditions", "pytorch_sol_code": "matrix.fill_diagonal_(1)\nupper_indices = torch.triu_indices(row=4, col=4, offset=1)\nmatrix[upper_indices[0], upper_indices[1]] = 2", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert torch.all(matrix.diagonal() == torch.ones(4)), 'Diagonal elements should be 1'", "assert torch.all(matrix.tril(diagonal=-1) == 0), 'Elements below the diagonal should be 0'", "assert torch.all(matrix.triu(diagonal=1) == torch.tensor([[0, 2, 2, 2], [0, 0, 2, 2], [0, 0, 0, 2], [0, 0, 0, 0]])), 'Elements above the diagonal should be 2'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "matrix = tf.zeros((4, 4), dtype=tf.float32)\n# Fill the matrix according to the conditions", "tensorflow_sol_code": "diag_values = tf.ones((4,), dtype=tf.float32)\nmatrix = tf.linalg.set_diag(matrix, diag_values)\nupper_mask = tf.linalg.band_part(tf.ones((4, 4), dtype=tf.float32), 0, -1) - tf.linalg.band_part(tf.ones((4, 4), dtype=tf.float32), 0, 0)\nmatrix += 2 * upper_mask", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert tf.reduce_all(tf.linalg.diag_part(matrix) == 1), 'Diagonal elements should be 1'", "assert tf.reduce_all(matrix == tf.constant([[1, 2, 2, 2], [0, 1, 2, 2], [0, 0, 1, 2], [0, 0, 0, 1]], dtype=tf.float32)), \"Incorrect matrix values\""]}} |
| {"question_id": 77, "pytorch_library": "import torch", "pytorch_start_code": "tensor = torch.rand(3, 2)\n# transposed_tensor = ", "pytorch_sol_code": "transposed_tensor = tensor.t()", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert transposed_tensor.shape == (2, 3), 'Transposed tensor shape should be 2x3'", "assert torch.equal(transposed_tensor, tensor.transpose(0, 1)), 'Transposed tensor should match the manually transposed tensor'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.random.uniform((3, 2))\n# transposed_tensor = ", "tensorflow_sol_code": "transposed_tensor = tf.transpose(tensor)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert transposed_tensor.shape == (2, 3), 'Transposed tensor shape should be 2x3'", "assert tf.reduce_all(tf.equal(transposed_tensor, tf.transpose(tensor))), 'Transposed tensor should match the manually transposed tensor'"]}} |
| {"question_id": 78, "pytorch_library": "import torch", "pytorch_start_code": "tensor_a = torch.rand(3, 2)\ntensor_b = torch.rand(3, 3)\n# concatenated_tensor = ", "pytorch_sol_code": "concatenated_tensor = torch.cat((tensor_a, tensor_b), dim=1)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert concatenated_tensor.shape == (3, 5), 'Concatenated tensor shape should be 3x5'", "assert torch.all(concatenated_tensor[:, :2] == tensor_a), 'First part of concatenated tensor should match tensor_a'", "assert torch.all(concatenated_tensor[:, 2:] == tensor_b), 'Second part of concatenated tensor should match tensor_b'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor_a = tf.random.uniform((3, 2))\ntensor_b = tf.random.uniform((3, 3))\n# concatenated_tensor = ", "tensorflow_sol_code": "concatenated_tensor = tf.concat([tensor_a, tensor_b], axis=1)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert concatenated_tensor.shape == (3, 5), 'Concatenated tensor shape should be 3x5'", "assert tf.reduce_all(tf.equal(concatenated_tensor[:, :2], tensor_a)), 'First part of concatenated tensor should match tensor_a'", "assert tf.reduce_all(tf.equal(concatenated_tensor[:, 2:], tensor_b)), 'Second part of concatenated tensor should match tensor_b'"]}} |
| {"question_id": 79, "pytorch_library": "import torch", "pytorch_start_code": "tensor = torch.rand(4, 6)\n# tensor_a, tensor_b = ", "pytorch_sol_code": "tensor_a, tensor_b = torch.split(tensor, 3, dim=1)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert tensor_a.shape == (4, 3) and tensor_b.shape == (4, 3), 'Each split tensor should have shape 4x3'", "assert torch.all(torch.cat((tensor_a, tensor_b), dim=1) == tensor), 'Concatenating the split tensors should recreate the original tensor'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.random.uniform((4, 6))\n# tensor_a, tensor_b = ", "tensorflow_sol_code": "tensor_a, tensor_b = tf.split(tensor, num_or_size_splits=2, axis=1)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert tensor_a.shape == (4, 3) and tensor_b.shape == (4, 3), 'Each split tensor should have shape 4x3'", "assert tf.reduce_all(tf.concat([tensor_a, tensor_b], axis=1) == tensor), 'Concatenating the split tensors should recreate the original tensor'"]}} |
| {"question_id": 80, "pytorch_library": "import torch", "pytorch_start_code": "tensor_a = torch.rand(3, 4)\ntensor_b = torch.rand(4, 5)\n# result_tensor = ", "pytorch_sol_code": "result_tensor = torch.matmul(tensor_a, tensor_b)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert result_tensor.shape == (3, 5), 'The result of matrix multiplication should have shape 3x5'", "assert torch.allclose(result_tensor, tensor_a @ tensor_b), 'Result tensor should match the output of direct @ operation'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor_a = tf.random.uniform((3, 4))\ntensor_b = tf.random.uniform((4, 5))\n# result_tensor = ", "tensorflow_sol_code": "result_tensor = tf.matmul(tensor_a, tensor_b)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert result_tensor.shape == (3, 5), 'The result of matrix multiplication should have shape 3x5'", "assert tf.reduce_all(tf.equal(result_tensor, tf.matmul(tensor_a, tensor_b))), 'Result tensor should match the output of tf.matmul'"]}} |
| {"question_id": 81, "pytorch_library": "import torch", "pytorch_start_code": "tensor_a = torch.rand(3, 3)\ntensor_b = torch.rand(3, 3)\n# result_tensor = ", "pytorch_sol_code": "result_tensor = tensor_a + tensor_b", "pytorch_test_code": {"setup_code": "import torch\ntensor_a = torch.rand(3, 3)\ntensor_b = torch.rand(3, 3)", "test_cases": ["assert result_tensor.shape == (3, 3), 'Result tensor should have shape 3x3'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor_a = tf.random.uniform((3, 3))\ntensor_b = tf.random.uniform((3, 3))\n# result_tensor = ", "tensorflow_sol_code": "result_tensor = tensor_a + tensor_b", "tensorflow_test_code": {"setup_code": "import tensorflow as tf\ntensor_a = tf.random.uniform((3, 3))\ntensor_b = tf.random.uniform((3, 3))", "test_cases": ["assert result_tensor.shape == (3, 3), 'Result tensor should have shape 3x3'"]}} |
| {"question_id": 82, "pytorch_library": "import torch", "pytorch_start_code": "tensor = torch.rand(4, 4)\n# mean_tensor = ", "pytorch_sol_code": "mean_tensor = tensor.mean(dim=0)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert mean_tensor.shape == (4,), 'Mean tensor should have reduced shape'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.random.uniform((4, 4))\n# mean_tensor = ", "tensorflow_sol_code": "mean_tensor = tf.reduce_mean(tensor, axis=0)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert mean_tensor.shape == (4,), 'Mean tensor should have reduced shape'"]}} |
| {"question_id": 83, "pytorch_library": "import torch", "pytorch_start_code": "# linspace_tensor = ", "pytorch_sol_code": "linspace_tensor = torch.linspace(1, 10, 10)", "pytorch_test_code": {"setup_code": "import torch", "test_cases": ["assert linspace_tensor.size(0) == 10, 'Tensor should contain 10 elements'", "assert linspace_tensor[0] == 1 and linspace_tensor[-1] == 10, 'Tensor should start at 1 and end at 10'", "assert torch.allclose(linspace_tensor[1] - linspace_tensor[0], torch.ones(1) * (linspace_tensor[2] - linspace_tensor[1])), 'Elements should be evenly spaced'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "# linspace_tensor = ", "tensorflow_sol_code": "linspace_tensor = tf.linspace(1.0, 10.0, 10)", "tensorflow_test_code": {"setup_code": "import tensorflow as tf", "test_cases": ["assert tf.size(linspace_tensor) == 10, 'Tensor should contain 10 elements'", "assert linspace_tensor[0] == 1 and linspace_tensor[-1] == 10, 'Tensor should start at 1 and end at 10'", "assert tf.reduce_all(tf.equal(linspace_tensor[1] - linspace_tensor[0], linspace_tensor[2] - linspace_tensor[1])), 'Elements should be evenly spaced'"]}} |
| {"question_id": 84, "pytorch_library": "import torch", "pytorch_start_code": "tensor = torch.rand(3, 4)\n# tensors_unbound = ", "pytorch_sol_code": "tensors_unbound = torch.unbind(tensor, dim=0)", "pytorch_test_code": {"setup_code": "import torch\ntensor = torch.rand(3, 4)", "test_cases": ["assert len(tensors_unbound) == 3, 'There should be three unbound tensors'", "all(tensor.shape == (4,) for tensor in tensors_unbound), 'Each unbound tensor should have shape (4,)'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.random.uniform((3, 4))\n# tensors_unbound = ", "tensorflow_sol_code": "tensors_unbound = tf.unstack(tensor, axis=0)", "tensorflow_test_code": {"setup_code": "import tensorflow as tf\ntensor = tf.random.uniform((3, 4))", "test_cases": ["assert len(tensors_unbound) == 3, 'There should be three unbound tensors'", "all(tensor.shape == (4,) for tensor in tensors_unbound), 'Each unbound tensor should have shape (4,)'"]}} |
| {"question_id": 85, "pytorch_library": "import torch", "pytorch_start_code": "prob_tensor = torch.tensor([[0.3, 0.6, 0.9], [0.1, 0.4, 0.7]])\n# bernoulli_tensor = ", "pytorch_sol_code": "bernoulli_tensor = torch.bernoulli(prob_tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert bernoulli_tensor.shape == (2, 3), 'The shape of the Bernoulli tensor should be (2, 3)'", "assert torch.all((bernoulli_tensor == 0) | (bernoulli_tensor == 1)), 'All elements in the Bernoulli tensor should be 0 or 1'", "assert torch.all((bernoulli_tensor.sum(dim=1) >= 0) & (bernoulli_tensor.sum(dim=1) <= 3)), 'Sum of each row should be between 0 and 3 inclusive, representing the number of successes'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "prob_tensor = tf.constant([[0.3, 0.6, 0.9], [0.1, 0.4, 0.7]])\n# bernoulli_tensor = ", "tensorflow_sol_code": "bernoulli_tensor = tf.cast(tf.random.uniform((2, 3)) < prob_tensor, tf.int32)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert bernoulli_tensor.shape == (2, 3), 'The shape of the Bernoulli tensor should be (2, 3)'", "assert tf.reduce_all((bernoulli_tensor == 0) | (bernoulli_tensor == 1)), 'All elements in the Bernoulli tensor should be 0 or 1'", "assert tf.reduce_all((tf.reduce_sum(bernoulli_tensor, axis=1) >= 0) & (tf.reduce_sum(bernoulli_tensor, axis=1) <= 3)), 'Sum of each row should be between 0 and 3 inclusive, representing the number of successes'"]}} |
| {"question_id": 86, "pytorch_library": "import torch", "pytorch_start_code": "tensor = torch.tensor([[1.0, 3.0, 5.0, 7.0, 9.0], [2.0, 4.0, 6.0, 8.0, 10.0]])\n# top_values, top_indices = ", "pytorch_sol_code": "top_values, top_indices = torch.topk(tensor, 3, dim=1)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert top_values.shape == (2, 3), 'The shape of top values tensor should be (2, 3)'", "assert top_indices.shape == (2, 3), 'The shape of top indices tensor should be (2, 3)'", "assert torch.all(top_values == torch.tensor([[9.0, 7.0, 5.0], [10.0, 8.0, 6.0]])), 'Top values are not correct'", "assert torch.all(top_indices == torch.tensor([[4, 3, 2], [4, 3, 2]])), 'Top indices are not correct'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.constant([[1.0, 3.0, 5.0, 7.0, 9.0], [2.0, 4.0, 6.0, 8.0, 10.0]])\n# values, indices = ", "tensorflow_sol_code": "values, indices = tf.math.top_k(tensor, 3, sorted=True)", "tensorflow_test_code": {"setup_code": "import tensorflow as tf\ntensor = tf.constant([[1.0, 3.0, 5.0, 7.0, 9.0], [2.0, 4.0, 6.0, 8.0, 10.0]])", "test_cases": ["assert values.shape == (2, 3), 'The shape of top values tensor should be (2, 3)'", "assert indices.shape == (2, 3), 'The shape of top indices tensor should be (2, 3)'", "assert tf.reduce_all(tf.equal(values, tf.constant([[9.0, 7.0, 5.0], [10.0, 8.0, 6.0]]))), 'Top values are not correct'", "assert tf.reduce_all(tf.equal(indices, tf.constant([[4, 3, 2], [4, 3, 2]]))), 'Top indices are not correct'"]}} |
| {"question_id": 87, "pytorch_library": "import torch", "pytorch_start_code": "# full_tensor = ", "pytorch_sol_code": "full_tensor = torch.full((3, 3), 7)", "pytorch_test_code": {"setup_code": "import torch", "test_cases": ["assert full_tensor.shape == (3, 3), 'The shape of the full tensor should be (3, 3)'", "assert torch.all(full_tensor == 7), 'All elements in the tensor should be 7'", "assert full_tensor.dtype == torch.int64, 'The dtype of the tensor should be torch.int64'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "# full_tensor = ", "tensorflow_sol_code": "full_tensor = tf.fill([3, 3], 7)", "tensorflow_test_code": {"setup_code": "import tensorflow as tf", "test_cases": ["assert full_tensor.shape == (3, 3), 'The shape of the full tensor should be (3, 3)'", "assert tf.reduce_all(full_tensor == 7), 'All elements in the tensor should be 7'", "assert full_tensor.dtype == tf.int32, 'The dtype of the tensor should be tf.int32'"]}} |
| {"question_id": 88, "pytorch_library": "import torch", "pytorch_start_code": "matrix = torch.tensor([[1, 0, 0], [0, 2, 0], [0, 0, 3]])\n# trace_value = ", "pytorch_sol_code": "trace_value = torch.trace(matrix)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert trace_value == 6, 'The trace of the matrix should be 6'", "assert isinstance(trace_value, torch.Tensor), 'The trace value should be a tensor'", "assert trace_value.item() == 6, 'The trace of the matrix calculated incorrectly'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "matrix = tf.constant([[1, 0, 0], [0, 2, 0], [0, 0, 3]])\n# trace_value = ", "tensorflow_sol_code": "trace_value = tf.linalg.trace(matrix)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert trace_value.numpy() == 6, 'The trace of the matrix should be 6'", "assert isinstance(trace_value, tf.Tensor), 'The trace value should be a tensor'", "assert trace_value.numpy() == 6, 'The trace of the matrix calculated incorrectly'"]}} |
| {"question_id": 89, "pytorch_library": "import torch", "pytorch_start_code": "tensor = torch.randn(3, 3, 3)\n# total_elements = ", "pytorch_sol_code": "total_elements = torch.numel(tensor)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert total_elements == 27, 'The total number of elements should be 27'", "assert isinstance(total_elements, int), 'The return type should be an integer'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tensor = tf.random.normal([3, 3, 3])\n# total_elements = ", "tensorflow_sol_code": "total_elements = tf.size(tensor).numpy().item()", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert total_elements == 27, 'The total number of elements should be 27'", "assert isinstance(total_elements, int), 'The return type should be an integer'"]}} |
| {"question_id": 90, "pytorch_library": "import torch", "pytorch_start_code": "real = torch.tensor([1.0, 2.0, 3.0, 4.0]).reshape(2, 2)\nimag = torch.tensor([5.0, 6.0, 7.0, 8.0]).reshape(2, 2)\n# complex_tensor = ", "pytorch_sol_code": "complex_tensor = torch.complex(real, imag)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert complex_tensor.shape == (2, 2), 'The shape of the complex tensor should be (2, 2)'", "assert torch.all(torch.eq(complex_tensor.real, real)), 'The real parts of the complex tensor do not match'", "assert torch.all(torch.eq(complex_tensor.imag, imag)), 'The imaginary parts of the complex tensor do not match'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "real = tf.constant([1.0, 2.0, 3.0, 4.0], shape=(2, 2))\nimag = tf.constant([5.0, 6.0, 7.0, 8.0], shape=(2, 2))\n# complex_tensor = ", "tensorflow_sol_code": "complex_tensor = tf.complex(real, imag)", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert complex_tensor.shape == (2, 2), 'The shape of the complex tensor should be (2, 2)'", "assert tf.reduce_all(tf.equal(tf.math.real(complex_tensor), real)), 'The real parts of the complex tensor do not match'", "assert tf.reduce_all(tf.equal(tf.math.imag(complex_tensor), imag)), 'The imaginary parts of the complex tensor do not match'"]}} |
| {"question_id": 91, "pytorch_library": "import torch\nimport torchvision\nfrom torchvision import datasets, transforms", "pytorch_start_code": "transform = transforms.Compose([transforms.ToTensor()])\n# train_set = datasets.FashionMNIST(...)\n# test_set = datasets.FashionMNIST(...)", "pytorch_sol_code": "transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])\ntrain_set = torchvision.datasets.FashionMNIST(\n root='./data/FashionMNIST',\n train=True,\n download=True,\n transform=transform\n)\ntest_set = torchvision.datasets.FashionMNIST(\n root='./data/FashionMNIST',\n train=False,\n download=True,\n transform=transform\n)", "pytorch_test_code": {"setup_code": "from torch.utils.data import DataLoader\ntrain_set = torchvision.datasets.FashionMNIST(root='./data/FashionMNIST', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]))\ntest_set = torchvision.datasets.FashionMNIST(root='./data/FashionMNIST', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]))", "test_cases": ["train_loader = DataLoader(train_set, batch_size=64, shuffle=True)\ntest_loader = DataLoader(test_set, batch_size=64, shuffle=False)\nassert len(train_loader.dataset) == 60000, 'The training set should contain 60000 images'", "assert len(test_loader.dataset) == 10000, 'The test set should contain 10000 images'", "sample_image, sample_label = next(iter(train_loader))\nassert sample_image.shape == (64, 1, 28, 28), 'Each image should have shape 1x28x28'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "# Load the Fashion MNIST dataset using TensorFlow", "tensorflow_sol_code": "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\nx_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32') / 255\nx_test = x_test.reshape(x_test.shape[0], 28, 28, 1).astype('float32') 
/ 255\ny_train = tf.keras.utils.to_categorical(y_train, 10)\ny_test = tf.keras.utils.to_categorical(y_test, 10)", "tensorflow_test_code": {"setup_code": "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\nx_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32') / 255\nx_test = x_test.reshape(x_test.shape[0], 28, 28, 1).astype('float32') / 255\ny_train = tf.keras.utils.to_categorical(y_train, 10)\ny_test = tf.keras.utils.to_categorical(y_test, 10)", "test_cases": ["assert x_train.shape == (60000, 28, 28, 1), 'Training data shape should be (60000, 28, 28, 1)'", "assert x_test.shape == (10000, 28, 28, 1), 'Test data shape should be (10000, 28, 28, 1)'", "assert y_train.shape == (60000, 10), 'Training labels should be one-hot encoded with shape (60000, 10)'", "assert y_test.shape == (10000, 10), 'Test labels should be one-hot encoded with shape (10000, 10)'"]}} |
| {"question_id": 92, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class CNNModel(nn.Module):\n def __init__(self, input_shape):\n super().__init__()\n ", "pytorch_sol_code": "class CNNModel(nn.Module):\n def __init__(self, input_shape):\n super().__init__()\n self.model = nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2),\n nn.Conv2d(32, 64, kernel_size=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2),\n nn.Flatten(),\n nn.Dropout(0.5),\n nn.Linear(64 * 5 * 5, 1),\n nn.Sigmoid()\n )\nmodel = CNNModel(input_shape=(1, 28, 28))", "pytorch_test_code": {"setup_code": "import torch\nimport torch.nn as nn\nmodel = CNNModel(input_shape=(1, 28, 28))", "test_cases": ["assert isinstance(model.model[0], nn.Conv2d) and model.model[0].out_channels == 32, 'First Conv2D layer should have 32 output channels'", "assert isinstance(model.model[3], nn.Conv2d) and model.model[3].out_channels == 64, 'Second Conv2D layer should have 64 output channels'", "assert isinstance(model.model[7], nn.Dropout), 'Should have a dropout layer'", "assert isinstance(model.model[9], nn.Sigmoid), 'Output should use sigmoid activation'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers", "tensorflow_start_code": "input_shape = (28, 28, 1)\n# Define the Keras Sequential model here", "tensorflow_sol_code": "model = keras.Sequential([\n keras.Input(shape=input_shape),\n layers.Conv2D(32, kernel_size=(3, 3), activation='relu'),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Flatten(),\n layers.Dropout(0.5),\n layers.Dense(1, activation='sigmoid')\n])", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert model.layers[0].output.shape[1:] == (26, 26, 32), 'Output shape of first Conv2D should be (26, 26, 32)'", "assert 
model.layers[2].output.shape[1:] == (11, 11, 64), 'Output shape of second Conv2D should be (11, 11, 64)'", "assert isinstance(model.layers[5], layers.Dropout), 'Should have a dropout layer'", "assert model.layers[-1].activation.__name__ == 'sigmoid', 'Output layer should use sigmoid activation'"]}} |
| {"question_id": 93, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "class Polynomial3(nn.Module):\n def __init__(self):\n super().__init__()\n ", "pytorch_sol_code": "class Polynomial3(nn.Module):\n def __init__(self):\n super().__init__()\n self.a = nn.Parameter(torch.randn(()))\n self.b = nn.Parameter(torch.randn(()))\n self.c = nn.Parameter(torch.randn(()))\n self.d = nn.Parameter(torch.randn(()))\n\n def forward(self, x):\n return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3\n\nmodel = Polynomial3()", "pytorch_test_code": {"setup_code": "import torch\nimport torch.nn as nn\nmodel = Polynomial3()", "test_cases": ["x = torch.tensor(2.0)\nassert isinstance(model(x), torch.Tensor), 'Model should output a tensor'", "x = torch.tensor(1.0)\nresult = model(x)\nexpected = model.a.item() + model.b.item() + model.c.item() + model.d.item()\nassert torch.isclose(result,torch.tensor(expected),atol=1e-5), 'Polynomial calculation does not match expected value'", "x = torch.tensor([2.0, 3.0])\nassert model(x).shape == torch.Size([2]), 'Output shape should match the number of input elements'"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "class Polynomial3(tf.keras.layers.Layer):\n def __init__(self):\n super().__init__()\n", "tensorflow_sol_code": "class Polynomial3(tf.keras.layers.Layer):\n def __init__(self):\n super().__init__()\n self.a = self.add_weight(shape=(), initializer='random_normal')\n self.b = self.add_weight(shape=(), initializer='random_normal')\n self.c = self.add_weight(shape=(), initializer='random_normal')\n self.d = self.add_weight(shape=(), initializer='random_normal')\n\n def call(self, x):\n return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3\n\nmodel = Polynomial3()", "tensorflow_test_code": {"setup_code": "model = Polynomial3()", "test_cases": ["x = tf.constant(2.0)\nassert isinstance(model(x), tf.Tensor), 'Model should output a tensor'", "x = 
tf.constant(1.0)\nresult = model(x)\nexpected = model.a.numpy() + model.b.numpy() + model.c.numpy() + model.d.numpy()\nassert np.isclose(result.numpy(), expected, atol=1e-5), 'Polynomial calculation does not match expected value'", "x = tf.constant([2.0, 3.0])\nassert model(x).shape == tf.TensorShape([2]), 'Output shape should match the number of input elements'"]}} |
| {"question_id": 94, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "class SimpleModel(nn.Module):\n def __init__(self):\n super().__init__()\n # Define the model layers here", "pytorch_sol_code": "class SimpleModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.layers = nn.Sequential(\n nn.Linear(3, 2),\n nn.ReLU(),\n nn.Linear(2, 3),\n nn.ReLU(),\n nn.Linear(3, 4)\n )\n\n def forward(self, x):\n return self.layers(x)\n\nmodel = SimpleModel()", "pytorch_test_code": {"setup_code": "import torch\nimport torch.nn as nn\nmodel = SimpleModel()", "test_cases": ["assert isinstance(model.layers[0], nn.Linear) and model.layers[0].out_features == 2, 'First layer should be Linear with 2 output features'", "assert isinstance(model.layers[1], nn.ReLU), 'Second layer should be ReLU'", "assert isinstance(model.layers[2], nn.Linear) and model.layers[2].out_features == 3, 'Third layer should be Linear with 3 output features'", "assert isinstance(model.layers[4], nn.Linear) and model.layers[4].out_features == 4, 'Fifth layer should be Linear with 4 output features'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers", "tensorflow_start_code": "model = keras.Sequential()\n# Add layers to the model", "tensorflow_sol_code": "model = keras.Sequential([\n layers.Dense(2, input_shape=(3,), activation='relu'),\n layers.Dense(3, activation='relu'),\n layers.Dense(4)\n])", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert model.layers[0].output.shape == (None, 2), 'Output shape of first Dense layer should be (None, 2)'", "assert model.layers[1].activation.__name__ == 'relu', 'Activation of second Dense layer should be ReLU'", "assert model.layers[2].output.shape == (None, 4), 'Output shape of third Dense layer should be (None, 4)'"]}} |
| {"question_id": 95, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class LinearRegression(nn.Module):\n def __init__(self, input_size, output_size):\n super().__init__()\n # Define the model layers here", "pytorch_sol_code": "class LinearRegression(nn.Module):\n def __init__(self, input_size, output_size):\n super().__init__()\n self.f1 = nn.Linear(input_size, 2000)\n self.f2 = nn.Linear(2000, output_size)\n\n def forward(self, x):\n x = self.f1(x)\n x = F.leaky_relu(x, negative_slope=0.01)\n x = F.dropout(x, p=0.3)\n x = self.f2(x)\n return torch.sigmoid(x)\n\n#model = LinearRegression(input_size, output_size)", "pytorch_test_code": {"setup_code": "model = LinearRegression(10, 1)", "test_cases": ["assert isinstance(model.f1, nn.Linear) and model.f1.out_features == 2000, 'First layer should be linear with 2000 output features'", "assert model.f1.in_features == 10, 'Input features of the first layer should match input_size'", "input = torch.randn(10)\noutput = model(input)\nassert output.dim() == 1, 'Output should have one dimension'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers", "tensorflow_start_code": "model = tf.keras.Sequential()\n# Add layers to the model", "tensorflow_sol_code": "model = tf.keras.Sequential([\n layers.Dense(2000, input_dim=10, activation='leaky_relu'),\n layers.Dropout(0.3),\n layers.Dense(1, activation='sigmoid')\n])", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert model.layers[0].output.shape == (None, 2000), 'First layer output shape should be (None, 2000)'", "assert model.layers[0].activation.__name__ == 'leaky_relu', 'First layer activation should be Leaky ReLU'", "import numpy as np\ninput = np.random.rand(10).reshape(1, -1)\noutput = model(input)\nassert output.shape == (1, 1), 'Output shape should be (1, 1)'"]}} |
| {"question_id": 96, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class LSTMClassifier(nn.Module):\n def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, weights):\n super().__init__()\n # Initialize model layers here\n #def forward(self, x, hidden):\n #pass\n #def init_hidden(self, batch_size):\n #pass", "pytorch_sol_code": "class LSTMClassifier(nn.Module):\n def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, weights):\n super().__init__()\n self.output_size = output_size\n self.n_layers = n_layers\n self.hidden_dim = hidden_dim\n self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False)\n self.dropout_1 = nn.Dropout(0.3)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=0.3, batch_first=True)\n self.dropout_2 = nn.Dropout(0.3)\n self.label_layer = nn.Linear(hidden_dim, output_size)\n self.act = nn.Sigmoid()\n def init_hidden(self, batch_size):\n weight = next(self.parameters()).data\n return (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())\n def forward(self, x, hidden):\n x = self.word_embeddings(x)\n x = self.dropout_1(x)\n lstm_out, hidden = self.lstm(x, hidden)\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n out = self.dropout_2(lstm_out)\n out = self.label_layer(out)\n out = out.view(-1, self.output_size)\n out = self.act(out)\n return out, hidden\n\n#model = LSTMClassifier(1000, 10, 300, 256, 2, torch.randn(1000, 300))", "pytorch_test_code": {"setup_code": "model = LSTMClassifier(1000, 10, 300, 256, 2, torch.randn(1000, 300))", "test_cases": ["x = torch.randint(0, 1000, (1, 5))\nhidden = model.init_hidden(1)\noutput, hidden = model(x, hidden)\nassert isinstance(output, torch.Tensor) and output.shape == (5, 10), 'Output tensor should have shape 
(5, 10)'", "assert isinstance(model.lstm, nn.LSTM), 'Model should contain an LSTM layer'", "assert model.lstm.dropout == 0.3, 'Dropout in LSTM should be 0.3'", "hidden = model.init_hidden(1)\nassert isinstance(hidden, tuple) and len(hidden) == 2, 'Hidden state should be a tuple with two elements'", "assert isinstance(model.act, nn.Sigmoid), 'Activation function should be sigmoid'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers", "tensorflow_start_code": "class LSTMClassifier(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, hidden_dim, n_layers, output_size, weights):\n super().__init__()\n # Initialize model layers here", "tensorflow_sol_code": "class LSTMClassifier(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, hidden_dim, n_layers, output_size, weights):\n super().__init__()\n self.embedding = layers.Embedding(input_dim=vocab_size, output_dim=embedding_dim, weights=[weights], trainable=False)\n self.lstm = layers.LSTM(hidden_dim, return_sequences=True, dropout=0.3, recurrent_dropout=0.3)\n self.dropout = layers.Dropout(0.3)\n self.dense = layers.Dense(output_size, activation='sigmoid')\n def call(self, inputs):\n x = self.embedding(inputs)\n x = self.lstm(x)\n x = self.dropout(x)\n return self.dense(x)\n\nmodel = LSTMClassifier(1000, 300, 256, 2, 10, tf.random.normal((1000, 300)))", "tensorflow_test_code": {"setup_code": "model = LSTMClassifier(1000, 300, 256, 2, 10, tf.random.normal((1000, 300)))", "test_cases": ["x = tf.random.uniform((1, 5), maxval=1000, dtype=tf.int32)\noutput = model(x)\nassert tf.is_tensor(output) and output.shape == (1,5, 10), 'Output tensor should have shape (1,5, 10)'", "assert hasattr(model, 'lstm') and isinstance(model.lstm, layers.LSTM), 'Model should contain an LSTM layer'", "assert model.lstm.dropout == 0.3, 'Dropout in LSTM should be 0.3'", "output = model(x)\nassert tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=tf.ones_like(output)).shape == 
output.shape, 'Output activation should be sigmoid and match the output shape'"]}} |
| {"question_id": 97, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class LeNet5(nn.Module):\n def __init__(self, num_classes):\n super().__init__()\n # Define model layers here", "pytorch_sol_code": "class LeNet5(nn.Module):\n def __init__(self, num_classes):\n super().__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(6),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n self.layer2 = nn.Sequential(\n nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n self.fc = nn.Linear(400, 120)\n self.relu = nn.ReLU()\n self.fc1 = nn.Linear(120, 84)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(84, num_classes)\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = x.view(-1, 400)\n x = self.fc(x)\n x = self.relu(x)\n x = self.fc1(x)\n x = self.relu1(x)\n x = self.fc2(x)\n return x", "pytorch_test_code": {"setup_code": "model = LeNet5(num_classes=10)", "test_cases": ["assert isinstance(model.layer1[0], nn.Conv2d), 'First layer should be a Conv2d'", "assert model.layer1[0].out_channels == 6, 'Output channels of first Conv2d should be 6'", "input = torch.randn(1, 1, 32, 32)\noutput = model(input)\nassert output.size(1) == 10, 'Output size should match number of classes'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers", "tensorflow_start_code": "input_shape = (32, 32, 1)\nnum_classes=10\n# Define the Keras Sequential model", "tensorflow_sol_code": "model = tf.keras.Sequential([\n layers.Conv2D(6, kernel_size=(5, 5), activation='relu', input_shape=input_shape),\n layers.BatchNormalization(),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Conv2D(16, kernel_size=(5, 5), activation='relu'),\n layers.BatchNormalization(),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Flatten(),\n layers.Dense(120, 
activation='relu'),\n layers.Dense(84, activation='relu'),\n layers.Dense(num_classes, activation='softmax')\n])", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert model.layers[0].output.shape == (None, 28, 28, 6), 'Output shape of first Conv2D layer should be (None, 28, 28, 6)'", "assert isinstance(model.layers[1], layers.BatchNormalization), 'Second layer should be BatchNormalization'", "import numpy as np\ninput = np.random.rand(1, 32, 32, 1).astype('float32')\noutput = model(input)\nassert output.shape == (1, 10), 'Output shape should be (1, 10)'"]}} |
| {"question_id": 98, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class ResidualBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride=1, downsample=None):\n super().__init__()\n # Initialize layers here", "pytorch_sol_code": "class ResidualBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride=1, downsample=None):\n super().__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(out_channels)\n )\n self.downsample = downsample\n self.relu = nn.ReLU()\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.conv2(out)\n if self.downsample:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n return out", "pytorch_test_code": {"setup_code": "model = ResidualBlock(3, 3)", "test_cases": ["assert isinstance(model.conv1[0], nn.Conv2d), 'First layer should be Conv2d'", "assert model.conv1[0].out_channels == 3, 'Incorrect number of output channels in first convolution'", "input = torch.randn(1, 3, 64, 64)\noutput = model(input)\nassert output.shape == (1, 3, 64, 64), 'Output shape should match input shape if no downsampling'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers", "tensorflow_start_code": "class ResidualBlock(tf.keras.layers.Layer):\n def __init__(self, in_channels, out_channels, stride=1, downsample=None):\n super().__init__()\n # Initialize layers here", "tensorflow_sol_code": "class ResidualBlock(tf.keras.layers.Layer):\n def __init__(self, in_channels, out_channels, stride=1, downsample=None):\n super().__init__()\n self.conv1 = tf.keras.Sequential([\n layers.Conv2D(out_channels, 3, stride, 'same'),\n layers.BatchNormalization(),\n 
layers.ReLU()\n ])\n self.conv2 = tf.keras.Sequential([\n layers.Conv2D(out_channels, 3, 1, 'same'),\n layers.BatchNormalization()\n ])\n self.downsample = downsample\n def call(self, inputs):\n residual = inputs\n x = self.conv1(inputs)\n x = self.conv2(x)\n if self.downsample is not None:\n residual = self.downsample(inputs)\n x += residual\n return layers.ReLU()(x)", "tensorflow_test_code": {"setup_code": "model = ResidualBlock(3, 3)", "test_cases": ["assert isinstance(model.conv1.layers[0], layers.Conv2D), 'First layer should be Conv2D'", "assert model.conv1.layers[0].filters == 3, 'Incorrect number of filters in first convolution'", "input = tf.random.normal((1, 64, 64, 3))\noutput = model(input)\nassert output.shape == (1, 64, 64, 3), 'Output shape should match input shape if no downsampling'"]}} |
| {"question_id": 99, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class AlexNet(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n # Define model layers here", "pytorch_sol_code": "class AlexNet(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),\n nn.BatchNorm2d(96),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2)\n )\n self.layer2 = nn.Sequential(\n nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2)\n )\n self.layer3 = nn.Sequential(\n nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(384),\n nn.ReLU()\n )\n self.layer4 = nn.Sequential(\n nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(384),\n nn.ReLU()\n )\n self.layer5 = nn.Sequential(\n nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2)\n )\n self.fc = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(9216, 4096),\n nn.ReLU()\n )\n self.fc1 = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU()\n )\n self.fc2 = nn.Sequential(\n nn.Linear(4096, num_classes)\n )\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.layer5(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n out = self.fc1(out)\n out = self.fc2(out)\n return out\n\nmodel = AlexNet(num_classes=10)", "pytorch_test_code": {"setup_code": "model = AlexNet(num_classes=10)", "test_cases": ["assert isinstance(model.layer1[0], nn.Conv2d), 'Layer 1 should contain a Conv2d layer'", "assert model.fc[1].in_features == 9216, 'First fully connected layer should have 9216 input features'", "input = torch.randn(1, 3, 227, 227)\noutput = 
model(input)\nassert output.size(1) == 10, 'Final output should match the number of classes'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential", "tensorflow_start_code": "#model = Sequential", "tensorflow_sol_code": "model = Sequential([\n layers.Conv2D(96, kernel_size=11, strides=4, padding='valid', input_shape=(227, 227, 3)),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Conv2D(256, kernel_size=5, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Conv2D(384, kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.Conv2D(384, kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.Conv2D(256, kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Flatten(),\n layers.Dropout(0.5),\n layers.Dense(4096, activation='relu'),\n layers.Dropout(0.5),\n layers.Dense(4096, activation='relu'),\n layers.Dense(10, activation='softmax')\n])", "tensorflow_test_code": {"setup_code": "model = Sequential([\n layers.Conv2D(96, kernel_size=11, strides=4, padding='valid', input_shape=(227, 227, 3)),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Conv2D(256, kernel_size=5, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Conv2D(384, kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.Conv2D(384, kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.Conv2D(256, kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n 
layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Flatten(),\n layers.Dropout(0.5),\n layers.Dense(4096, activation='relu'),\n layers.Dropout(0.5),\n layers.Dense(4096, activation='relu'),\n layers.Dense(10, activation='softmax')\n])", "test_cases": ["assert isinstance(model.layers[0], layers.Conv2D), 'First layer should be Conv2D'", "assert model.layers[14].output.shape== (None, 13, 13, 256), 'The shape of the output tensor is incorrect'", "input = tf.random.normal([1, 227, 227, 3])\noutput = model(input)\nassert output.shape == (1, 10), 'Final output shape should match number of classes'"]}} |
| {"question_id": 100, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.optim as optim", "pytorch_start_code": "class Polynomial3(nn.Module):\n def __init__(self):\n super().__init__()\n self.a = nn.Parameter(torch.randn(()))\n self.b = nn.Parameter(torch.randn(()))\n self.c = nn.Parameter(torch.randn(()))\n self.d = nn.Parameter(torch.randn(()))\n\n def forward(self, x):\n return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3\n\nmodel = Polynomial3()", "pytorch_sol_code": "optimizer = optim.Adam(model.parameters(), lr=0.01)\n\n", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, optim.Adam), 'Optimizer should be Adam'", "assert optimizer.defaults['lr'] == 0.01, 'Learning rate should be 0.01'"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "class Polynomial3(tf.keras.layers.Layer):\n def __init__(self):\n super().__init__()\n self.a = self.add_weight(shape=(), initializer='random_normal')\n self.b = self.add_weight(shape=(), initializer='random_normal')\n self.c = self.add_weight(shape=(), initializer='random_normal')\n self.d = self.add_weight(shape=(), initializer='random_normal')\n\n def call(self, x):\n return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3\n\nmodel = Polynomial3()", "tensorflow_sol_code": "optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\n\n", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, tf.keras.optimizers.Adam), 'Optimizer should be Adam'", "assert np.isclose(optimizer.learning_rate.numpy(), 0.01), 'Learning rate should be 0.01'"]}} |
| {"question_id": 101, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.optim as optim", "pytorch_start_code": "class AlexNet(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),\n nn.BatchNorm2d(96),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2)\n )\n self.layer2 = nn.Sequential(\n nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2)\n )\n self.layer3 = nn.Sequential(\n nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(384),\n nn.ReLU()\n )\n self.layer4 = nn.Sequential(\n nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(384),\n nn.ReLU()\n )\n self.layer5 = nn.Sequential(\n nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2)\n )\n self.fc = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(9216, 4096),\n nn.ReLU()\n )\n self.fc1 = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU()\n )\n self.fc2= nn.Sequential(\n nn.Linear(4096, num_classes)\n )\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.layer5(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n out = self.fc1(out)\n out = self.fc2(out)\n return out\n\nmodel = AlexNet(num_classes=10)", "pytorch_sol_code": "optimizer = optim.AdamW(model.parameters(), lr=0.001)\n\n# Example usage:\n# loss_fn = nn.CrossEntropyLoss()\n# optimizer.zero_grad()\n# output = model(x)\n# loss = loss_fn(output, target)\n# loss.backward()\n# optimizer.step()", "pytorch_test_code": {"setup_code": "model = AlexNet(num_classes=10)\noptimizer = optim.AdamW(model.parameters(), lr=0.001)", "test_cases": ["assert isinstance(optimizer, optim.AdamW), 'Optimizer should be AdamW'", "assert 
optimizer.defaults['lr'] == 0.001, 'Learning rate should be 0.001'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.optimizers import AdamW", "tensorflow_start_code": "model = Sequential([\n layers.Conv2D(96, kernel_size=11, strides=4, padding='valid', input_shape=(227, 227, 3)),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Conv2D(256, kernel_size=5, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Conv2D(384, kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.Conv2D(384, kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.Conv2D(256, kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Flatten(),\n layers.Dropout(0.5),\n layers.Dense(4096, activation='relu'),\n layers.Dropout(0.5),\n layers.Dense(4096, activation='relu'),\n layers.Dense(10, activation='softmax')\n])", "tensorflow_sol_code": "optimizer = AdamW(learning_rate=0.001)\n\n# Example usage:\n# model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy')\n# model.fit(x_train, y_train, epochs=10)", "tensorflow_test_code": {"setup_code": "import numpy as np\nmodel = Sequential([\n layers.Conv2D(96, kernel_size=11, strides=4, padding='valid', input_shape=(227, 227, 3)),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Conv2D(256, kernel_size=5, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Conv2D(384, kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.Conv2D(384, 
kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.Conv2D(256, kernel_size=3, strides=1, padding='same'),\n layers.BatchNormalization(),\n layers.ReLU(),\n layers.MaxPooling2D(pool_size=3, strides=2),\n layers.Flatten(),\n layers.Dropout(0.5),\n layers.Dense(4096, activation='relu'),\n layers.Dropout(0.5),\n layers.Dense(4096, activation='relu'),\n layers.Dense(10, activation='softmax')\n])\noptimizer = AdamW(learning_rate=0.001)", "test_cases": ["assert isinstance(optimizer, tf.keras.optimizers.AdamW), 'Optimizer should be AdamW'", "assert np.isclose(optimizer.learning_rate.numpy(), 0.001), 'Learning rate should be 0.001'"]}} |
| {"question_id": 102, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.optim as optim", "pytorch_start_code": "class LeNet5(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(6),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n self.layer2 = nn.Sequential(\n nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n self.fc = nn.Linear(400, 120)\n self.relu = nn.ReLU()\n self.fc1 = nn.Linear(120, 84)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(84, num_classes)\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = x.view(-1, 400)\n x = self.fc(x)\n x = self.relu(x)\n x = self.fc1(x)\n x = self.relu1(x)\n x = self.fc2(x)\n return x\n\nmodel = LeNet5(num_classes=10)", "pytorch_sol_code": "optimizer = optim.RMSprop(model.parameters(), lr=0.001)\n\n# Example usage:\n# loss_fn = nn.CrossEntropyLoss()\n# optimizer.zero_grad()\n# output = model(x)\n# loss = loss_fn(output, target)\n# loss.backward()\n# optimizer.step()", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, optim.RMSprop), 'Optimizer should be RMSprop'", "assert optimizer.defaults['lr'] == 0.001, 'Learning rate should be 0.001'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.optimizers import RMSprop", "tensorflow_start_code": "input_shape = (32, 32, 1)\nnum_classes=10\nmodel = tf.keras.Sequential([\n layers.Conv2D(6, kernel_size=(5, 5), activation='relu', input_shape=input_shape),\n layers.BatchNormalization(),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Conv2D(16, kernel_size=(5, 5), activation='relu'),\n layers.BatchNormalization(),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Flatten(),\n layers.Dense(120, activation='relu'),\n layers.Dense(84, 
activation='relu'),\n layers.Dense(num_classes, activation='softmax')\n])", "tensorflow_sol_code": "optimizer = RMSprop(learning_rate=0.001)\n\ninput_shape = (32, 32, 1)\nnum_classes=10\nmodel = tf.keras.Sequential([\n layers.Conv2D(6, kernel_size=(5, 5), activation='relu', input_shape=input_shape),\n layers.BatchNormalization(),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Conv2D(16, kernel_size=(5, 5), activation='relu'),\n layers.BatchNormalization(),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Flatten(),\n layers.Dense(120, activation='relu'),\n layers.Dense(84, activation='relu'),\n layers.Dense(num_classes, activation='softmax')\n])\noptimizer = RMSprop(learning_rate=0.001)", "tensorflow_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert isinstance(optimizer, tf.keras.optimizers.RMSprop), 'Optimizer should be RMSprop'", "assert np.isclose(optimizer.learning_rate.numpy(), 0.001), 'Learning rate should be 0.001'"]}} |
| {"question_id": 103, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim", "pytorch_start_code": "class LinearRegression(nn.Module):\n def __init__(self, input_size, output_size):\n super().__init__()\n self.f1 = nn.Linear(input_size, 2000)\n self.f2 = nn.Linear(2000, output_size)\n\n def forward(self, x):\n x = self.f1(x)\n x = F.leaky_relu(x, negative_slope=0.01)\n x = F.dropout(x, p=0.3)\n x = self.f2(x)\n return torch.sigmoid(x)\n\nmodel = LinearRegression(10, 1)", "pytorch_sol_code": "optimizer = optim.RMSprop(model.parameters(), lr=0.002)\n\n", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, optim.RMSprop), 'Optimizer should be RMSprop'", "assert optimizer.defaults['lr'] == 0.002, 'Learning rate should be 0.002'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.optimizers import RMSprop", "tensorflow_start_code": "model = tf.keras.Sequential([\n layers.Dense(2000, input_dim=10, activation='leaky_relu'),\n layers.Dropout(0.3),\n layers.Dense(1, activation='sigmoid')\n])", "tensorflow_sol_code": "optimizer = RMSprop(learning_rate=0.002)\n\n", "tensorflow_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert isinstance(optimizer, tf.keras.optimizers.RMSprop), 'Optimizer should be RMSprop'", "assert np.isclose(optimizer.learning_rate.numpy(), 0.002), 'Learning rate should be 0.002'"]}} |
| {"question_id": 104, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim", "pytorch_start_code": "class LSTMClassifier(nn.Module):\n def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, weights):\n super().__init__()\n self.output_size = output_size\n self.n_layers = n_layers\n self.hidden_dim = hidden_dim\n self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False)\n self.dropout_1 = nn.Dropout(0.3)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=0.3, batch_first=True)\n self.dropout_2 = nn.Dropout(0.3)\n self.label_layer = nn.Linear(hidden_dim, output_size)\n self.act = nn.Sigmoid()\n def init_hidden(self, batch_size):\n weight = next(self.parameters()).data\n return (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())\n def forward(self, x, hidden):\n x = self.word_embeddings(x)\n x = self.dropout_1(x)\n lstm_out, hidden = self.lstm(x, hidden)\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n out = self.dropout_2(lstm_out)\n out = self.label_layer(out)\n out = out.view(-1, self.output_size)\n out = self.act(out)\n return out, hidden\n\nmodel = LSTMClassifier(1000, 10, 300, 256, 2, torch.randn(1000, 300))", "pytorch_sol_code": "optimizer = optim.Rprop(model.parameters(), lr=0.003)\n\n", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert isinstance(optimizer, optim.Rprop), 'Optimizer should be Rprop'", "assert optimizer.defaults['lr'] == 0.003, 'Learning rate should be 0.003'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.optimizers import RMSprop", "tensorflow_start_code": "class LSTMClassifier(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, hidden_dim, n_layers, output_size, weights):\n 
super().__init__()\n self.embedding = layers.Embedding(input_dim=vocab_size, output_dim=embedding_dim, weights=[weights], trainable=False)\n self.lstm = layers.LSTM(hidden_dim, return_sequences=True, dropout=0.3, recurrent_dropout=0.3)\n self.dropout = layers.Dropout(0.3)\n self.dense = layers.Dense(output_size, activation='sigmoid')\n def call(self, inputs):\n x = self.embedding(inputs)\n x = self.lstm(x)\n x = self.dropout(x)\n return self.dense(x)\n\nmodel = LSTMClassifier(1000, 300, 256, 2, 10, tf.random.normal((1000, 300)))", "tensorflow_sol_code": "optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.003)\n\n", "tensorflow_test_code": {"setup_code": "import numpy as np", "test_cases": ["assert isinstance(optimizer, tf.keras.optimizers.RMSprop), 'Optimizer should be RMSprop'", "assert np.isclose(optimizer.learning_rate.numpy(), 0.003), 'Learning rate should be 0.003'"]}} |
| {"question_id": 105, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(0)\ninput_tensor = torch.randn(3, 5, requires_grad=True)\ntarget_tensor = torch.tensor([1, 0, 4])\n#loss = ", "pytorch_sol_code": "nll_loss = nn.NLLLoss()\nloss = nll_loss(input_tensor.log_softmax(dim=1), target_tensor)", "pytorch_test_code": {"setup_code": "expected_loss = 2.7184\n", "test_cases": ["assert torch.isclose(loss, torch.tensor(expected_loss), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tf.random.set_seed(0)\ninput_tensor = tf.random.normal([3, 5], seed=0)\ntarget_tensor = tf.constant([1, 0, 4], dtype=tf.int32)\n#loss = ", "tensorflow_sol_code": "nll_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\nloss = nll_loss(target_tensor, input_tensor)", "tensorflow_test_code": {"setup_code": "expected_loss = 1.0320467\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 106, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(0)\ninput1 = torch.randn(3, requires_grad=True)\ninput2 = torch.randn(3, requires_grad=True)\ntarget_tensor = torch.tensor([1, -1, 1])\n#loss = ", "pytorch_sol_code": "margin_loss = nn.MarginRankingLoss(margin=1.0)\nloss = margin_loss(input1, input2, target_tensor)", "pytorch_test_code": {"setup_code": "expected_loss = 1.1996\n", "test_cases": ["assert torch.isclose(loss, torch.tensor(expected_loss), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tf.random.set_seed(0)\ninput1 = tf.random.normal([3], seed=0)\ninput2 = tf.random.normal([3], seed=0)\ntarget_tensor = tf.constant([1, -1, 1], dtype=tf.float32)\n#loss = ", "tensorflow_sol_code": "margin = 1.0\nloss = tf.reduce_mean(tf.maximum(0.0, -target_tensor * (input1 - input2) + margin))", "tensorflow_test_code": {"setup_code": "expected_loss = 2.3072548\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 107, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(0)\nanchor = torch.randn(5, requires_grad=True)\npositive = torch.randn(5, requires_grad=True)\nnegative = torch.randn(5, requires_grad=True)\n#loss = ", "pytorch_sol_code": "triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)\nloss = triplet_loss(anchor, positive, negative)", "pytorch_test_code": {"setup_code": "expected_loss = 2.8983\n", "test_cases": ["assert torch.isclose(loss, torch.tensor(expected_loss), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tf.random.set_seed(0)\nanchor = tf.random.normal([5], seed=0)\npositive = tf.random.normal([5], seed=0)\nnegative = tf.random.normal([5], seed=0)\nmargin = 3.0\n#loss = ", "tensorflow_sol_code": "positive_dist = tf.reduce_sum(tf.square(anchor - positive))\nnegative_dist = tf.reduce_sum(tf.square(anchor - negative))\nloss = tf.maximum(positive_dist - negative_dist + margin, 0.0)", "tensorflow_test_code": {"setup_code": "expected_loss = 0.61476135\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 108, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(0)\ninput_tensor = torch.randn(3, requires_grad=True)\ntarget_tensor = torch.randn(3)\n#loss = ", "pytorch_sol_code": "kldiv_loss = nn.KLDivLoss(reduction='batchmean')\nloss = kldiv_loss(torch.log_softmax(input_tensor, dim=-1), torch.softmax(target_tensor, dim=-1))", "pytorch_test_code": {"setup_code": "expected_loss = 0.0310\n", "test_cases": ["assert torch.isclose(loss, torch.tensor(expected_loss), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tf.random.set_seed(0)\ninput_tensor = tf.random.normal([3], seed=0)\ntarget_tensor = tf.random.normal([3], seed=0)\n#loss = ", "tensorflow_sol_code": "kldiv_loss = tf.keras.losses.KLDivergence(reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE)\nloss = kldiv_loss(tf.nn.log_softmax(input_tensor), tf.nn.softmax(target_tensor))", "tensorflow_test_code": {"setup_code": "expected_loss = -4.4145813e-06\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 109, "pytorch_library": "import torch\nimport torch.nn.functional as F", "pytorch_start_code": "#def my_custom_loss(my_outputs, my_labels):", "pytorch_sol_code": "def my_custom_loss(my_outputs, my_labels):\n # Specify the batch size\n my_batch_size = my_outputs.size()[0]\n # Calculate the log of softmax values\n my_outputs = F.log_softmax(my_outputs, dim=1)\n # Select the values that correspond to labels\n my_outputs = my_outputs[range(my_batch_size), my_labels]\n return -torch.sum(my_outputs) / my_batch_size", "pytorch_test_code": {"setup_code": "my_outputs = torch.tensor([[2.0, 1.0, 0.1], [0.5, 0.2, 0.3]])\nmy_labels = torch.tensor([0, 2])\nexpected_loss = 0.7784\n", "test_cases": ["loss = my_custom_loss(my_outputs, my_labels)\nassert torch.isclose(loss, torch.tensor(expected_loss), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import backend as K", "tensorflow_start_code": "#def my_custom_loss(my_outputs, my_labels):", "tensorflow_sol_code": "def my_custom_loss(my_outputs, my_labels):\n # Specify the batch size\n my_batch_size = tf.shape(my_outputs)[0]\n # Calculate the log of softmax values\n my_outputs = tf.nn.log_softmax(my_outputs, axis=1)\n # Select the values that correspond to labels\n indices = tf.stack([tf.range(my_batch_size), my_labels], axis=1)\n my_outputs = tf.gather_nd(my_outputs, indices)\n return -tf.reduce_sum(my_outputs) / tf.cast(my_batch_size, tf.float32)", "tensorflow_test_code": {"setup_code": "my_outputs = tf.constant([[2.0, 1.0, 0.1], [0.5, 0.2, 0.3]])\nmy_labels = tf.constant([0, 2])\nexpected_loss = 0.7784", "test_cases": ["loss = my_custom_loss(my_outputs, my_labels)\nassert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 110, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class DiceLoss(nn.Module):\n    def __init__(self, weight=None, size_average=True):\n        super().__init__()", "pytorch_sol_code": "class DiceLoss(nn.Module):\n    def __init__(self, weight=None, size_average=True):\n        super().__init__()\n    def forward(self, inputs, targets, smooth=1):\n        inputs = F.sigmoid(inputs)\n        inputs = inputs.view(-1)\n        targets = targets.view(-1)\n        intersection = (inputs * targets).sum()\n        dice = (2.0 * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)\n        return 1 - dice", "pytorch_test_code": {"setup_code": "inputs = torch.tensor([[0.5, 0.2, 0.8], [0.7, 0.1, 0.6]])\ntargets = torch.tensor([[1, 0, 1], [1, 0, 0]])\nexpected_loss = 0.3558\ncriterion = DiceLoss()", "test_cases": ["loss = criterion(inputs, targets)\nassert torch.isclose(loss, torch.tensor(expected_loss), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import backend as K", "tensorflow_start_code": "class DiceLoss(tf.keras.losses.Loss):\n    def __init__(self, smooth=1):\n        super().__init__()", "tensorflow_sol_code": "class DiceLoss(tf.keras.losses.Loss):\n    def __init__(self, smooth=1):\n        super().__init__()\n        self.smooth = smooth\n    def call(self, y_true, y_pred):\n        y_pred = tf.nn.sigmoid(y_pred)\n        y_true = tf.reshape(y_true, [-1])\n        y_pred = tf.reshape(y_pred, [-1])\n        intersection = tf.reduce_sum(y_true * y_pred)\n        dice = (2.0 * intersection + self.smooth) / (tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) + self.smooth)\n        return 1 - dice", "tensorflow_test_code": {"setup_code": "inputs = tf.constant([[0.5, 0.2, 0.8], [0.7, 0.1, 0.6]])\ntargets = tf.constant([[1, 0, 1], [1, 0, 0]])\nexpected_loss = 0.3558\ncriterion = DiceLoss()", "test_cases": ["loss = criterion(targets, inputs)\nassert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 111, "pytorch_library": "import torch\nimport torch.nn.functional as F\nimport numpy as np", "pytorch_start_code": "\ntorch.manual_seed(0)\ntensor1 = torch.randn(10, requires_grad=True)\ntensor2 = torch.randn(10, requires_grad=True)\n# Calculate pairwise distance\n# pairwise_distance =\n ", "pytorch_sol_code": "\npairwise_distance = F.pairwise_distance(tensor1.unsqueeze(0), tensor2.unsqueeze(0), p=2)\n ", "pytorch_test_code": {"setup_code": "\nexpected_value = 3.3985583782196045", "test_cases": ["assert np.isclose(pairwise_distance.item(), expected_value, atol=1e-4), 'Pairwise distance calculation does not match expected value'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\ntf.random.set_seed(0)\ntensor1 = tf.random.normal([10],seed=0)\ntensor2 = tf.random.normal([10],seed=0)\n# Calculate pairwise distance\n# pairwise_distance =\n ", "tensorflow_sol_code": "\npairwise_distance = tf.norm(tensor1 - tensor2, ord='euclidean')\n ", "tensorflow_test_code": {"setup_code": "\nexpected_value = 4.275403\n", "test_cases": ["assert tf.experimental.numpy.isclose(pairwise_distance.numpy(), expected_value, atol=1e-4), 'Pairwise distance calculation does not match expected value'\n"]}} |
| {"question_id": 112, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(0)\ninput_tensor = torch.randn(3, 5, requires_grad=True)\ntarget_tensor = torch.rand(3, 5)\n#loss = ", "pytorch_sol_code": "poisson_nll_loss = nn.PoissonNLLLoss(log_input=True)\nloss = poisson_nll_loss(input_tensor, target_tensor)", "pytorch_test_code": {"setup_code": "expected_loss = 1.3761\n", "test_cases": ["assert torch.isclose(loss, torch.tensor(expected_loss), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tf.random.set_seed(0)\ninput_tensor = tf.random.normal([3, 5], seed=0)\ntarget_tensor = tf.random.uniform([3, 5], dtype=tf.float32)\n#loss = ", "tensorflow_sol_code": "poisson_nll_loss = tf.keras.losses.Poisson()\nloss = poisson_nll_loss(target_tensor, tf.math.exp(input_tensor))", "tensorflow_test_code": {"setup_code": "expected_loss = 2.3630867\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 113, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(0)\ninput_tensor = torch.randn(3, 5, requires_grad=True)\ntarget_tensor = torch.randn(3, 5)\nvar_tensor = torch.ones(3, 5)\n#loss = ", "pytorch_sol_code": "gaussian_nll_loss = nn.GaussianNLLLoss()\nloss = gaussian_nll_loss(input_tensor, target_tensor, var_tensor)", "pytorch_test_code": {"setup_code": "expected_loss = 0.8254\n", "test_cases": ["assert torch.isclose(loss, torch.tensor(expected_loss), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss}'\n"]}, "tensorflow_library": "import tensorflow as tf\nimport tensorflow_probability as tfp", "tensorflow_start_code": "tf.random.set_seed(0)\ninput_tensor = tf.random.normal([3, 5], seed=0)\ntarget_tensor = tf.random.normal([3, 5], seed=0)\nvar_tensor = tf.ones([3, 5])\n#loss = ", "tensorflow_sol_code": "gaussian_nll_loss = tf.reduce_mean(tfp.distributions.Normal(loc=input_tensor, scale=tf.sqrt(var_tensor)).log_prob(target_tensor))\nloss = -gaussian_nll_loss", "tensorflow_test_code": {"setup_code": "expected_loss = 2.9229803\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss}'\n"]}} |
| {"question_id": 114, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "torch.manual_seed(0)\ninput_tensor = torch.sigmoid(torch.randn(3, 5, requires_grad=True))\ntarget_tensor = torch.tensor([[1, 0, 1, 0, 1], [0, 1, 0, 1, 0], [1, 0, 1, 0, 1]], dtype=torch.float)\n#loss = ", "pytorch_sol_code": "bce_loss = nn.BCELoss()\nloss = bce_loss(input_tensor, target_tensor)", "pytorch_test_code": {"setup_code": "expected_loss_1 = 0.9850\nexpected_loss_2 = 0.8601\n", "test_cases": ["assert torch.isclose(loss, torch.tensor(expected_loss_1), atol=1e-4), f'Calculated loss {loss.item()} does not match expected loss {expected_loss_1}'\n", "input_tensor_2 = torch.sigmoid(torch.randn(3, 5, requires_grad=True))\ntarget_tensor_2 = torch.tensor([[0, 1, 0, 1, 0], [1, 0, 1, 0, 1], [0, 1, 0, 1, 0]], dtype=torch.float)\nloss_2 = bce_loss(input_tensor_2, target_tensor_2)\nassert torch.isclose(loss_2, torch.tensor(expected_loss_2), atol=1e-4), f'Calculated loss {loss_2.item()} does not match expected loss {expected_loss_2}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tf.random.set_seed(0)\ninput_tensor = tf.sigmoid(tf.random.normal([3, 5], seed=0))\ntarget_tensor = tf.constant([[1, 0, 1, 0, 1], [0, 1, 0, 1, 0], [1, 0, 1, 0, 1]], dtype=tf.float32)\n#loss = ", "tensorflow_sol_code": "bce_loss = tf.keras.losses.BinaryCrossentropy()\nloss = bce_loss(target_tensor, input_tensor)", "tensorflow_test_code": {"setup_code": "expected_loss_1 = 0.8719441\nexpected_loss_2 = 0.63218504\n", "test_cases": ["assert tf.experimental.numpy.isclose(loss, expected_loss_1, atol=1e-4), f'Calculated loss {loss.numpy()} does not match expected loss {expected_loss_1}'\n", "input_tensor_2 = tf.sigmoid(tf.random.normal([3, 5], seed=1))\ntarget_tensor_2 = tf.constant([[0, 1, 0, 1, 0], [1, 0, 1, 0, 1], [0, 1, 0, 1, 0]], dtype=tf.float32)\nloss_2 = bce_loss(target_tensor_2, input_tensor_2)\nassert tf.experimental.numpy.isclose(loss_2, expected_loss_2, atol=1e-4), f'Calculated loss {loss_2.numpy()} does not match expected loss {expected_loss_2}'"]}} |
| {"question_id": 115, "pytorch_library": "import torch\nimport torch.nn.functional as F", "pytorch_start_code": "torch.manual_seed(12)\ninput_tensor = torch.randn(10)\n# Apply dropout with rate=0.3\n#output_tensor = ", "pytorch_sol_code": "output_tensor = F.dropout(input_tensor, p=0.3, training=True)", "pytorch_test_code": {"setup_code": "expected_nonzero_count = 9\nexpected_elements = torch.tensor([-0.3054, -1.9685, 0.6451, 1.1226, -1.5549, -0.7998, -1.3337, 0.0685,-0.1205])\n", "test_cases": ["nonzero_elements = output_tensor[output_tensor != 0]\nassert torch.sum(output_tensor != 0).item() == expected_nonzero_count, f'Non-zero elements count {torch.sum(output_tensor != 0).item()} does not match expected {expected_nonzero_count}'\n", "assert torch.allclose(nonzero_elements, expected_elements, atol=1e-4), f'Non-zero elements {nonzero_elements} do not match expected {expected_elements}'\n"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "tf.random.set_seed(0)\ninput_tensor = tf.random.normal([10])+1\n# Apply dropout with rate=0.3\n#output_tensor = ", "tensorflow_sol_code": "output_tensor = tf.nn.dropout(input_tensor, rate=0.3)", "tensorflow_test_code": {"setup_code": "expected_nonzero_count = 8\nexpected_elements = tf.constant([[ 3.5872324 ],[ 0.8290073 ],[-0.05148172],[-0.3383255 ],[ 2.10039 ],[ 3.1269405 ],[ 2.2893333 ],[ 2.2853017 ]], dtype=tf.float32)\n", "test_cases": ["nonzero_elements = tf.gather(output_tensor, tf.where(output_tensor != 0))\nassert tf.reduce_sum(tf.cast(output_tensor != 0, tf.int32)).numpy() == expected_nonzero_count, f'Non-zero elements count {tf.reduce_sum(tf.cast(output_tensor != 0, tf.int32)).numpy()} does not match expected {expected_nonzero_count}'\n", "assert tf.reduce_all(tf.experimental.numpy.isclose(nonzero_elements, expected_elements, atol=1e-4)), f'Non-zero elements {nonzero_elements.numpy()} do not match expected {expected_elements.numpy()}'\n"]}} |
| {"question_id": 116, "pytorch_library": "import torch\nimport torch.nn as nn\nimport random", "pytorch_start_code": "class DynamicNet(nn.Module):\n    def __init__(self):\n        super().__init__()\n        # Initialize model parameters here", "pytorch_sol_code": "class DynamicNet(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.a = nn.Parameter(torch.randn(()))\n        self.b = nn.Parameter(torch.randn(()))\n        self.c = nn.Parameter(torch.randn(()))\n        self.d = nn.Parameter(torch.randn(()))\n        self.e = nn.Parameter(torch.randn(()))\n\n    def forward(self, x):\n        y = self.a + self.b * x + self.c * x**2 + self.d * x**3\n        for exp in range(4, random.randint(4, 6)):\n            y = y + self.e * x**exp\n        return y\n\nmodel = DynamicNet()", "pytorch_test_code": {"setup_code": "import torch\nimport random\nimport numpy as np\nmodel = DynamicNet()\nrandom.seed(0)\n", "test_cases": ["x = torch.tensor(2.0)\nresult = model(x)\nassert isinstance(result, torch.Tensor), 'Model output should be a tensor'", "x = torch.tensor(1.0)\nresult = model(x)\nexpected_min = model.a.item() + model.b.item() + model.c.item() + model.d.item() + model.e.item()\nassert np.isclose(result.item(),expected_min), 'Polynomial output should equal to expected minimum'", "x = torch.tensor([2.0, 3.0])\nassert model(x).shape == torch.Size([2]), 'Output shape should match the number of input elements'"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np\nimport random", "tensorflow_start_code": "class DynamicNet(tf.keras.layers.Layer):\n    def __init__(self):\n        super().__init__()\n        # Initialize model parameters here", "tensorflow_sol_code": "class DynamicNet(tf.keras.layers.Layer):\n    def __init__(self):\n        super().__init__()\n        self.a = self.add_weight(shape=(), initializer='random_normal')\n        self.b = self.add_weight(shape=(), initializer='random_normal')\n        self.c = self.add_weight(shape=(), initializer='random_normal')\n        self.d = self.add_weight(shape=(), initializer='random_normal')\n        self.e = self.add_weight(shape=(), initializer='random_normal')\n\n    def call(self, x):\n        y = self.a + self.b * x + self.c * x**2 + self.d * x**3\n        for exp in range(4, random.randint(4, 6)):\n            y = y + self.e * x**exp\n        return y\n\nmodel = DynamicNet()", "tensorflow_test_code": {"setup_code": "import tensorflow as tf\nimport random\nrandom.seed(0)\nmodel = DynamicNet()", "test_cases": ["x = tf.constant(2.0)\nresult = model(x)\nassert tf.is_tensor(result), 'Model output should be a tensor'", "x = tf.constant(1.0)\nresult = model(x)\nexpected_min = model.a.numpy() + model.b.numpy() + model.c.numpy() + model.d.numpy() + model.e.numpy()\nassert np.isclose(result.numpy(),expected_min), 'Polynomial output should equal to expected minimum'", "x = tf.constant([2.0, 3.0])\nresult = model(x)\nassert result.shape == (2,), 'Output shape should match the number of input elements'"]}} |
| {"question_id": 117, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math", "pytorch_start_code": "class CustomLayer(nn.Module):\n    def __init__(self, in_features, out_features, custom_param):\n        super().__init__()\n        # Initialize weights and optional parameters", "pytorch_sol_code": "class CustomLayer(nn.Module):\n    def __init__(self, in_features, out_features, custom_param):\n        super().__init__()\n        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))\n        self.bias = None # Optionally, describe custom parameters\n        self.custom_param = custom_param\n        self.reset_parameters() # Example: Initialize weights and optional parameters\n    def reset_parameters(self):\n        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n    def forward(self, x):\n        x = F.linear(x, self.weight, self.bias)\n        return x\n\nmodel = CustomLayer(100, 200, custom_param=0.5)", "pytorch_test_code": {"setup_code": "model = CustomLayer(100, 200, custom_param=0.5)", "test_cases": ["x = torch.randn(10, 100)\noutput = model(x)\nassert output.shape == (10, 200), 'Output shape should be (10, 200)'", "assert torch.is_tensor(model.weight), 'Weight should be a tensor'", "assert model.custom_param == 0.5, 'Custom parameter should be 0.5'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.initializers import HeUniform", "tensorflow_start_code": "class CustomLayer(layers.Layer):\n    def __init__(self, in_features, out_features, custom_param):\n        super(CustomLayer, self).__init__()\n        # Initialize weights and optional parameters", "tensorflow_sol_code": "class CustomLayer(layers.Layer):\n    def __init__(self, in_features, out_features, custom_param):\n        super().__init__()\n        self.weight = self.add_weight(shape=(in_features, out_features), initializer=HeUniform())\n        self.custom_param = custom_param\n    def call(self, inputs):\n        return tf.matmul(inputs, self.weight)\n\nmodel = CustomLayer(100, 200, custom_param=0.5)", "tensorflow_test_code": {"setup_code": "model = CustomLayer(100, 200, custom_param=0.5)", "test_cases": ["x = tf.random.normal([10, 100])\noutput = model(x)\nassert output.shape == (10, 200), 'Output shape should be (10, 200)'", "assert 'variable' in str(model.weight), 'Weight should be a variable'", "assert model.custom_param == 0.5, 'Custom parameter should be 0.5'"]}} |
| {"question_id": 118, "pytorch_library": "import torch", "pytorch_start_code": "# Set the seed for reproducibility\ntorch.manual_seed(42)\n# Create a random tensor of dtype float64\ninput_tensor = torch.randn(5, dtype=torch.float64)\n#long_tensor =", "pytorch_sol_code": "long_tensor = input_tensor.long()", "pytorch_test_code": {"setup_code": "expected_tensor = torch.tensor([0, 0, 0, 0, -1], dtype=torch.int64)\n", "test_cases": ["assert input_tensor.dtype == torch.float64, 'Input tensor should have dtype float64'", "assert long_tensor.dtype == torch.int64, 'Converted tensor should have dtype int64'", "assert torch.equal(long_tensor, expected_tensor), f'Converted tensor values do not match expected: {long_tensor}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "# Set the seed for reproducibility\ntf.random.set_seed(42)\n# Create a random tensor of dtype float64\ninput_tensor = tf.random.normal([5], seed = 42,dtype=tf.float64)\n#long_tensor =", "tensorflow_sol_code": "long_tensor = tf.cast(input_tensor, dtype=tf.int64)", "tensorflow_test_code": {"setup_code": "expected_tensor = tf.constant([-1, 0, 0, 0, 0], dtype=tf.int64)\n", "test_cases": ["assert input_tensor.dtype == tf.float64, 'Input tensor should have dtype float64'", "assert long_tensor.dtype == tf.int64, 'Converted tensor should have dtype int64'", "assert tf.reduce_all(tf.equal(long_tensor, expected_tensor)), f'Converted tensor values do not match expected: {long_tensor.numpy()}'"]}} |
| {"question_id": 119, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "# Define the model and save it to the variable 'model'.\n#model = Net()", "pytorch_sol_code": "class Net(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.conv1 = nn.Conv2d(1, 32, 3, 1)\n        self.conv2 = nn.Conv2d(32, 64, 3, 1)\n        self.dropout1 = nn.Dropout2d(0.25)\n        self.dropout2 = nn.Dropout2d(0.5)\n        self.fc1 = nn.Linear(9216, 128)\n        self.fc2 = nn.Linear(128, 10)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = F.relu(x)\n        x = self.conv2(x)\n        x = F.relu(x)\n        x = F.max_pool2d(x, 2)\n        x = self.dropout1(x)\n        x = torch.flatten(x, 1)\n        x = self.fc1(x)\n        x = F.relu(x)\n        x = self.dropout2(x)\n        x = self.fc2(x)\n        output = F.log_softmax(x, dim=1)\n        return output\n\nmodel = Net()", "pytorch_test_code": {"setup_code": "import torch\nmodel = Net()", "test_cases": ["assert isinstance(model.conv1, nn.Conv2d), 'First layer should be Conv2d'", "assert model.conv1.out_channels == 32, 'First Conv2d layer should have 32 output channels'", "assert isinstance(model.fc1, nn.Linear) and model.fc1.out_features == 128, 'First fully connected layer should have 128 output features'", "input_tensor = torch.randn(1, 1, 28, 28)\noutput = model(input_tensor)\nassert output.shape == (1, 10), 'Output tensor shape should be (1, 10)'", "assert torch.allclose(output.exp().sum(dim=1), torch.tensor([1.0])), 'Output should be a valid probability distribution (sum to 1)'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "# Define the model and save it to the variable 'model'.\n#model = Net()", "tensorflow_sol_code": "class Net(Model):\n    def __init__(self):\n        super().__init__()\n        self.conv1 = layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1))\n        self.conv2 = layers.Conv2D(64, 3, activation='relu')\n        self.max_pool = layers.MaxPooling2D(pool_size=(2, 2))\n        self.dropout1 = layers.Dropout(0.25)\n        self.flatten = layers.Flatten()\n        self.fc1 = layers.Dense(128, activation='relu')\n        self.dropout2 = layers.Dropout(0.5)\n        self.fc2 = layers.Dense(10, activation='softmax')\n\n    def call(self, x):\n        x = self.conv1(x)\n        x = self.conv2(x)\n        x = self.max_pool(x)\n        x = self.dropout1(x)\n        x = self.flatten(x)\n        x = self.fc1(x)\n        x = self.dropout2(x)\n        return self.fc2(x)\n\nmodel = Net()", "tensorflow_test_code": {"setup_code": "import numpy as np\nimport tensorflow as tf\nmodel = Net()", "test_cases": ["assert isinstance(model.conv1, layers.Conv2D), 'First layer should be Conv2D'", "assert model.conv1.filters == 32, 'First Conv2D layer should have 32 filters'", "assert isinstance(model.fc1, layers.Dense) and model.fc1.units == 128, 'First dense layer should have 128 units'", "input_tensor = tf.random.normal([1, 28, 28, 1])\noutput = model(input_tensor)\nassert output.shape == (1, 10), 'Output tensor shape should be (1, 10)'", "assert np.allclose(tf.reduce_sum(output, axis=1), tf.ones((1,))), 'Output should be a valid probability distribution (sum to 1)'"]}} |
| {"question_id": 120, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout2d(0.25)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\nmodel = Net()\n\n#total_params =", "pytorch_sol_code": "total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)", "pytorch_test_code": {"setup_code": "", "test_cases": ["assert total_params == 1199882, f'Expected 1199882 parameters, got {total_params}'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "model = tf.keras.Sequential([\n layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),\n layers.Conv2D(64, 3, activation='relu'),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Dropout(0.25),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dropout(0.5),\n layers.Dense(10, activation='softmax')\n])\n\n", "tensorflow_sol_code": "total_params = model.count_params()", "tensorflow_test_code": {"setup_code": "", "test_cases": ["assert total_params == 1199882, f'Expected 1199882 parameters, got {total_params}'"]}} |
| {"question_id": 121, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class VAE(nn.Module):\n    def __init__(self):\n        super().__init__()\n        # Initialize the model layers here", "pytorch_sol_code": "class VAE(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.fc1 = nn.Linear(784, 400)\n        self.fc21 = nn.Linear(400, 20)\n        self.fc22 = nn.Linear(400, 20)\n        self.fc3 = nn.Linear(20, 400)\n        self.fc4 = nn.Linear(400, 784)\n\n    def encode(self, x):\n        h1 = F.relu(self.fc1(x))\n        return self.fc21(h1), self.fc22(h1)\n\n    def reparameterize(self, mu, logvar):\n        std = torch.exp(0.5*logvar)\n        eps = torch.randn_like(std)\n        return mu + eps*std\n\n    def decode(self, z):\n        h3 = F.relu(self.fc3(z))\n        return torch.sigmoid(self.fc4(h3))\n\n    def forward(self, x):\n        mu, logvar = self.encode(x.view(-1, 784))\n        z = self.reparameterize(mu, logvar)\n        return self.decode(z), mu, logvar\n\nmodel = VAE()", "pytorch_test_code": {"setup_code": "import torch\nmodel = VAE()", "test_cases": ["input_tensor = torch.randn(1, 784)\noutput, mu, logvar = model(input_tensor)\nassert output.shape == (1, 784), 'Output shape should be (1, 784)'", "assert mu.shape == logvar.shape == torch.Size([1, 20]), 'Mu and logvar shapes should be (1, 20)'", "z = model.reparameterize(mu, logvar)\nassert z.shape == torch.Size([1, 20]), 'Latent variable z should have shape (1, 20)'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "# Define the VAE model using TensorFlow/Keras", "tensorflow_sol_code": "class VAE(Model):\n    def __init__(self):\n        super().__init__()\n        self.fc1 = layers.Dense(400, activation='relu')\n        self.fc21 = layers.Dense(20)\n        self.fc22 = layers.Dense(20)\n        self.fc3 = layers.Dense(400, activation='relu')\n        self.fc4 = layers.Dense(784, activation='sigmoid')\n\n    def encode(self, x):\n        h1 = self.fc1(x)\n        return self.fc21(h1), self.fc22(h1)\n\n    def reparameterize(self, mu, logvar):\n        std = tf.exp(0.5 * logvar)\n        eps = tf.random.normal(std.shape)\n        return mu + eps * std\n\n    def decode(self, z):\n        h3 = self.fc3(z)\n        return self.fc4(h3)\n\n    def call(self, x):\n        mu, logvar = self.encode(tf.reshape(x, [-1, 784]))\n        z = self.reparameterize(mu, logvar)\n        return self.decode(z), mu, logvar\n\nmodel = VAE()", "tensorflow_test_code": {"setup_code": "import numpy as np\nimport tensorflow as tf\nmodel = VAE()", "test_cases": ["input_tensor = tf.random.normal([1, 784])\noutput, mu, logvar = model(input_tensor)\nassert output.shape == (1, 784), 'Output shape should be (1, 784)'", "assert mu.shape == logvar.shape == (1, 20), 'Mu and logvar shapes should be (1, 20)'", "z = model.reparameterize(mu, logvar)\nassert z.shape == (1, 20), 'Latent variable z should have shape (1, 20)'"]}} |
| {"question_id": 122, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\nclass Generator(nn.Module):\n    def __init__(self, ngpu):\n        super().__init__()\n        nz = 100\n        ngf = 64\n        nc = 3\n        self.ngpu = ngpu\n", "pytorch_sol_code": "\n\nclass Generator(nn.Module):\n    def __init__(self, ngpu):\n        super().__init__()\n        nz = 100\n        ngf = 64\n        nc = 3\n        self.ngpu = ngpu\n        self.main = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 8),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh()\n        )\n\n    def forward(self, input):\n        if input.is_cuda and self.ngpu > 1:\n            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))\n        else:\n            output = self.main(input)\n        return output\n\nmodel = Generator(ngpu=1)\n", "pytorch_test_code": {"setup_code": "nz = 100\nngf = 64\nnc = 3\nmodel = Generator(ngpu=1)", "test_cases": ["input_tensor = torch.randn(1, nz, 1, 1)\noutput = model(input_tensor)\nassert output.shape == (1, nc, 64, 64), 'Output shape should be (1, nc, 64, 64)'", "assert isinstance(model.main[0], nn.ConvTranspose2d), 'First layer should be ConvTranspose2d'", "assert isinstance(model.main[-1], nn.Tanh), 'Last layer should be Tanh activation'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "\nclass Generator(Model):\n    def __init__(self):\n        super().__init__()\n        nz = 100\n        ngf = 64\n        nc = 3", "tensorflow_sol_code": "\n\nclass Generator(Model):\n    def __init__(self):\n        super().__init__()\n        nz = 100\n        ngf = 64\n        nc = 3\n        self.main = tf.keras.Sequential([\n            layers.Conv2DTranspose(ngf * 8, (4, 4), strides=(1, 1), padding='valid', use_bias=False),\n            layers.BatchNormalization(),\n            layers.ReLU(),\n            layers.Conv2DTranspose(ngf * 4, (4, 4), strides=(2, 2), padding='same', use_bias=False),\n            layers.BatchNormalization(),\n            layers.ReLU(),\n            layers.Conv2DTranspose(ngf * 2, (4, 4), strides=(2, 2), padding='same', use_bias=False),\n            layers.BatchNormalization(),\n            layers.ReLU(),\n            layers.Conv2DTranspose(ngf, (4, 4), strides=(2, 2), padding='same', use_bias=False),\n            layers.BatchNormalization(),\n            layers.ReLU(),\n            layers.Conv2DTranspose(nc, (4, 4), strides=(2, 2), padding='same', use_bias=False),\n            layers.Activation('tanh')\n        ])\n\n    def call(self, input):\n        return self.main(input)\n\nmodel = Generator()\n", "tensorflow_test_code": {"setup_code": "nz = 100\nngf = 64\nnc = 3\nmodel = Generator()", "test_cases": ["input_tensor = tf.random.normal([1, 1, 1, nz])\noutput = model(input_tensor)\nassert output.shape == (1, 64, 64, nc), 'Output shape should be (1, 64, 64, nc)'", "assert isinstance(model.main.layers[0], layers.Conv2DTranspose), 'First layer should be Conv2DTranspose'", "assert isinstance(model.main.layers[-1], layers.Activation) and model.main.layers[-1].activation == tf.keras.activations.tanh, 'Last layer should be Tanh activation'"]}} |
| {"question_id": 123, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "# Define the BoWClassifier model\n#model = BoWClassifier(num_labels, vocab_size)", "pytorch_sol_code": "\nclass BoWClassifier(nn.Module):\n\n def __init__(self, num_labels, vocab_size):\n super().__init__()\n self.linear = nn.Linear(vocab_size, num_labels)\n\n def forward(self, bow_vec):\n return F.log_softmax(self.linear(bow_vec), dim=1)\n\n# Define a BoWClassifier with 2 labels and a vocabulary size of 1000\nmodel = BoWClassifier(num_labels=2, vocab_size=1000)\n", "pytorch_test_code": {"setup_code": "model = BoWClassifier(num_labels=2, vocab_size=1000)\ninput_tensor = torch.zeros(1, 1000)\ninput_tensor[0, 100] = 1\ninput_tensor[0, 200] = 1\n", "test_cases": ["output = model(input_tensor)\nassert output.shape == (1, 2), 'Output should be of shape (1, 2)'", "assert isinstance(model.linear, nn.Linear), 'The model should contain a Linear layer'", "assert torch.allclose(output.exp().sum(), torch.tensor(1.0)), 'The output probabilities should sum to 1'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "# Define the BoWClassifier model\n#model = BoWClassifier(num_labels, vocab_size)", "tensorflow_sol_code": "\nclass BoWClassifier(Model):\n\n def __init__(self, num_labels, vocab_size):\n super().__init__()\n self.linear = layers.Dense(num_labels)\n\n def call(self, bow_vec):\n return tf.nn.log_softmax(self.linear(bow_vec), axis=1)\n\n# Define a BoWClassifier with 2 labels and a vocabulary size of 1000\nmodel = BoWClassifier(num_labels=2, vocab_size=1000)\n", "tensorflow_test_code": {"setup_code": "model = BoWClassifier(num_labels=2, vocab_size=1000)\ninput_tensor = tf.zeros([1, 1000])\ninput_tensor = tf.tensor_scatter_nd_update(input_tensor, [[0, 100], [0, 200]], [1.0, 1.0])\n", "test_cases": ["output = model(input_tensor)\nassert output.shape == (1, 2), 'Output 
should be of shape (1, 2)'", "assert isinstance(model.linear, layers.Dense), 'The model should contain a Dense layer'", "assert tf.reduce_all(tf.exp(output) <= 1.0), 'The output values should represent valid probabilities'"]}} |
| {"question_id": 124, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "# Define the LSTMTagger model\n#model = LSTMTagger(embedding_dim, hidden_dim, vocab_size, tagset_size)", "pytorch_sol_code": "\nclass LSTMTagger(nn.Module):\n\n def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim)\n self.hidden2tag = nn.Linear(hidden_dim, tagset_size)\n\n def forward(self, sentence):\n embeds = self.word_embeddings(sentence)\n lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))\n tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))\n tag_scores = F.log_softmax(tag_space, dim=1)\n return tag_scores\n\n# Define the LSTMTagger with appropriate dimensions\nmodel = LSTMTagger(embedding_dim=6, hidden_dim=6, vocab_size=10, tagset_size=3)\n", "pytorch_test_code": {"setup_code": "model = LSTMTagger(embedding_dim=6, hidden_dim=6, vocab_size=10, tagset_size=3)\nsentence = torch.tensor([1, 2, 3, 4], dtype=torch.long)\n", "test_cases": ["tag_scores = model(sentence)\nassert tag_scores.shape == (4, 3), 'Tag scores shape should be (sentence_length, tagset_size)'", "assert isinstance(model.lstm, nn.LSTM), 'Model should contain an LSTM layer'", "assert isinstance(model.hidden2tag, nn.Linear), 'Model should contain a Linear layer mapping hidden states to tag scores'", "assert torch.allclose(tag_scores.exp().sum(dim=1), torch.tensor(1.0)), 'Each tag score row should sum to 1 (valid probability distribution)'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "# Define the LSTMTagger model\n#model = LSTMTagger(embedding_dim, hidden_dim, vocab_size, tagset_size)", "tensorflow_sol_code": "\nclass LSTMTagger(Model):\n\n def __init__(self, embedding_dim, hidden_dim, 
vocab_size, tagset_size):\n super().__init__()\n self.embedding = layers.Embedding(vocab_size, embedding_dim)\n self.lstm = layers.LSTM(hidden_dim, return_sequences=True)\n self.hidden2tag = layers.Dense(tagset_size)\n\n def call(self, sentence):\n # Add batch dimension if not already present\n if len(sentence.shape) == 1:\n sentence = tf.expand_dims(sentence, axis=0)\n embeds = self.embedding(sentence)\n lstm_out = self.lstm(embeds)\n tag_space = self.hidden2tag(lstm_out)\n tag_scores = tf.nn.log_softmax(tag_space, axis=-1)\n return tag_scores\n\n# Define the LSTMTagger with appropriate dimensions\nmodel = LSTMTagger(embedding_dim=6, hidden_dim=6, vocab_size=10, tagset_size=3)\n", "tensorflow_test_code": {"setup_code": "sentence = tf.constant([1, 2, 3, 4], dtype=tf.int32)\nmodel = LSTMTagger(embedding_dim=6, hidden_dim=6, vocab_size=10, tagset_size=3)\n", "test_cases": ["tag_scores = model(sentence)\nassert tag_scores.shape == (1, 4, 3), 'Tag scores shape should be (batch_size, sequence_length, tagset_size)'", "sentence_batch = tf.constant([[1, 2, 3, 4], [1, 2, 3, 0]], dtype=tf.int32)\ntag_scores_batch = model(sentence_batch)\nassert tag_scores_batch.shape == (2, 4, 3), 'Tag scores shape should be (batch_size, sequence_length, tagset_size)'", "assert isinstance(model.lstm, layers.LSTM), 'Model should contain an LSTM layer'", "assert isinstance(model.hidden2tag, layers.Dense), 'Model should contain a Dense layer mapping hidden states to tag scores'", "assert tf.reduce_all(tf.exp(tag_scores) <= 1.0), 'Tag scores should represent valid probabilities'"]}} |
| {"question_id": 125, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class EncoderRNN(nn.Module):\n def __init__(self, input_size, hidden_size, dropout_p=0.1):\n super().__init__()\n self.hidden_size = hidden_size\n self.embedding = nn.Embedding(input_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True)\n self.dropout = nn.Dropout(dropout_p)\n\n def forward(self, input):\n embedded = self.dropout(self.embedding(input))\n output, hidden = self.gru(embedded)\n return output, hidden\n\n# Initialize the EncoderRNN\nmodel = EncoderRNN(input_size=10, hidden_size=20, dropout_p=0.1)\n", "pytorch_sol_code": "class EncoderRNN(nn.Module):\n def __init__(self, input_size, hidden_size, dropout_p=0.1):\n super().__init__()\n self.hidden_size = hidden_size\n self.embedding = nn.Embedding(input_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True)\n self.dropout = nn.Dropout(dropout_p)\n\n def forward(self, input):\n embedded = self.dropout(self.embedding(input))\n output, hidden = self.gru(embedded)\n return output, hidden\n\nmodel = EncoderRNN(input_size=10, hidden_size=20, dropout_p=0.1)\n", "pytorch_test_code": {"setup_code": "model = EncoderRNN(input_size=10, hidden_size=20)\nsentence = torch.tensor([[1, 2, 3, 4]])", "test_cases": ["output, hidden = model(sentence)\nassert output.shape == (1, 4, 20), 'Output shape should be (batch_size, sequence_length, hidden_size)'", "assert hidden.shape == (1, 1, 20), 'Hidden shape should be (num_layers, batch_size, hidden_size)'", "assert isinstance(model.embedding, nn.Embedding), 'Embedding layer should be of type nn.Embedding'", "assert isinstance(model.gru, nn.GRU), 'RNN layer should be of type nn.GRU'", "assert isinstance(model.dropout, nn.Dropout), 'Dropout layer should be of type nn.Dropout'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", 
"tensorflow_start_code": "class EncoderRNN(Model):\n def __init__(self, input_size, hidden_size, dropout_p=0.1):\n super().__init__()\n self.embedding = layers.Embedding(input_size, hidden_size)\n self.gru = layers.GRU(hidden_size, return_sequences=True, return_state=True)\n self.dropout = layers.Dropout(dropout_p)\n\n def call(self, input):\n embedded = self.dropout(self.embedding(input))\n output, hidden = self.gru(embedded)\n return output, hidden\n\nmodel = EncoderRNN(input_size=10, hidden_size=20, dropout_p=0.1)\n", "tensorflow_sol_code": "class EncoderRNN(Model):\n def __init__(self, input_size, hidden_size, dropout_p=0.1):\n super().__init__()\n self.embedding = layers.Embedding(input_size, hidden_size)\n self.gru = layers.GRU(hidden_size, return_sequences=True, return_state=True)\n self.dropout = layers.Dropout(dropout_p)\n\n def call(self, input):\n embedded = self.dropout(self.embedding(input))\n output, hidden = self.gru(embedded)\n return output, hidden\n\nmodel = EncoderRNN(input_size=10, hidden_size=20, dropout_p=0.1)\n", "tensorflow_test_code": {"setup_code": "sentence = tf.constant([[1, 2, 3, 4]])\nmodel = EncoderRNN(input_size=10, hidden_size=20, dropout_p=0.1)", "test_cases": ["output, hidden = model(sentence)\nassert output.shape == (1, 4, 20), 'Output shape should be (batch_size, sequence_length, hidden_size)'", "assert hidden.shape == (1, 20), 'Hidden shape should be (batch_size, hidden_size)'", "assert isinstance(model.embedding, layers.Embedding), 'Embedding layer should be of type layers.Embedding'", "assert isinstance(model.gru, layers.GRU), 'RNN layer should be of type layers.GRU'", "assert isinstance(model.dropout, layers.Dropout), 'Dropout layer should be of type layers.Dropout'"]}} |
| {"question_id": 126, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class DecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size):\n super().__init__()\n self.embedding = nn.Embedding(output_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True)\n self.out = nn.Linear(hidden_size, output_size)\n self.SOS_token = 0 \n self.MAX_LENGTH = 5\n\n def forward(self, encoder_outputs, encoder_hidden, target_tensor=None):\n pass", "pytorch_sol_code": "class DecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size):\n super().__init__()\n self.embedding = nn.Embedding(output_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True)\n self.out = nn.Linear(hidden_size, output_size)\n self.SOS_token = 0 \n self.MAX_LENGTH = 5\n\n def forward(self, encoder_outputs, encoder_hidden, target_tensor=None):\n batch_size = encoder_outputs.size(0)\n decoder_input = torch.empty(batch_size, 1, dtype=torch.long).fill_(self.SOS_token)\n decoder_hidden = encoder_hidden\n decoder_outputs = []\n\n for i in range(self.MAX_LENGTH):\n decoder_output, decoder_hidden = self.forward_step(decoder_input, decoder_hidden)\n decoder_outputs.append(decoder_output)\n if target_tensor is not None:\n decoder_input = target_tensor[:, i].unsqueeze(1) # Teacher forcing\n else:\n _, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze(-1).detach()\n\n decoder_outputs = torch.cat(decoder_outputs, dim=1)\n decoder_outputs = F.log_softmax(decoder_outputs, dim=-1)\n return decoder_outputs, decoder_hidden, None\n\n def forward_step(self, input, hidden):\n output = self.embedding(input)\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n output = self.out(output)\n return output, hidden\n\nmodel = DecoderRNN(hidden_size=20, output_size=10)\n", "pytorch_test_code": {"setup_code": "hidden_size = 20\noutput_size = 10\ndecoder = 
DecoderRNN(hidden_size, output_size)\nencoder_hidden = torch.zeros(1, 1, hidden_size)\nencoder_outputs = torch.zeros(1, decoder.MAX_LENGTH, hidden_size)\ntarget_tensor = torch.tensor([[1, 2, 3, 4, 5]])", "test_cases": ["decoder_outputs, decoder_hidden, _ = decoder(encoder_outputs, encoder_hidden, target_tensor)\nassert decoder_outputs.shape == (1, decoder.MAX_LENGTH, output_size), 'Decoder output shape should be (batch_size, MAX_LENGTH, output_size)'", "assert decoder_hidden.shape == (1, 1, hidden_size), 'Decoder hidden state shape should match (num_layers, batch_size, hidden_size)'", "assert isinstance(decoder.embedding, nn.Embedding), 'Embedding layer should be of type nn.Embedding'", "assert isinstance(decoder.gru, nn.GRU), 'GRU layer should be of type nn.GRU'", "assert isinstance(decoder.out, nn.Linear), 'Final output layer should be a Linear layer'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class DecoderRNN(Model):\n def __init__(self, hidden_size, output_size):\n super().__init__()\n self.embedding = layers.Embedding(output_size, hidden_size)\n self.gru = layers.GRU(hidden_size, return_sequences=True, return_state=True)\n self.out = layers.Dense(output_size)\n self.SOS_token = 0 \n self.MAX_LENGTH = 5\n\n def call(self, encoder_outputs, encoder_hidden, target_tensor=None):\n pass", "tensorflow_sol_code": "class DecoderRNN(Model):\n def __init__(self, hidden_size, output_size):\n super().__init__()\n self.embedding = layers.Embedding(output_size, hidden_size)\n self.gru = layers.GRU(hidden_size, return_sequences=True, return_state=True)\n self.out = layers.Dense(output_size)\n self.SOS_token = 0 \n self.MAX_LENGTH = 5\n\n def call(self, encoder_outputs, encoder_hidden, target_tensor=None):\n batch_size = tf.shape(encoder_outputs)[0]\n decoder_input = tf.fill([batch_size, 1], self.SOS_token)\n decoder_hidden = encoder_hidden\n decoder_outputs = []\n\n for i in 
range(self.MAX_LENGTH):\n decoder_output, decoder_hidden = self.forward_step(decoder_input, decoder_hidden)\n decoder_outputs.append(decoder_output)\n if target_tensor is not None:\n decoder_input = tf.expand_dims(target_tensor[:, i], axis=1)\n else:\n decoder_input = tf.argmax(decoder_output, axis=-1)\n\n decoder_outputs = tf.concat(decoder_outputs, axis=1)\n decoder_outputs = tf.nn.log_softmax(decoder_outputs, axis=-1)\n return decoder_outputs, decoder_hidden, None\n\n def forward_step(self, input, hidden):\n output = self.embedding(input)\n output = tf.nn.relu(output)\n output, hidden = self.gru(output, initial_state=hidden)\n output = self.out(output)\n return output, hidden\n\nmodel = DecoderRNN(hidden_size=20, output_size=10)\n", "tensorflow_test_code": {"setup_code": "hidden_size = 20\noutput_size = 10\nencoder_hidden = tf.zeros([1, hidden_size])\nencoder_outputs = tf.zeros([1, 5, hidden_size])\ntarget_tensor = tf.constant([[1, 2, 3, 4, 5]])", "test_cases": ["decoder_outputs, decoder_hidden, _ = model(encoder_outputs, encoder_hidden, target_tensor)\nassert decoder_outputs.shape == (1, model.MAX_LENGTH, output_size), 'Decoder output shape should be (batch_size, MAX_LENGTH, output_size)'", "assert decoder_hidden.shape == (1, hidden_size), 'Decoder hidden state shape should match (batch_size, hidden_size)'", "assert isinstance(model.embedding, layers.Embedding), 'Embedding layer should be of type layers.Embedding'", "assert isinstance(model.gru, layers.GRU), 'GRU layer should be of type layers.GRU'", "assert isinstance(model.out, layers.Dense), 'Final output layer should be a Dense layer'"]}} |
| {"question_id": 127, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class BahdanauAttention(nn.Module):\n def __init__(self, hidden_size):\n super().__init__()\n self.Wa = nn.Linear(hidden_size, hidden_size)\n self.Ua = nn.Linear(hidden_size, hidden_size)\n self.Va = nn.Linear(hidden_size, 1)\n\n def forward(self, query, keys):\n pass\n #scores=", "pytorch_sol_code": "class BahdanauAttention(nn.Module):\n def __init__(self, hidden_size):\n super().__init__()\n self.Wa = nn.Linear(hidden_size, hidden_size)\n self.Ua = nn.Linear(hidden_size, hidden_size)\n self.Va = nn.Linear(hidden_size, 1)\n\n def forward(self, query, keys):\n scores = self.Va(torch.tanh(self.Wa(query) + self.Ua(keys)))\n scores = scores.squeeze(2).unsqueeze(1)\n weights = F.softmax(scores, dim=-1)\n context = torch.bmm(weights, keys)\n return context, weights\n", "pytorch_test_code": {"setup_code": "hidden_size = 128\nquery = torch.randn(16, 1, hidden_size)\nkeys = torch.randn(16, 10, hidden_size)\nmodel = BahdanauAttention(hidden_size)", "test_cases": ["context, weights = model(query, keys)\nassert context.shape == (16, 1, hidden_size), f'Context shape mismatch: {context.shape}'", "assert weights.shape == (16, 1, 10), f'Attention weights shape mismatch: {weights.shape}'", "assert isinstance(model.Wa, nn.Linear), 'Wa should be an instance of nn.Linear'", "assert isinstance(model.Ua, nn.Linear), 'Ua should be an instance of nn.Linear'", "assert isinstance(model.Va, nn.Linear), 'Va should be an instance of nn.Linear'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class BahdanauAttention(tf.keras.layers.Layer):\n def __init__(self, hidden_size):\n super().__init__()\n self.Wa = layers.Dense(hidden_size)\n self.Ua = layers.Dense(hidden_size)\n self.Va = layers.Dense(1)\n\n def call(self, query, keys):\n \n pass\n #scores=", "tensorflow_sol_code": "class 
BahdanauAttention(tf.keras.layers.Layer):\n def __init__(self, hidden_size):\n super().__init__()\n self.Wa = layers.Dense(hidden_size)\n self.Ua = layers.Dense(hidden_size)\n self.Va = layers.Dense(1)\n\n def call(self, query, keys):\n scores = self.Va(tf.nn.tanh(self.Wa(query) + self.Ua(keys)))\n scores = tf.squeeze(scores, axis=-1)\n weights = tf.nn.softmax(scores, axis=-1)\n weights = tf.expand_dims(weights, axis=1)\n context = tf.matmul(weights, keys)\n return context, weights\n", "tensorflow_test_code": {"setup_code": "hidden_size = 128\nquery = tf.random.normal([16, 1, hidden_size])\nkeys = tf.random.normal([16, 10, hidden_size])\nmodel = BahdanauAttention(hidden_size)", "test_cases": ["context, weights = model(query, keys)\nassert context.shape == (16, 1, hidden_size), f'Context shape mismatch: {context.shape}'", "assert weights.shape == (16, 1, 10), f'Attention weights shape mismatch: {weights.shape}'", "assert isinstance(model.Wa, layers.Dense), 'Wa should be an instance of layers.Dense'", "assert isinstance(model.Ua, layers.Dense), 'Ua should be an instance of layers.Dense'", "assert isinstance(model.Va, layers.Dense), 'Va should be an instance of layers.Dense'"]}} |
| {"question_id": 128, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class SimpleMLP(nn.Module):\n def __init__(self):\n super().__init__()\n # Initialize layers here", "pytorch_sol_code": "class SimpleMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 128)\n self.fc2 = nn.Linear(128, 128)\n self.fc3 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = x.flatten(1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.fc3(x)\n return x\n", "pytorch_test_code": {"setup_code": "model = SimpleMLP()\ninput_tensor = torch.randn(32, 1, 28, 28)", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.fc1, nn.Linear), 'First layer should be Linear'", "assert isinstance(model.fc2, nn.Linear), 'Second layer should be Linear'", "assert isinstance(model.fc3, nn.Linear), 'Third layer should be Linear'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class SimpleMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n # Initialize layers here", "tensorflow_sol_code": "class SimpleMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.fc1 = layers.Dense(128, activation='relu')\n self.fc2 = layers.Dense(128, activation='relu')\n self.fc3 = layers.Dense(10)\n\n def call(self, x):\n x = tf.reshape(x, [tf.shape(x)[0], -1])\n x = self.fc1(x)\n x = self.fc2(x)\n return self.fc3(x)\n", "tensorflow_test_code": {"setup_code": "model = SimpleMLP()\ninput_tensor = tf.random.normal([32, 28, 28, 1])", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.fc1, layers.Dense), 'First layer should be Dense'", "assert isinstance(model.fc2, layers.Dense), 'Second 
layer should be Dense'", "assert isinstance(model.fc3, layers.Dense), 'Third layer should be Dense'"]}} |
| {"question_id": 129, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class TextClassificationModel(nn.Module):\n def __init__(self, vocab_size, embed_dim, num_class):\n super().__init__()\n # Initialize layers here", "pytorch_sol_code": "class TextClassificationModel(nn.Module):\n def __init__(self, vocab_size, embed_dim, num_class):\n super().__init__()\n self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=False)\n self.fc = nn.Linear(embed_dim, num_class)\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.5\n self.embedding.weight.data.uniform_(-initrange, initrange)\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc.bias.data.zero_()\n\n def forward(self, text, offsets):\n embedded = self.embedding(text, offsets)\n return self.fc(embedded)\n", "pytorch_test_code": {"setup_code": "vocab_size = 10000\nembed_dim = 64\nnum_class = 5\nmodel = TextClassificationModel(vocab_size, embed_dim, num_class)\ntext = torch.randint(0, vocab_size, (32,))\noffsets = torch.tensor([0])", "test_cases": ["output = model(text, offsets)\nassert output.shape == (1, num_class), f'Expected output shape (1, {num_class}), got {output.shape}'", "assert isinstance(model.embedding, nn.EmbeddingBag), 'First layer should be EmbeddingBag'", "assert isinstance(model.fc, nn.Linear), 'Second layer should be Linear'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class TextClassificationModel(tf.keras.Model):\n def __init__(self, vocab_size, embed_dim, num_class):\n super().__init__()\n # Initialize layers here", "tensorflow_sol_code": "class TextClassificationModel(tf.keras.Model):\n def __init__(self, vocab_size, embed_dim, num_class):\n super().__init__()\n self.embedding = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)\n self.fc = layers.Dense(num_class)\n\n def call(self, text, offsets):\n x = 
self.embedding(text)\n x = tf.reduce_mean(x, axis=1)\n return self.fc(x)\n", "tensorflow_test_code": {"setup_code": "vocab_size = 10000\nembed_dim = 64\nnum_class = 5\nmodel = TextClassificationModel(vocab_size, embed_dim, num_class)\ntext = tf.random.uniform((32, 1), maxval=vocab_size, dtype=tf.int32)\noffsets = tf.constant([0])", "test_cases": ["output = model(text, offsets)\nassert output.shape == (32, num_class), f'Expected output shape (32, {num_class}), got {output.shape}'", "assert isinstance(model.embedding, layers.Embedding), 'First layer should be Embedding'", "assert isinstance(model.fc, layers.Dense), 'Second layer should be Dense'"]}} |
| {"question_id": 130, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F", "pytorch_start_code": "class Net(nn.Module):\n def __init__(self, upscale_factor):\n super().__init__()\n # Initialize layers here", "pytorch_sol_code": "class Net(nn.Module):\n def __init__(self, upscale_factor):\n super().__init__()\n self.relu = nn.ReLU()\n self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))\n self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))\n self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))\n self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))\n self.pixel_shuffle = nn.PixelShuffle(upscale_factor)\n self._initialize_weights()\n\n def forward(self, x):\n x = self.relu(self.conv1(x))\n x = self.relu(self.conv2(x))\n x = self.relu(self.conv3(x))\n x = self.pixel_shuffle(self.conv4(x))\n return x\n\n def _initialize_weights(self):\n init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))\n init.orthogonal_(self.conv2.weight, init.calculate_gain('relu'))\n init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))\n init.orthogonal_(self.conv4.weight)\n\nmodel = Net(upscale_factor=2)", "pytorch_test_code": {"setup_code": "model = Net(upscale_factor=2)\ninput_tensor = torch.randn(1, 1, 16, 16)", "test_cases": ["output = model(input_tensor)\nassert output.shape == (1, 1, 32, 32), f'Expected output shape (1, 1, 32, 32), got {output.shape}'", "assert isinstance(model.conv1, nn.Conv2d), 'First layer should be Conv2d'", "assert isinstance(model.pixel_shuffle, nn.PixelShuffle), 'PixelShuffle layer should be PixelShuffle'", "assert model.conv1.out_channels == 64, 'First convolution should have 64 output channels'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class Net(Model):\n def __init__(self, upscale_factor):\n super().__init__()\n # Initialize layers here", "tensorflow_sol_code": "class 
Net(Model):\n def __init__(self, upscale_factor):\n super().__init__()\n self.conv1 = layers.Conv2D(64, (5, 5), padding='same', activation='relu')\n self.conv2 = layers.Conv2D(64, (3, 3), padding='same', activation='relu')\n self.conv3 = layers.Conv2D(32, (3, 3), padding='same', activation='relu')\n self.conv4 = layers.Conv2D(upscale_factor ** 2, (3, 3), padding='same')\n self.upscale_factor = upscale_factor\n\n def call(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = tf.nn.depth_to_space(x, self.upscale_factor)\n return x\n\nmodel = Net(upscale_factor=2)", "tensorflow_test_code": {"setup_code": "model = Net(upscale_factor=2)\ninput_tensor = tf.random.normal([1, 16, 16, 1])", "test_cases": ["output = model(input_tensor)\nassert output.shape == (1, 32, 32, 1), f'Expected output shape (1, 32, 32, 1), got {output.shape}'", "assert isinstance(model.conv1, layers.Conv2D), 'First layer should be Conv2D'", "assert model.conv1.filters == 64, 'First convolution should have 64 filters'"]}} |
| {"question_id": 131, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class Net(nn.Module):\n def __init__(self):\n super().__init__()\n # Initialize layers here", "pytorch_sol_code": "class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.rnn = nn.LSTM(input_size=28, hidden_size=64, batch_first=True)\n self.batchnorm = nn.BatchNorm1d(64)\n self.dropout1 = nn.Dropout2d(0.25)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(64, 32)\n self.fc2 = nn.Linear(32, 10)\n\n def forward(self, input):\n input = input.reshape(-1, 28, 28)\n output, hidden = self.rnn(input)\n output = output[:, -1, :]\n output = self.batchnorm(output)\n output = self.dropout1(output)\n output = self.fc1(output)\n output = F.relu(output)\n output = self.dropout2(output)\n output = self.fc2(output)\n output = F.log_softmax(output, dim=1)\n return output\n\nmodel = Net()", "pytorch_test_code": {"setup_code": "model = Net()\ninput_tensor = torch.randn(32, 1, 28, 28)", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.rnn, nn.LSTM), 'First layer should be LSTM'", "assert isinstance(model.fc1, nn.Linear) and model.fc1.out_features == 32, 'First fully connected layer should have 32 output features'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class Net(Model):\n def __init__(self):\n super().__init__()\n # Initialize layers here", "tensorflow_sol_code": "class Net(Model):\n def __init__(self):\n super().__init__()\n self.lstm = layers.LSTM(64, input_shape=(28, 28), return_sequences=False)\n self.batchnorm = layers.BatchNormalization()\n self.dropout1 = layers.Dropout(0.25)\n self.fc1 = layers.Dense(32, activation='relu')\n self.dropout2 = layers.Dropout(0.5)\n self.fc2 = layers.Dense(10, activation='softmax')\n\n def 
call(self, x):\n x = tf.reshape(x, [-1, 28, 28])\n x = self.lstm(x)\n x = self.batchnorm(x)\n x = self.dropout1(x)\n x = self.fc1(x)\n x = self.dropout2(x)\n return self.fc2(x)\n\nmodel = Net()", "tensorflow_test_code": {"setup_code": "model = Net()\ninput_tensor = tf.random.normal([32, 1, 28, 28])", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.lstm, layers.LSTM), 'First layer should be LSTM'", "assert isinstance(model.fc1, layers.Dense) and model.fc1.units == 32, 'First fully connected layer should have 32 units'"]}} |
| {"question_id": 132, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class Net(nn.Module):\n def __init__(self):\n super().__init__()\n # Initialize layers here", "pytorch_sol_code": "class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\nmodel = Net()", "pytorch_test_code": {"setup_code": "model = Net()\ninput_tensor = torch.randn(32, 1, 28, 28)", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.conv1, nn.Conv2d), 'First layer should be Conv2d'", "assert isinstance(model.fc1, nn.Linear) and model.fc1.out_features == 50, 'First fully connected layer should have 50 output features'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class Net(Model):\n def __init__(self):\n super().__init__()\n # Initialize layers here", "tensorflow_sol_code": "class Net(Model):\n def __init__(self):\n super().__init__()\n self.conv1 = layers.Conv2D(10, kernel_size=5, activation='relu', input_shape=(28, 28, 1))\n self.conv2 = layers.Conv2D(20, kernel_size=5, activation='relu')\n self.max_pool = layers.MaxPooling2D(pool_size=(2, 2))\n self.dropout = layers.Dropout(0.5)\n self.flatten = layers.Flatten()\n self.fc1 = layers.Dense(50, activation='relu')\n self.fc2 = layers.Dense(10, activation='softmax')\n\n def call(self, x):\n x = self.conv1(x)\n x = 
self.max_pool(x)\n x = self.conv2(x)\n x = self.max_pool(x)\n x = self.dropout(x)\n x = self.flatten(x)\n x = self.fc1(x)\n return self.fc2(x)\n\nmodel = Net()", "tensorflow_test_code": {"setup_code": "model = Net()\ninput_tensor = tf.random.normal([32, 28, 28, 1])", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.conv1, layers.Conv2D), 'First layer should be Conv2D'", "assert isinstance(model.fc1, layers.Dense) and model.fc1.units == 50, 'First fully connected layer should have 50 units'"]}} |
| {"question_id": 133, "pytorch_library": "import torch\nimport torch.nn.functional as F", "pytorch_start_code": "# Define the loss function\n#def loss_function(recon_x, x, mu, logvar):\n", "pytorch_sol_code": "def loss_function(recon_x, x, mu, logvar):\n BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n return BCE + KLD", "pytorch_test_code": {"setup_code": "recon_x = torch.sigmoid(torch.randn(64, 784))\nx = torch.sigmoid(torch.randn(64, 28, 28))\nmu = torch.randn(64, 20)\nlogvar = torch.randn(64, 20)\n", "test_cases": ["loss = loss_function(recon_x, x, mu, logvar)\nassert loss.item() > 0, 'Loss should be positive'", "assert loss_function(recon_x, x, mu, logvar).dtype == torch.float32, 'Loss should be a floating-point tensor'", "assert torch.isfinite(loss_function(recon_x, x, mu, logvar)), 'Loss should be finite and not NaN/Inf'", "x_zeros = torch.zeros_like(x)\nassert loss_function(recon_x, x_zeros, mu, logvar).item() > 0, 'Loss with zero target should be positive'", "recon_zeros = torch.zeros_like(recon_x)\nassert loss_function(recon_zeros, x, mu, logvar).item() > 0, 'Loss with zero reconstruction should be positive'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import backend as K", "tensorflow_start_code": "# Define the loss function\n#def loss_function(recon_x, x, mu, logvar):\n", "tensorflow_sol_code": "def loss_function(recon_x, x, mu, logvar):\n BCE = tf.keras.losses.binary_crossentropy(x, recon_x)\n BCE = tf.reduce_sum(BCE)\n KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.square(mu) - tf.exp(logvar))\n return BCE + KLD", "tensorflow_test_code": {"setup_code": "recon_x = tf.sigmoid(tf.random.normal([64, 784]))\nx = tf.sigmoid(tf.random.normal([64, 28, 28]))\nmu = tf.random.normal([64, 20])\nlogvar = tf.random.normal([64, 20])\n# Flatten the target tensor to match the shape of recon_x\nx = tf.reshape(x, [64, 784])", "test_cases": ["loss = 
loss_function(recon_x, x, mu, logvar)\nassert loss.numpy() > 0, 'Loss should be positive'", "assert loss_function(recon_x, x, mu, logvar).dtype == tf.float32, 'Loss should be a floating-point tensor'", "assert tf.math.is_finite(loss_function(recon_x, x, mu, logvar)), 'Loss should be finite and not NaN/Inf'", "x_zeros = tf.zeros_like(x)\nassert loss_function(recon_x, x_zeros, mu, logvar).numpy() > 0, 'Loss with zero target should be positive'", "recon_zeros = tf.zeros_like(recon_x)\nassert loss_function(recon_zeros, x, mu, logvar).numpy() > 0, 'Loss with zero reconstruction should be positive'"]}} |
| {"question_id": 134, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "# Define the Policy class\n#class Policy(nn.Module):\n# def __init__(self):\n# super().__init__()\n# self.affine1 = nn.Linear(4, 128)\n# self.dropout = nn.Dropout(p=0.6)\n# self.affine2 = nn.Linear(128, 2)\n", "pytorch_sol_code": "class Policy(nn.Module):\n def __init__(self):\n super().__init__()\n self.affine1 = nn.Linear(4, 128)\n self.dropout = nn.Dropout(p=0.6)\n self.affine2 = nn.Linear(128, 2)\n\n self.saved_log_probs = []\n self.rewards = []\n\n def forward(self, x):\n x = self.affine1(x)\n x = self.dropout(x)\n x = F.relu(x)\n action_scores = self.affine2(x)\n return F.softmax(action_scores, dim=1)\n", "pytorch_test_code": {"setup_code": "model = Policy()\ninput_tensor = torch.randn(32, 4)", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 2), f'Expected output shape (32, 2), got {output.shape}'", "assert isinstance(model.affine1, nn.Linear), 'affine1 should be an instance of nn.Linear'", "assert isinstance(model.affine2, nn.Linear), 'affine2 should be an instance of nn.Linear'", "assert isinstance(model.dropout, nn.Dropout), 'dropout should be an instance of nn.Dropout'", "assert torch.allclose(output.sum(dim=1), torch.tensor([1.0] * 32)), 'Output probabilities should sum to 1'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "# Define the Policy class\n#class Policy(Model):\n# def __init__(self):\n# super().__init__()\n# self.fc1 = layers.Dense(128, activation=None)\n# self.dropout = layers.Dropout(0.6)\n# self.fc2 = layers.Dense(2)", "tensorflow_sol_code": "class Policy(Model):\n def __init__(self):\n super().__init__()\n self.fc1 = layers.Dense(128, activation=None)\n self.dropout = layers.Dropout(0.6)\n self.fc2 = layers.Dense(2)\n\n def call(self, inputs):\n x = self.fc1(inputs)\n x = self.dropout(x)\n x = 
tf.nn.relu(x)\n action_scores = self.fc2(x)\n return tf.nn.softmax(action_scores, axis=1)\n", "tensorflow_test_code": {"setup_code": "model = Policy()\ninput_tensor = tf.random.normal([32, 4])", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 2), f'Expected output shape (32, 2), got {output.shape}'", "assert isinstance(model.fc1, layers.Dense), 'fc1 should be an instance of layers.Dense'", "assert isinstance(model.fc2, layers.Dense), 'fc2 should be an instance of layers.Dense'", "assert isinstance(model.dropout, layers.Dropout), 'dropout should be an instance of layers.Dropout'", "assert tf.reduce_all(tf.abs(tf.reduce_sum(output, axis=1) - 1.0) < 1e-5), 'Output probabilities should sum to 1'"]}} |
| {"question_id": 135, "pytorch_library": "from transformers import BertModel\nimport torch", "pytorch_start_code": "# Load the BERT model from HuggingFace\n", "pytorch_sol_code": "model = BertModel.from_pretrained('bert-base-cased')", "pytorch_test_code": {"setup_code": "model = BertModel.from_pretrained('bert-base-cased')\nencoded_sequences = [\n [101, 7592, 999, 102],\n [101, 4658, 1012, 102],\n [101, 3835, 999, 102]\n]\nmodel_inputs = torch.tensor(encoded_sequences)\noutput = model(model_inputs)", "test_cases": ["assert output.last_hidden_state.shape == (3, 4, 768), f'Expected output shape (3, 4, 768), got {output.last_hidden_state.shape}'", "assert output.last_hidden_state is not None, 'Model should return a valid last_hidden_state'", "assert model.config.model_type == 'bert', 'The loaded model should be of type BERT'", "assert model.config._name_or_path == 'bert-base-cased', f'Expected model to be \"bert-base-cased\", got {model.config._name_or_path}'", "encoded_sequences_2 = [[101, 2057, 2000, 2224, 999, 102], [101, 1005, 1055, 1521, 1005, 102]]\nmodel_inputs_2 = torch.tensor(encoded_sequences_2)\noutput_2 = model(model_inputs_2)\nassert output_2.last_hidden_state.shape == (2, 6, 768), f'Expected output shape (2, 6, 768), got {output_2.last_hidden_state.shape}'", "encoded_sequences_3 = [[101, 2023, 2003, 1037, 6429, 999, 102]]\nmodel_inputs_3 = torch.tensor(encoded_sequences_3)\noutput_3 = model(model_inputs_3)\nassert output_3.last_hidden_state.shape == (1, 7, 768), f'Expected output shape (1, 7, 768), got {output_3.last_hidden_state.shape}'"]}, "tensorflow_library": "from transformers import TFBertModel\nimport tensorflow as tf", "tensorflow_start_code": "# Load the BERT model from HuggingFace", "tensorflow_sol_code": "model = TFBertModel.from_pretrained('bert-base-cased')", "tensorflow_test_code": {"setup_code": "model = TFBertModel.from_pretrained('bert-base-cased')\nencoded_sequences = [\n [101, 7592, 999, 102],\n [101, 4658, 1012, 102],\n [101, 3835, 
999, 102]\n]\nmodel_inputs = tf.constant(encoded_sequences)\noutput = model(model_inputs)", "test_cases": ["assert output.last_hidden_state.shape == (3, 4, 768), f'Expected output shape (3, 4, 768), got {output.last_hidden_state.shape}'", "assert output.last_hidden_state is not None, 'Model should return a valid last_hidden_state'", "assert model.config.model_type == 'bert', 'The loaded model should be of type BERT'", "assert model.config._name_or_path == 'bert-base-cased', f'Expected model to be \"bert-base-cased\", got {model.config._name_or_path}'", "encoded_sequences_2 = [[101, 2057, 2000, 2224, 999, 102], [101, 1005, 1055, 1521, 1005, 102]]\nmodel_inputs_2 = tf.constant(encoded_sequences_2)\noutput_2 = model(model_inputs_2)\nassert output_2.last_hidden_state.shape == (2, 6, 768), f'Expected output shape (2, 6, 768), got {output_2.last_hidden_state.shape}'", "encoded_sequences_3 = [[101, 2023, 2003, 1037, 6429, 999, 102]]\nmodel_inputs_3 = tf.constant(encoded_sequences_3)\noutput_3 = model(model_inputs_3)\nassert output_3.last_hidden_state.shape == (1, 7, 768), f'Expected output shape (1, 7, 768), got {output_3.last_hidden_state.shape}'"]}} |
| {"question_id": 136, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class DeepNN(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n # Define the model layers here", "pytorch_sol_code": "class DeepNN(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 128, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(128, 64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(64, 32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n self.classifier = nn.Sequential(\n nn.Linear(2048, 512),\n nn.ReLU(),\n nn.Dropout(0.1),\n nn.Linear(512, num_classes)\n )\n\n def forward(self, x):\n x = self.features(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n", "pytorch_test_code": {"setup_code": "model = DeepNN(num_classes=10)\ninput_tensor = torch.randn(32, 3, 32, 32)", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.features[0], nn.Conv2d), 'First layer should be Conv2d'", "assert model.features[0].out_channels == 128, 'First Conv2d layer should have 128 output channels'", "assert isinstance(model.classifier[3], nn.Linear), 'Final classifier layer should be Linear with output size of num_classes'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class DeepNN(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n # Define the model layers here", "tensorflow_sol_code": "class DeepNN(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n self.features = tf.keras.Sequential([\n layers.Conv2D(128, (3, 3), padding='same', activation='relu', input_shape=(32, 32, 3)),\n 
layers.Conv2D(64, (3, 3), padding='same', activation='relu'),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Conv2D(64, (3, 3), padding='same', activation='relu'),\n layers.Conv2D(32, (3, 3), padding='same', activation='relu'),\n layers.MaxPooling2D(pool_size=(2, 2)),\n ])\n self.classifier = tf.keras.Sequential([\n layers.Flatten(),\n layers.Dense(512, activation='relu'),\n layers.Dropout(0.1),\n layers.Dense(num_classes)\n ])\n\n def call(self, x):\n x = self.features(x)\n x = self.classifier(x)\n return x\n", "tensorflow_test_code": {"setup_code": "model = DeepNN(num_classes=10)\ninput_tensor = tf.random.normal([32, 32, 32, 3])", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.features.layers[0], layers.Conv2D), 'First layer should be Conv2D'", "assert model.features.layers[0].filters == 128, 'First Conv2D layer should have 128 filters'", "assert isinstance(model.classifier.layers[-1], layers.Dense), 'Final classifier layer should be Dense with output size of num_classes'"]}} |
| {"question_id": 137, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class LightNN(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n # Define the model layers here", "pytorch_sol_code": "class LightNN(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 16, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(16, 16, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n self.classifier = nn.Sequential(\n nn.Linear(1024, 256),\n nn.ReLU(),\n nn.Dropout(0.1),\n nn.Linear(256, num_classes)\n )\n\n def forward(self, x):\n x = self.features(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n", "pytorch_test_code": {"setup_code": "model = LightNN(num_classes=10)\ninput_tensor = torch.randn(32, 3, 32, 32)", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.features[0], nn.Conv2d), 'First layer should be Conv2d'", "assert model.features[0].out_channels == 16, 'First Conv2d layer should have 16 output channels'", "assert isinstance(model.classifier[-1], nn.Linear), 'Final classifier layer should be Linear with output size of num_classes'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class LightNN(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n # Define the model layers here", "tensorflow_sol_code": "class LightNN(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n self.features = tf.keras.Sequential([\n layers.Conv2D(16, (3, 3), padding='same', activation='relu', input_shape=(32, 32, 3)),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Conv2D(16, (3, 3), padding='same', activation='relu'),\n 
layers.MaxPooling2D(pool_size=(2, 2)),\n ])\n self.classifier = tf.keras.Sequential([\n layers.Flatten(),\n layers.Dense(256, activation='relu'),\n layers.Dropout(0.1),\n layers.Dense(num_classes)\n ])\n\n def call(self, x):\n x = self.features(x)\n x = self.classifier(x)\n return x\n", "tensorflow_test_code": {"setup_code": "model = LightNN(num_classes=10)\ninput_tensor = tf.random.normal([32, 32, 32, 3])", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.features.layers[0], layers.Conv2D), 'First layer should be Conv2D'", "assert model.features.layers[0].filters == 16, 'First Conv2D layer should have 16 filters'", "assert isinstance(model.classifier.layers[-1], layers.Dense), 'Final classifier layer should be Dense with output size of num_classes'"]}} |
| {"question_id": 138, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class ModifiedDeepNNCosine(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n # Define the model layers here", "pytorch_sol_code": "class ModifiedDeepNNCosine(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 128, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(128, 64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(64, 32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n self.classifier = nn.Sequential(\n nn.Linear(2048, 512),\n nn.ReLU(),\n nn.Dropout(0.1),\n nn.Linear(512, num_classes)\n )\n\n def forward(self, x):\n x = self.features(x)\n flattened_conv_output = torch.flatten(x, 1)\n x = self.classifier(flattened_conv_output)\n flattened_conv_output_after_pooling = F.avg_pool1d(flattened_conv_output.unsqueeze(1), 2).squeeze(1)\n return x, flattened_conv_output_after_pooling\n", "pytorch_test_code": {"setup_code": "model = ModifiedDeepNNCosine(num_classes=10)\ninput_tensor = torch.randn(32, 3, 32, 32)", "test_cases": ["output, pooled_output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert pooled_output.shape == (32, 1024), f'Expected pooled output shape (32, 1024), got {pooled_output.shape}'", "assert isinstance(model.features[0], nn.Conv2d), 'First layer should be Conv2d'", "assert isinstance(model.classifier[-1], nn.Linear), 'Final classifier layer should be Linear with output size of num_classes'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class ModifiedDeepNNCosine(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n # Define 
the model layers here", "tensorflow_sol_code": "class ModifiedDeepNNCosine(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n self.features = tf.keras.Sequential([\n layers.Conv2D(128, (3, 3), padding='same', activation='relu'),\n layers.Conv2D(64, (3, 3), padding='same', activation='relu'),\n layers.MaxPooling2D((2, 2)),\n layers.Conv2D(64, (3, 3), padding='same', activation='relu'),\n layers.Conv2D(32, (3, 3), padding='same', activation='relu'),\n layers.MaxPooling2D((2, 2)),\n ])\n self.classifier = tf.keras.Sequential([\n layers.Flatten(),\n layers.Dense(512, activation='relu'),\n layers.Dropout(0.1),\n layers.Dense(num_classes)\n ])\n\n def call(self, x):\n x = self.features(x)\n flattened_conv_output = tf.keras.layers.Flatten()(x)\n output = self.classifier(flattened_conv_output)\n pooled_output = tf.keras.layers.AveragePooling1D(pool_size=2)(tf.expand_dims(flattened_conv_output, axis=-1))\n pooled_output = tf.squeeze(pooled_output, axis=-1) # Ensure the shape is correct\n return output, pooled_output\n", "tensorflow_test_code": {"setup_code": "model = ModifiedDeepNNCosine(num_classes=10)\ninput_tensor = tf.random.normal([32, 32, 32, 3])", "test_cases": ["output, pooled_output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert pooled_output.shape == (32, 1024), f'Expected pooled output shape (32, 1024), got {pooled_output.shape}'", "assert isinstance(model.features.layers[0], layers.Conv2D), 'First layer should be Conv2D'", "assert isinstance(model.classifier.layers[-1], layers.Dense), 'Final classifier layer should be Dense with output size of num_classes'"]}} |
| {"question_id": 139, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class ModifiedLightNNCosine(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n # Initialize model layers here", "pytorch_sol_code": "class ModifiedLightNNCosine(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 16, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(16, 16, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n self.classifier = nn.Sequential(\n nn.Linear(1024, 256),\n nn.ReLU(),\n nn.Dropout(0.1),\n nn.Linear(256, num_classes)\n )\n\n def forward(self, x):\n x = self.features(x)\n flattened_conv_output = torch.flatten(x, 1)\n x = self.classifier(flattened_conv_output)\n return x, flattened_conv_output\n", "pytorch_test_code": {"setup_code": "model = ModifiedLightNNCosine(num_classes=10)\ninput_tensor = torch.randn(32, 3, 32, 32)", "test_cases": ["output, flattened_conv_output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert flattened_conv_output.shape == (32, 1024), f'Expected flattened_conv_output shape (32, 1024), got {flattened_conv_output.shape}'", "assert isinstance(model.features[0], nn.Conv2d), 'First layer should be Conv2d'", "assert isinstance(model.classifier[-1], nn.Linear), 'Final classifier layer should be Linear with output size of num_classes'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class ModifiedLightNNCosine(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n # Initialize model layers here", "tensorflow_sol_code": "class ModifiedLightNNCosine(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n self.features = tf.keras.Sequential([\n layers.Conv2D(16, 
(3, 3), padding='same', activation='relu'),\n layers.MaxPooling2D((2, 2)),\n layers.Conv2D(16, (3, 3), padding='same', activation='relu'),\n layers.MaxPooling2D((2, 2)),\n ])\n self.classifier = tf.keras.Sequential([\n layers.Flatten(),\n layers.Dense(256, activation='relu'),\n layers.Dropout(0.1),\n layers.Dense(num_classes)\n ])\n\n def call(self, x):\n x = self.features(x)\n flattened_conv_output = tf.keras.layers.Flatten()(x)\n output = self.classifier(flattened_conv_output)\n return output, flattened_conv_output\n", "tensorflow_test_code": {"setup_code": "model = ModifiedLightNNCosine(num_classes=10)\ninput_tensor = tf.random.normal([32, 32, 32, 3])", "test_cases": ["output, flattened_conv_output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert flattened_conv_output.shape == (32, 1024), f'Expected flattened_conv_output shape (32, 1024), got {flattened_conv_output.shape}'", "assert isinstance(model.features.layers[0], layers.Conv2D), 'First layer should be Conv2D'", "assert isinstance(model.classifier.layers[-1], layers.Dense), 'Final classifier layer should be Dense with output size of num_classes'"]}} |
| {"question_id": 140, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class ModifiedDeepNNRegressor(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n # Initialize model layers here", "pytorch_sol_code": "class ModifiedDeepNNRegressor(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 128, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(128, 64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(64, 32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n self.classifier = nn.Sequential(\n nn.Linear(2048, 512),\n nn.ReLU(),\n nn.Dropout(0.1),\n nn.Linear(512, num_classes)\n )\n\n def forward(self, x):\n x = self.features(x)\n conv_feature_map = x\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x, conv_feature_map\n", "pytorch_test_code": {"setup_code": "model = ModifiedDeepNNRegressor(num_classes=10)\ninput_tensor = torch.randn(32, 3, 32, 32)", "test_cases": ["output, conv_feature_map = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert conv_feature_map.shape == (32, 32, 8, 8), f'Expected conv_feature_map shape (32, 32, 8, 8), got {conv_feature_map.shape}'", "assert isinstance(model.features[0], nn.Conv2d), 'First layer should be Conv2d'", "assert isinstance(model.classifier[-1], nn.Linear), 'Final classifier layer should be Linear with output size of num_classes'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class ModifiedDeepNNRegressor(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n # Initialize model layers here", "tensorflow_sol_code": "class ModifiedDeepNNRegressor(Model):\n def __init__(self, 
num_classes=10):\n super().__init__()\n self.features = tf.keras.Sequential([\n layers.Conv2D(128, (3, 3), padding='same', activation='relu'),\n layers.Conv2D(64, (3, 3), padding='same', activation='relu'),\n layers.MaxPooling2D((2, 2)),\n layers.Conv2D(64, (3, 3), padding='same', activation='relu'),\n layers.Conv2D(32, (3, 3), padding='same', activation='relu'),\n layers.MaxPooling2D((2, 2))\n ])\n self.classifier = tf.keras.Sequential([\n layers.Flatten(),\n layers.Dense(512, activation='relu'),\n layers.Dropout(0.1),\n layers.Dense(num_classes)\n ])\n\n def call(self, x):\n conv_feature_map = self.features(x)\n output = self.classifier(conv_feature_map)\n return output, conv_feature_map\n", "tensorflow_test_code": {"setup_code": "model = ModifiedDeepNNRegressor(num_classes=10)\ninput_tensor = tf.random.normal([32, 32, 32, 3])", "test_cases": ["output, conv_feature_map = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert conv_feature_map.shape == (32, 8, 8, 32), f'Expected conv_feature_map shape (32, 8, 8, 32), got {conv_feature_map.shape}'", "assert isinstance(model.features.layers[0], layers.Conv2D), 'First layer should be Conv2D'", "assert isinstance(model.classifier.layers[-1], layers.Dense), 'Final classifier layer should be Dense with output size of num_classes'"]}} |
| {"question_id": 141, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class ModifiedLightNNRegressor(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n # Initialize model layers here", "pytorch_sol_code": "class ModifiedLightNNRegressor(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 16, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(16, 16, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n self.regressor = nn.Sequential(\n nn.Conv2d(16, 32, kernel_size=3, padding=1)\n )\n self.classifier = nn.Sequential(\n nn.Linear(1024, 256),\n nn.ReLU(),\n nn.Dropout(0.1),\n nn.Linear(256, num_classes)\n )\n\n def forward(self, x):\n x = self.features(x)\n regressor_output = self.regressor(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x, regressor_output\n", "pytorch_test_code": {"setup_code": "model = ModifiedLightNNRegressor(num_classes=10)\ninput_tensor = torch.randn(32, 3, 32, 32)", "test_cases": ["output, regressor_output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert regressor_output.shape == (32, 32, 8, 8), f'Expected regressor_output shape (32, 32, 8, 8), got {regressor_output.shape}'", "assert isinstance(model.features[0], nn.Conv2d), 'First layer should be Conv2d'", "assert isinstance(model.classifier[-1], nn.Linear), 'Final classifier layer should be Linear with output size of num_classes'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class ModifiedLightNNRegressor(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n # Initialize model layers here", "tensorflow_sol_code": "class ModifiedLightNNRegressor(Model):\n def __init__(self, 
num_classes=10):\n super().__init__()\n self.features = tf.keras.Sequential([\n layers.Conv2D(16, (3, 3), padding='same', activation='relu'),\n layers.MaxPooling2D((2, 2)),\n layers.Conv2D(16, (3, 3), padding='same', activation='relu'),\n layers.MaxPooling2D((2, 2))\n ])\n self.regressor = tf.keras.Sequential([\n layers.Conv2D(32, (3, 3), padding='same')\n ])\n self.classifier = tf.keras.Sequential([\n layers.Flatten(),\n layers.Dense(256, activation='relu'),\n layers.Dropout(0.1),\n layers.Dense(num_classes)\n ])\n\n def call(self, x):\n x = self.features(x)\n regressor_output = self.regressor(x)\n x = tf.keras.layers.Flatten()(x)\n output = self.classifier(x)\n return output, regressor_output\n", "tensorflow_test_code": {"setup_code": "model = ModifiedLightNNRegressor(num_classes=10)\ninput_tensor = tf.random.normal([32, 32, 32, 3])", "test_cases": ["output, regressor_output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert regressor_output.shape == (32, 8, 8, 32), f'Expected regressor_output shape (32, 8, 8, 32), got {regressor_output.shape}'", "assert isinstance(model.features.layers[0], layers.Conv2D), 'First layer should be Conv2D'", "assert isinstance(model.classifier.layers[-1], layers.Dense), 'Final classifier layer should be Dense with output size of num_classes'"]}} |
| {"question_id": 142, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class BiRNN(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\n super().__init__()\n # Initialize model layers here", "pytorch_sol_code": "class BiRNN(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\n super().__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)\n self.fc = nn.Linear(hidden_size*2, num_classes) # 2 for bidirection\n\n def forward(self, x):\n h0 = torch.zeros(self.num_layers*2, x.size(0), self.hidden_size).to(x.device) # 2 for bidirection \n c0 = torch.zeros(self.num_layers*2, x.size(0), self.hidden_size).to(x.device)\n out, _ = self.lstm(x, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)\n out = self.fc(out[:, -1, :])\n return out\n", "pytorch_test_code": {"setup_code": "input_size = 28\nhidden_size = 64\nnum_layers = 2\nnum_classes = 10\nmodel = BiRNN(input_size, hidden_size, num_layers, num_classes)\ninput_tensor = torch.randn(32, 10, input_size).to(torch.device('cuda' if torch.cuda.is_available() else 'cpu'))", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, num_classes), f'Expected output shape (32, {num_classes}), got {output.shape}'", "assert isinstance(model.lstm, nn.LSTM), 'The LSTM layer should be of type nn.LSTM'", "assert model.lstm.bidirectional == True, 'The LSTM layer should be bidirectional'", "assert isinstance(model.fc, nn.Linear), 'The final layer should be a Linear layer with output size of num_classes'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class BiRNN(Model):\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\n super().__init__()\n # Initialize model 
layers here", "tensorflow_sol_code": "class BiRNN(Model):\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\n super().__init__()\n self.lstm = layers.Bidirectional(layers.LSTM(hidden_size, return_sequences=False, recurrent_initializer='glorot_uniform', recurrent_activation='sigmoid', dropout=0.1))\n self.fc = layers.Dense(num_classes)\n\n def call(self, x):\n out = self.lstm(x)\n out = self.fc(out)\n return out\n", "tensorflow_test_code": {"setup_code": "input_size = 28\nhidden_size = 64\nnum_layers = 2\nnum_classes = 10\nmodel = BiRNN(input_size, hidden_size, num_layers, num_classes)\ninput_tensor = tf.random.normal([32, 10, input_size])", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, num_classes), f'Expected output shape (32, {num_classes}), got {output.shape}'", "assert isinstance(model.lstm, layers.Bidirectional), 'The LSTM layer should be bidirectional'", "assert isinstance(model.fc, layers.Dense), 'The final layer should be Dense with output size of num_classes'"]}} |
| {"question_id": 143, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class ConvNet(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n # Initialize model layers here", "pytorch_sol_code": "class ConvNet(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2))\n self.fc = nn.Linear(7*7*32, num_classes)\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.reshape(out.size(0), -1)\n out = self.fc(out)\n return out\n", "pytorch_test_code": {"setup_code": "model = ConvNet(num_classes=10)\ninput_tensor = torch.randn(32, 1, 28, 28)", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.layer1[0], nn.Conv2d), 'First layer should be Conv2d'", "assert model.layer1[0].out_channels == 16, 'First Conv2d layer should have 16 output channels'", "assert isinstance(model.fc, nn.Linear), 'Final layer should be a Linear layer with output size of num_classes'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class ConvNet(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n # Initialize model layers here", "tensorflow_sol_code": "class ConvNet(Model):\n def __init__(self, num_classes=10):\n super().__init__()\n self.layer1 = tf.keras.Sequential([\n layers.Conv2D(16, (5, 5), padding='same', activation='relu', input_shape=(28, 28, 1)),\n layers.BatchNormalization(),\n layers.MaxPooling2D(pool_size=(2, 2))\n ])\n self.layer2 = 
tf.keras.Sequential([\n layers.Conv2D(32, (5, 5), padding='same', activation='relu'),\n layers.BatchNormalization(),\n layers.MaxPooling2D(pool_size=(2, 2))\n ])\n self.fc = layers.Dense(num_classes)\n\n def call(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = tf.keras.layers.Flatten()(x)\n x = self.fc(x)\n return x\n", "tensorflow_test_code": {"setup_code": "model = ConvNet(num_classes=10)\ninput_tensor = tf.random.normal([32, 28, 28, 1])", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 10), f'Expected output shape (32, 10), got {output.shape}'", "assert isinstance(model.layer1.layers[0], layers.Conv2D), 'First layer should be Conv2D'", "assert model.layer1.layers[0].filters == 16, 'First Conv2D layer should have 16 filters'", "assert isinstance(model.fc, layers.Dense), 'Final classifier layer should be Dense with output size of num_classes'"]}} |
| {"question_id": 144, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class AutoEncoder(nn.Module):\n def __init__(self):\n super().__init__()\n # Define the encoder and decoder layers here", "pytorch_sol_code": "class AutoEncoder(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.encoder = nn.Sequential(\n nn.Linear(28*28, 128),\n nn.Tanh(),\n nn.Linear(128, 64),\n nn.Tanh(),\n nn.Linear(64, 12),\n nn.Tanh(),\n nn.Linear(12, 3)\n )\n self.decoder = nn.Sequential(\n nn.Linear(3, 12),\n nn.Tanh(),\n nn.Linear(12, 64),\n nn.Tanh(),\n nn.Linear(64, 128),\n nn.Tanh(),\n nn.Linear(128, 28*28),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n encoded = self.encoder(x)\n decoded = self.decoder(encoded)\n return encoded, decoded\n", "pytorch_test_code": {"setup_code": "model = AutoEncoder()\ninput_tensor = torch.randn(32, 28*28)", "test_cases": ["encoded, decoded = model(input_tensor)\nassert encoded.shape == (32, 3), f'Expected encoded shape (32, 3), got {encoded.shape}'", "assert decoded.shape == (32, 28*28), f'Expected decoded shape (32, 28*28), got {decoded.shape}'", "assert isinstance(model.encoder[0], nn.Linear), 'First layer in encoder should be Linear'", "assert isinstance(model.decoder[-2], nn.Linear), 'Final layer in decoder should be Linear'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class AutoEncoder(Model):\n def __init__(self):\n super().__init__()\n # Define the encoder and decoder layers here", "tensorflow_sol_code": "class AutoEncoder(Model):\n def __init__(self):\n super().__init__()\n self.encoder = tf.keras.Sequential([\n layers.Dense(128, activation='tanh'),\n layers.Dense(64, activation='tanh'),\n layers.Dense(12, activation='tanh'),\n layers.Dense(3)\n ])\n self.decoder = tf.keras.Sequential([\n layers.Dense(12, activation='tanh'),\n layers.Dense(64, activation='tanh'),\n layers.Dense(128, 
activation='tanh'),\n layers.Dense(28*28, activation='sigmoid')\n ])\n\n def call(self, x):\n encoded = self.encoder(x)\n decoded = self.decoder(encoded)\n return encoded, decoded\n", "tensorflow_test_code": {"setup_code": "model = AutoEncoder()\ninput_tensor = tf.random.normal([32, 28*28])", "test_cases": ["encoded, decoded = model(input_tensor)\nassert encoded.shape == (32, 3), f'Expected encoded shape (32, 3), got {encoded.shape}'", "assert decoded.shape == (32, 28*28), f'Expected decoded shape (32, 28*28), got {decoded.shape}'", "assert isinstance(model.encoder.layers[0], layers.Dense), 'First layer in encoder should be Dense'", "assert isinstance(model.decoder.layers[-1], layers.Dense), 'Final layer in decoder should be Dense'"]}} |
| {"question_id": 145, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class DenseLayer(nn.Module):\n def __init__(self, in_channels):\n super(DenseLayer, self).__init__()\n # Define the batch normalization and convolutional layers here", "pytorch_sol_code": "class DenseLayer(nn.Module):\n def __init__(self, in_channels):\n super().__init__()\n k=12\n self.BN1 = nn.BatchNorm2d(num_features=in_channels)\n self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=4*k, kernel_size=1, stride=1, padding=0, bias=False)\n self.BN2 = nn.BatchNorm2d(num_features=4*k)\n self.conv2 = nn.Conv2d(in_channels=4*k, out_channels=k, kernel_size=3, stride=1, padding=1, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n xin = x\n x = self.BN1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.BN2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = torch.cat([xin, x], 1)\n return x\n", "pytorch_test_code": {"setup_code": "k=12\nmodel = DenseLayer(in_channels=64)\ninput_tensor = torch.randn(32, 64, 32, 32)", "test_cases": ["output = model(input_tensor)\nexpected_channels = 64 + k\nassert output.shape == (32, expected_channels, 32, 32), f'Expected output shape (32, {expected_channels}, 32, 32), got {output.shape}'", "assert isinstance(model.BN1, nn.BatchNorm2d), 'BN1 should be an instance of BatchNorm2d'", "assert isinstance(model.conv1, nn.Conv2d), 'conv1 should be an instance of Conv2d'", "assert isinstance(model.conv2, nn.Conv2d), 'conv2 should be an instance of Conv2d'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class DenseLayer(Model):\n def __init__(self, in_channels):\n super().__init__()\n # Define the batch normalization and convolutional layers here", "tensorflow_sol_code": "class DenseLayer(Model):\n def __init__(self, in_channels):\n super().__init__()\n k=12\n self.BN1 = layers.BatchNormalization()\n self.conv1 = 
layers.Conv2D(4*k, kernel_size=1, strides=1, padding='valid', use_bias=False)\n self.BN2 = layers.BatchNormalization()\n self.conv2 = layers.Conv2D(k, kernel_size=3, strides=1, padding='same', use_bias=False)\n self.relu = layers.ReLU()\n\n def call(self, x):\n xin = x\n x = self.BN1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.BN2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = tf.concat([xin, x], axis=-1)\n return x\n", "tensorflow_test_code": {"setup_code": "k = 12\nmodel = DenseLayer(in_channels=64)\ninput_tensor = tf.random.normal([32, 32, 32, 64])", "test_cases": ["output = model(input_tensor)\nexpected_channels = 64 + k\nassert output.shape == (32, 32, 32, expected_channels), f'Expected output shape (32, 32, 32, {expected_channels}), got {output.shape}'", "assert isinstance(model.BN1, layers.BatchNormalization), 'BN1 should be an instance of BatchNormalization'", "assert isinstance(model.conv1, layers.Conv2D), 'conv1 should be an instance of Conv2D'", "assert isinstance(model.conv2, layers.Conv2D), 'conv2 should be an instance of Conv2D'"]}} |
| {"question_id": 146, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class TransitionLayer(nn.Module):\n def __init__(self, in_channels, compression_factor):\n super().__init__()\n # Define the batch normalization, 1x1 convolution, and average pooling layers here", "pytorch_sol_code": "class TransitionLayer(nn.Module):\n def __init__(self, in_channels, compression_factor):\n super().__init__()\n self.BN = nn.BatchNorm2d(in_channels)\n self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=int(in_channels * compression_factor), kernel_size=1, stride=1, padding=0, bias=False)\n self.avgpool = nn.AvgPool2d(kernel_size=2, stride=2)\n\n def forward(self, x):\n x = self.BN(x)\n x = self.conv1(x)\n x = self.avgpool(x)\n return x\n", "pytorch_test_code": {"setup_code": "compression_factor = 0.5\nmodel = TransitionLayer(in_channels=64, compression_factor=compression_factor)\ninput_tensor = torch.randn(32, 64, 32, 32)", "test_cases": ["output = model(input_tensor)\nexpected_channels = int(64 * compression_factor)\nassert output.shape == (32, expected_channels, 16, 16), f'Expected output shape (32, {expected_channels}, 16, 16), got {output.shape}'", "assert isinstance(model.BN, nn.BatchNorm2d), 'BN should be an instance of BatchNorm2d'", "assert isinstance(model.conv1, nn.Conv2d), 'conv1 should be an instance of Conv2d'", "assert isinstance(model.avgpool, nn.AvgPool2d), 'avgpool should be an instance of AvgPool2d'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class TransitionLayer(Model):\n def __init__(self, in_channels, compression_factor):\n super().__init__()\n # Define the batch normalization, 1x1 convolution, and average pooling layers here", "tensorflow_sol_code": "class TransitionLayer(Model):\n def __init__(self, in_channels, compression_factor):\n super().__init__()\n self.BN = layers.BatchNormalization()\n self.conv1 
= layers.Conv2D(int(in_channels * compression_factor), kernel_size=1, strides=1, padding='valid', use_bias=False)\n self.avgpool = layers.AvgPool2D(pool_size=2, strides=2)\n\n def call(self, x):\n x = self.BN(x)\n x = self.conv1(x)\n x = self.avgpool(x)\n return x\n", "tensorflow_test_code": {"setup_code": "compression_factor = 0.5\nmodel = TransitionLayer(in_channels=64, compression_factor=compression_factor)\ninput_tensor = tf.random.normal([32, 32, 32, 64])", "test_cases": ["output = model(input_tensor)\nexpected_channels = int(64 * compression_factor)\nassert output.shape == (32, 16, 16, expected_channels), f'Expected output shape (32, 16, 16, {expected_channels}), got {output.shape}'", "assert isinstance(model.BN, layers.BatchNormalization), 'BN should be an instance of BatchNormalization'", "assert isinstance(model.conv1, layers.Conv2D), 'conv1 should be an instance of Conv2D'", "assert isinstance(model.avgpool, layers.AvgPool2D), 'avgpool should be an instance of AvgPool2D'"]}} |
| {"question_id": 147, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "class ConvBlock(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):\n super().__init__()\n # Define the convolutional, batch normalization, and ReLU layers here", "pytorch_sol_code": "class ConvBlock(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):\n super().__init__()\n self.conv2d = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)\n self.batchnorm2d = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n return self.relu(self.batchnorm2d(self.conv2d(x)))\n", "pytorch_test_code": {"setup_code": "model = ConvBlock(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1)\ninput_tensor = torch.randn(32, 3, 64, 64)", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 16, 64, 64), f'Expected output shape (32, 16, 64, 64), got {output.shape}'", "assert isinstance(model.conv2d, nn.Conv2d), 'conv2d should be an instance of Conv2d'", "assert isinstance(model.batchnorm2d, nn.BatchNorm2d), 'batchnorm2d should be an instance of BatchNorm2d'", "assert isinstance(model.relu, nn.ReLU), 'relu should be an instance of ReLU'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class ConvBlock(Model):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):\n super().__init__()\n # Define the convolutional, batch normalization, and ReLU layers here", "tensorflow_sol_code": "class ConvBlock(Model):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):\n super().__init__()\n self.conv2d = layers.Conv2D(out_channels, kernel_size=kernel_size, strides=stride, 
padding='same', use_bias=bias, input_shape=(None, None, in_channels))\n self.batchnorm2d = layers.BatchNormalization()\n self.relu = layers.ReLU()\n\n def call(self, x):\n return self.relu(self.batchnorm2d(self.conv2d(x)))\n", "tensorflow_test_code": {"setup_code": "model = ConvBlock(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding='same')\ninput_tensor = tf.random.normal([32, 64, 64, 3])", "test_cases": ["output = model(input_tensor)\nassert output.shape == (32, 64, 64, 16), f'Expected output shape (32, 64, 64, 16), got {output.shape}'", "assert isinstance(model.conv2d, layers.Conv2D), 'conv2d should be an instance of Conv2D'", "assert isinstance(model.batchnorm2d, layers.BatchNormalization), 'batchnorm2d should be an instance of BatchNormalization'", "assert isinstance(model.relu, layers.ReLU), 'relu should be an instance of ReLU'"]}} |
| {"question_id": 148, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "class TwoLayerNet(nn.Module):\n def __init__(self, D_in, H, D_out):\n super().__init__()\n # Define the layers here", "pytorch_sol_code": "class TwoLayerNet(nn.Module):\n def __init__(self, D_in, H, D_out):\n super().__init__()\n self.linear1 = nn.Linear(D_in, H)\n self.linear2 = nn.Linear(H, D_out)\n\n def forward(self, x):\n h_relu = self.linear1(x).clamp(min=0)\n y_pred = self.linear2(h_relu)\n return y_pred\n", "pytorch_test_code": {"setup_code": "model = TwoLayerNet(D_in=100, H=50, D_out=10)\ninput_tensor = torch.randn(64, 100)", "test_cases": ["output = model(input_tensor)\nassert output.shape == (64, 10), f'Expected output shape (64, 10), got {output.shape}'", "assert isinstance(model.linear1, nn.Linear), 'First layer should be a Linear layer'", "assert isinstance(model.linear2, nn.Linear), 'Second layer should be a Linear layer'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers, Model", "tensorflow_start_code": "class TwoLayerNet(Model):\n def __init__(self, D_in, H, D_out):\n super().__init__()\n # Define the layers here", "tensorflow_sol_code": "class TwoLayerNet(Model):\n def __init__(self, D_in, H, D_out):\n super().__init__()\n self.linear1 = layers.Dense(H, input_shape=(D_in,), activation='relu')\n self.linear2 = layers.Dense(D_out)\n\n def call(self, x):\n x = self.linear1(x)\n y_pred = self.linear2(x)\n return y_pred\n", "tensorflow_test_code": {"setup_code": "model = TwoLayerNet(D_in=100, H=50, D_out=10)\ninput_tensor = tf.random.normal([64, 100])", "test_cases": ["output = model(input_tensor)\nassert output.shape == (64, 10), f'Expected output shape (64, 10), got {output.shape}'", "assert isinstance(model.linear1, layers.Dense), 'First layer should be a Dense layer'", "assert isinstance(model.linear2, layers.Dense), 'Second layer should be a Dense layer'"]}} |
| {"question_id": 149, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "\ntorch.manual_seed(0)\nsquare_matrix = torch.randn(3, 3)\n# Calculate eigenvalues\n# eigvals =\n", "pytorch_sol_code": "\neigvals = torch.linalg.eigvals(square_matrix)\n", "pytorch_test_code": {"setup_code": "\nexpected_values = np.array([ 0.87527955+0.j,-0.56903154+0.9436213j,-0.56903154-0.9436213j])\n", "test_cases": ["assert np.allclose(eigvals.numpy(), expected_values, atol=1e-4), 'Eigenvalue calculation does not match expected values'"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\ntf.random.set_seed(0)\nsquare_matrix = tf.random.normal([3, 3], seed=0)\n# Calculate eigenvalues\n# eigvals =\n", "tensorflow_sol_code": "\neigvals = tf.linalg.eigvals(square_matrix)\n", "tensorflow_test_code": {"setup_code": "\nexpected_values = np.array([-1.493342 +0.j,0.10106981+0.5204931j,0.10106981-0.5204931j])\n", "test_cases": ["assert np.allclose(eigvals.numpy(), expected_values, atol=1e-4), 'Eigenvalue calculation does not match expected values'"]}} |
| {"question_id": 150, "pytorch_library": "import torch", "pytorch_start_code": "\nconv1 = torch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3)\n# Initialize weights\n# torch.nn.init.???(conv1.weight)\n", "pytorch_sol_code": "\ntorch.nn.init.xavier_uniform_(conv1.weight)\n", "pytorch_test_code": {"setup_code": "\n", "test_cases": ["assert conv1.weight.std().item() > 0, 'Weights are not initialized'", "assert abs(conv1.weight.mean().item()) < 0.01, 'Weight initialization is biased'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nconv1 = tf.keras.layers.Conv2D(filters=16, kernel_size=3, input_shape=(None, None, 3))\n# Initialize weights\n# initializer = tf.keras.initializers.???\n# conv1.kernel_initializer = initializer\n", "tensorflow_sol_code": "\ninitializer = tf.keras.initializers.GlorotUniform()\nconv1.kernel_initializer = initializer\n", "tensorflow_test_code": {"setup_code": "\nconv1.build(input_shape=(None, 32, 32, 3))\n", "test_cases": ["assert conv1.kernel.numpy().std() > 0, 'Weights are not initialized'", "assert abs(conv1.kernel.numpy().mean()) < 0.01, 'Weight initialization is biased'"]}} |
| {"question_id": 151, "pytorch_library": "import torch", "pytorch_start_code": "\nx = torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)\n# Create a deep copy of x that does not require gradients\n# y = \n", "pytorch_sol_code": "\ny = x.clone().detach()\n", "pytorch_test_code": {"setup_code": "\nx.grad = torch.ones_like(x)\n", "test_cases": ["assert torch.allclose(x, y), 'Copied tensor values do not match original'", "assert y.requires_grad == False, 'Copied tensor should not require gradients'", "assert y.grad is None, 'Gradients were not detached from copied tensor'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nx = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n# Create a deep copy of x\n# y = \n", "tensorflow_sol_code": "\ny = tf.identity(x)\n", "tensorflow_test_code": {"setup_code": "\n", "test_cases": ["assert tf.reduce_all(tf.equal(x, y)), 'Copied tensor values do not match original'", "assert y.dtype == x.dtype, 'Copied tensor dtype does not match original'"]}} |
| {"question_id": 152, "pytorch_library": "import torch", "pytorch_start_code": "\nx = torch.randn(2, 3, 4)\n# Permute dimensions to the order (1, 2, 0)\n# y = \n", "pytorch_sol_code": "\ny = x.permute(1, 2, 0)\n", "pytorch_test_code": {"setup_code": "\nexpected_shape = (3, 4, 2)\n", "test_cases": ["assert y.shape == expected_shape, f'Output shape {y.shape} does not match expected {expected_shape}'", "assert torch.allclose(y[0, 0], x[:, 0, 0]), 'Permuted tensor values do not match expected'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nx = tf.random.normal(shape=(2, 3, 4))\n# Permute dimensions to the order (1, 2, 0)\n# y = \n", "tensorflow_sol_code": "\ny = tf.transpose(x, perm=[1, 2, 0])\n", "tensorflow_test_code": {"setup_code": "\nexpected_shape = (3, 4, 2)\n", "test_cases": ["assert y.shape == expected_shape, f'Output shape {y.shape} does not match expected {expected_shape}'", "assert tf.reduce_all(tf.equal(y[0, 0], x[:, 0, 0])), 'Permuted tensor values do not match expected'"]}} |
| {"question_id": 153, "pytorch_library": "import torch", "pytorch_start_code": "\n# Create a tensor with values drawn from a normal distribution with mean=2 and std=4\n# x = \n", "pytorch_sol_code": "\nx = torch.normal(mean=2, std=4, size=(100,))\n", "pytorch_test_code": {"setup_code": "\nimport numpy as np\ncheck_mean = 2\ncheck_std = 4\ntol = 1.0 # allow some tolerance due to randomness\n", "test_cases": ["assert np.abs(x.mean().item() - check_mean) < tol, f'Sample mean {x.mean().item():.2f} deviates too much from {check_mean}'", "assert np.abs(x.std().item() - check_std) < tol, f'Sample std {x.std().item():.2f} deviates too much from {check_std}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n# Create a tensor with values drawn from a normal distribution with mean=2 and std=4\n# x = \n", "tensorflow_sol_code": "\nx = tf.random.normal(shape=(100,), mean=2, stddev=4)\n", "tensorflow_test_code": {"setup_code": "\nimport numpy as np\ncheck_mean = 2\ncheck_std = 4\ntol = 1.0 # allow some tolerance due to randomness\n", "test_cases": ["assert np.abs(tf.reduce_mean(x).numpy() - check_mean) < tol, f'Sample mean {tf.reduce_mean(x).numpy():.2f} deviates too much from {check_mean}'", "assert np.abs(tf.math.reduce_std(x).numpy() - check_std) < tol, f'Sample std {tf.math.reduce_std(x).numpy():.2f} deviates too much from {check_std}'"]}} |
| {"question_id": 154, "pytorch_library": "import torch", "pytorch_start_code": "\nn = 5\nindices = torch.randint(0, n, size=(4, 7))\n# Create one-hot vectors\n# one_hot = \n", "pytorch_sol_code": "\none_hot = torch.nn.functional.one_hot(indices, n)\n", "pytorch_test_code": {"setup_code": "\n", "test_cases": ["assert one_hot.shape == (4, 7, n), f'Incorrect output shape: {one_hot.shape}, expected (4, 7, {n})'", "assert torch.allclose(one_hot.sum(dim=-1), torch.tensor(1)), 'Sum of each one-hot vector should be 1'", "assert torch.allclose(one_hot.argmax(dim=-1), indices), 'Incorrect one-hot encoding'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nn = 5\nindices = tf.random.uniform(shape=(4, 7), maxval=n, dtype=tf.int64)\n# Create one-hot vectors\n# one_hot = \n", "tensorflow_sol_code": "\none_hot = tf.one_hot(indices, depth=n)\n", "tensorflow_test_code": {"setup_code": "\n", "test_cases": ["assert one_hot.shape == (4, 7, n), f'Incorrect output shape: {one_hot.shape}, expected (4, 7, {n})'", "assert tf.reduce_all(tf.equal(tf.reduce_sum(one_hot, axis=-1), 1)), 'Sum of each one-hot vector should be 1'", "assert tf.reduce_all(tf.equal(tf.argmax(one_hot, axis=-1), indices)), 'Incorrect one-hot encoding'"]}} |
| {"question_id": 155, "pytorch_library": "import torch", "pytorch_start_code": "\nlens = torch.LongTensor([3, 5, 4])\nmax_len = lens.max().item()\n# Create a mask tensor\n# mask = \n", "pytorch_sol_code": "\nmask = torch.arange(max_len).expand(len(lens), max_len) < lens.unsqueeze(1)\nmask = mask.long()\n", "pytorch_test_code": {"setup_code": "\nexpected_mask = torch.LongTensor([[1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 0]])\n", "test_cases": ["assert isinstance(lens, torch.LongTensor), f'Input lens should be a torch.LongTensor, got {type(lens)}'", "assert isinstance(mask, torch.LongTensor), f'Output mask should be a torch.LongTensor, got {type(mask)}'", "assert mask.shape == expected_mask.shape, f'Incorrect mask shape: {mask.shape}, expected {expected_mask.shape}'", "assert torch.all(mask == expected_mask), 'Mask values do not match expected'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nlens = tf.constant([3, 5, 4], dtype=tf.int64)\nmax_len = tf.reduce_max(lens)\n# Create a mask tensor\n# mask = \n", "tensorflow_sol_code": "\nmask = tf.sequence_mask(lens, maxlen=max_len, dtype=tf.int64)\n", "tensorflow_test_code": {"setup_code": "\nexpected_mask = tf.constant([[1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 0]], dtype=tf.int64)\n", "test_cases": ["assert lens.dtype == tf.int64, f'Input lens should have dtype int64, got {lens.dtype}'", "assert mask.dtype == tf.int64, f'Output mask should have dtype int64, got {mask.dtype}'", "assert mask.shape == expected_mask.shape, f'Incorrect mask shape: {mask.shape}, expected {expected_mask.shape}'", "assert tf.reduce_all(tf.equal(mask, expected_mask)), 'Mask values do not match expected'"]}} |
| {"question_id": 156, "pytorch_library": "import torch", "pytorch_start_code": "\nt = torch.tensor([True, False, True, False])\n# Convert to integer tensor\n# t_integer = \n", "pytorch_sol_code": "\nt_integer = t.long()\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([1, 0, 1, 0])\n", "test_cases": ["assert isinstance(t, torch.BoolTensor), f'Input tensor should be of type torch.BoolTensor, got {t.dtype}'", "assert isinstance(t_integer, torch.LongTensor), f'Output tensor should be of type torch.LongTensor, got {t_integer.dtype}'", "assert torch.all(t_integer == expected_output), 'Converted values do not match expected'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nt = tf.constant([True, False, True, False])\n# Convert to integer tensor\n# t_integer = \n", "tensorflow_sol_code": "\nt_integer = tf.cast(t, tf.int64)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([1, 0, 1, 0], dtype=tf.int64)\n", "test_cases": ["assert t.dtype == tf.bool, f'Input tensor should have dtype bool, got {t.dtype}'", "assert t_integer.dtype == tf.int64, f'Output tensor should have dtype int64, got {t_integer.dtype}'", "assert tf.reduce_all(tf.equal(t_integer, expected_output)), 'Converted values do not match expected'"]}} |
| {"question_id": 157, "pytorch_library": "import torch", "pytorch_start_code": "\nt = torch.randn(3, 4)\nmean = 0.0\nstd = 0.1\n# Add Gaussian noise to the tensor\n# noisy_t = \n", "pytorch_sol_code": "\nnoisy_t = t + torch.randn_like(t) * std + mean\n", "pytorch_test_code": {"setup_code": "\ncheck_mean = 0.0\ncheck_std = 0.1\ntol = 0.2 # allow some tolerance due to randomness\n", "test_cases": ["assert noisy_t.shape == t.shape, f'Shape of noisy tensor {noisy_t.shape} does not match original {t.shape}'", "assert abs(noisy_t.mean().item() - (t.mean().item() + check_mean)) < tol, 'Mean of noisy tensor deviates too much from expected'", "assert abs(noisy_t.std().item() - (t.std().item() + check_std)) < tol, 'Std of noisy tensor deviates too much from expected'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nt = tf.random.normal(shape=(3, 4))\nmean = 0.0\nstd = 0.1\n# Add Gaussian noise to the tensor\n# noisy_t = \n", "tensorflow_sol_code": "\nnoise = tf.random.normal(shape=t.shape, mean=mean, stddev=std)\nnoisy_t = t + noise\n", "tensorflow_test_code": {"setup_code": "\ncheck_mean = 0.0\ncheck_std = 0.1\ntol = 0.2 # allow some tolerance due to randomness\n", "test_cases": ["assert noisy_t.shape == t.shape, f'Shape of noisy tensor {noisy_t.shape} does not match original {t.shape}'", "assert abs(tf.reduce_mean(noisy_t).numpy() - (tf.reduce_mean(t).numpy() + check_mean)) < tol, 'Mean of noisy tensor deviates too much from expected'", "assert abs(tf.math.reduce_std(noisy_t).numpy() - (tf.math.reduce_std(t).numpy() + check_std)) < tol, 'Std of noisy tensor deviates too much from expected'"]}} |
| {"question_id": 158, "pytorch_library": "import torch\nfrom torch.autograd import Variable", "pytorch_start_code": "\nx = torch.tensor([[1, 2, 3], [4, 5, 6]])\n# Create a 6 x 2 x 3 tensor by repeating x along a new dimension\n# y = \n", "pytorch_sol_code": "\ny = x.unsqueeze(0).expand(6, 2, 3)\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([[[1, 2, 3],\n [4, 5, 6]],\n [[1, 2, 3],\n [4, 5, 6]],\n [[1, 2, 3],\n [4, 5, 6]],\n [[1, 2, 3],\n [4, 5, 6]],\n [[1, 2, 3],\n [4, 5, 6]],\n [[1, 2, 3],\n [4, 5, 6]]])\n", "test_cases": ["assert y.shape == (6, 2, 3), f'Output shape {y.shape} does not match expected (6, 2, 3)'", "assert torch.all(y == expected_output), 'Output values do not match expected'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nx = tf.constant([[1, 2, 3], [4, 5, 6]])\n# Create a 6 x 2 x 3 tensor by repeating x along a new dimension\n# y = \n", "tensorflow_sol_code": "\ny = tf.tile(tf.expand_dims(x, axis=0), multiples=[6, 1, 1])\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([[[1, 2, 3],\n [4, 5, 6]],\n [[1, 2, 3],\n [4, 5, 6]],\n [[1, 2, 3],\n [4, 5, 6]],\n [[1, 2, 3],\n [4, 5, 6]],\n [[1, 2, 3],\n [4, 5, 6]],\n [[1, 2, 3],\n [4, 5, 6]]])\n", "test_cases": ["assert y.shape == (6, 2, 3), f'Output shape {y.shape} does not match expected (6, 2, 3)'", "assert tf.reduce_all(tf.equal(y, expected_output)), 'Output values do not match expected'"]}} |
| {"question_id": 159, "pytorch_library": "import torch", "pytorch_start_code": "\na = torch.tensor([1, 2, 3])\nb = torch.tensor([1, 2])\nc = torch.tensor([1])\n# Create a padded tensor from the sequences\n# padded_tensor = \n", "pytorch_sol_code": "\npadded_tensor = torch.nn.utils.rnn.pad_sequence([a, b, c], batch_first=True)\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([[1, 2, 3],\n [1, 2, 0],\n [1, 0, 0]])\n", "test_cases": ["assert padded_tensor.shape == (3, 3), f'Output shape {padded_tensor.shape} does not match expected (3, 3)'", "assert torch.equal(padded_tensor, expected_output), 'Output values do not match expected'", "assert isinstance(padded_tensor, torch.Tensor), f'Output should be a torch.Tensor, got {type(padded_tensor)}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\na = tf.constant([1, 2, 3])\nb = tf.constant([1, 2])\nc = tf.constant([1])\n# Create a padded tensor from the sequences\n# padded_tensor = \n", "tensorflow_sol_code": "\npadded_tensor = tf.keras.preprocessing.sequence.pad_sequences([a.numpy(), b.numpy(), c.numpy()], padding='post')\npadded_tensor = tf.constant(padded_tensor)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([[1, 2, 3],\n [1, 2, 0],\n [1, 0, 0]])\n", "test_cases": ["assert padded_tensor.shape == (3, 3), f'Output shape {padded_tensor.shape} does not match expected (3, 3)'", "assert tf.reduce_all(tf.equal(padded_tensor, expected_output)), 'Output values do not match expected'", "assert isinstance(padded_tensor, tf.Tensor), f'Output should be a tf.Tensor, got {type(padded_tensor)}'"]}} |
| {"question_id": 160, "pytorch_library": "import torch", "pytorch_start_code": "\nt = torch.tensor([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n# Create a mask tensor where values are greater than 3 and less than 7\n# mask = \n", "pytorch_sol_code": "\nmask = (t > 3) & (t < 7)\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([[False, False, False],\n [True, True, True],\n [False, False, False]])\n", "test_cases": ["assert mask.shape == t.shape, f'Mask shape {mask.shape} does not match input tensor shape {t.shape}'", "assert torch.equal(mask, expected_output), 'Mask values do not match expected'", "assert mask.dtype == torch.bool, f'Mask should be of dtype torch.bool, got {mask.dtype}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nt = tf.constant([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n# Create a mask tensor where values are greater than 3 and less than 7\n# mask = \n", "tensorflow_sol_code": "\nmask = tf.logical_and(t > 3, t < 7)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([[False, False, False],\n [True, True, True],\n [False, False, False]])\n", "test_cases": ["assert mask.shape == t.shape, f'Mask shape {mask.shape} does not match input tensor shape {t.shape}'", "assert tf.reduce_all(tf.equal(mask, expected_output)), 'Mask values do not match expected'", "assert mask.dtype == tf.bool, f'Mask should be of dtype tf.bool, got {mask.dtype}'"]}} |
| {"question_id": 161, "pytorch_library": "import torch\nfrom torch.utils.data import Dataset, DataLoader, RandomSampler", "pytorch_start_code": "\nclass MyDataset(Dataset):\n def __init__(self, N):\n self.N = N\n self.x = torch.rand(self.N, 10)\n self.y = torch.randint(0, 3, (self.N,))\n\n def __len__(self):\n return self.N\n\n def __getitem__(self, idx):\n return self.x[idx], self.y[idx]\n\n# Create an instance of MyDataset with N samples\nN = 100\nds = MyDataset(N)\n\n# Create a DataLoader to sample M samples with replacement\nM = 50\n# sampler = \n# dl = \n", "pytorch_sol_code": "\nsampler = RandomSampler(ds, replacement=True, num_samples=M)\ndl = DataLoader(ds, batch_size=5, sampler=sampler)\n", "pytorch_test_code": {"setup_code": "\nnum_unique_samples = len(set((x.numpy().tobytes(), y.numpy().tobytes()) for x, y in dl))\n", "test_cases": ["assert len(ds) == N, f'Dataset size should be {N}, got {len(ds)}'", "assert num_unique_samples < M, 'Sampling with replacement should result in fewer unique samples than M'", "assert all(x.shape == (5, 10) and y.shape == (5,) for x, y in dl), 'Batch shapes should be (5, 10) and (5,)'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "# Create a dummy dataset\nN = 100\nx = tf.random.uniform((N, 10))\ny = tf.random.uniform((N,), maxval=3, dtype=tf.int32)\nds = tf.data.Dataset.from_tensor_slices((x, y))\n\n# Sample M samples with replacement\nM = 50\n# ds_sampled = \n", "tensorflow_sol_code": "\nds_sampled = ds.repeat().take(M)\nds_sampled = ds_sampled.batch(5)\n", "tensorflow_test_code": {"setup_code": "\nnum_batches = len(list(ds_sampled))\nnum_samples = sum(x.shape[0] for x, _ in ds_sampled)\n", "test_cases": ["assert num_samples >= M, f'Number of samples should be at least {M}, got {num_samples}'", "assert all(x.shape[0] == 5 for x, _ in ds_sampled), 'Batch size should be 5'", "assert num_batches == (M + 4) // 5, f'Number of batches should be {(M + 4) // 5}, got {num_batches}'"]}} |
| {"question_id": 162, "pytorch_library": "import torch", "pytorch_start_code": "\nvocab = ['a', 'b', 'c', 'd']\ndata = [['a', 'c', 'd'], ['d', 'a', 'b']]\n# Create a PyTorch Module of StringLookup\nclass StringLookup(torch.nn.Module):\n def __init__(self, vocabulary):\n super().__init__()\n # Initialize the lookup table\n\n # self.lookup_table = \n\n\n def forward(self, x):\n # Perform the lookup operation\n pass\n # return\n# Create an instance of StringLookup\n\n# layer = \n\n\n# Apply the StringLookup layer to the data\n# output = \n", "pytorch_sol_code": "class StringLookup(torch.nn.Module):\n def __init__(self, vocabulary):\n super().__init__()\n self.lookup_table = {word: idx for idx, word in enumerate(vocabulary)}\n\n def forward(self, x):\n indices = [[self.lookup_table[word] for word in seq] for seq in x]\n return torch.tensor(indices)\n\n\nlayer = StringLookup(vocabulary=vocab)\noutput = layer(data)\noutput", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([[0, 2, 3], [3, 0, 1]])\n", "test_cases": ["assert isinstance(layer, torch.nn.Module), 'StringLookup should be a torch.nn.Module'", "assert torch.allclose(output, expected_output), 'Output does not match the expected result'", "assert output.dtype == torch.long, f'Output should have dtype torch.long, got {output.dtype}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nvocab = ['a', 'b', 'c', 'd']\ndata = tf.constant([['a', 'c', 'd'], ['d', 'a', 'b']])\n\n# Create a StringLookup layer\n# layer = \n\n# Apply the StringLookup layer to the data\n# output = \n", "tensorflow_sol_code": "\nlayer = tf.keras.layers.StringLookup(vocabulary=vocab)\noutput = layer(data)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([[1, 3, 4], [4, 1, 2]],dtype=tf.int64)\nprint(output)\n", "test_cases": ["assert isinstance(layer, tf.keras.layers.StringLookup), 'Layer should be an instance of tf.keras.layers.StringLookup'", "assert 
tf.reduce_all(tf.equal(output, expected_output)), 'Output does not match the expected result'", "assert output.dtype == tf.int64, f'Output should have dtype int64, got {output.dtype}'"]}} |
| {"question_id": 163, "pytorch_library": "import torch", "pytorch_start_code": "\nA = torch.tensor([[4, 3, 3, 0, 0, 0],\n [13, 4, 13, 0, 0, 0],\n [707, 707, 4, 0, 0, 0],\n [7, 7, 7, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [195, 195, 195, 0, 0, 0]], dtype=torch.int32)\n\n# Identify and remove columns filled with zeros\n# valid_cols = \n# A = \n", "pytorch_sol_code": "\nvalid_cols = []\nfor col_idx in range(A.size(1)):\n if not torch.all(A[:, col_idx] == 0):\n valid_cols.append(col_idx)\nA = A[:, valid_cols]\n", "pytorch_test_code": {"setup_code": "expected_output = torch.tensor([[4, 3, 3],\n [13, 4, 13],\n [707, 707, 4],\n [7, 7, 7],\n [0, 0, 0],\n [195, 195, 195]], dtype=torch.int32)\n", "test_cases": ["assert A.shape == expected_output.shape, f'Output shape {A.shape} does not match expected shape {expected_output.shape}'", "assert torch.allclose(A, expected_output), 'Output values do not match the expected result'", "assert all(torch.any(A[:, col_idx] != 0) for col_idx in range(A.size(1))), 'Output still contains columns filled with zeros'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nA = tf.constant([[4, 3, 3, 0, 0, 0],\n [13, 4, 13, 0, 0, 0],\n [707, 707, 4, 0, 0, 0],\n [7, 7, 7, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [195, 195, 195, 0, 0, 0]], dtype=tf.int32)\n\n# Identify and remove columns filled with zeros\n# valid_cols = \n# A = \n", "tensorflow_sol_code": "\nvalid_cols = tf.where(tf.reduce_any(A != 0, axis=0))\nA = tf.gather(A, valid_cols, axis=1)\nA= tf.squeeze(A, axis=-1)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([[4, 3, 3],\n [13, 4, 13],\n [707, 707, 4],\n [7, 7, 7],\n [0, 0, 0],\n [195, 195, 195]], dtype=tf.int32)\n", "test_cases": ["assert A.shape == expected_output.shape, f'Output shape {A.shape} does not match expected shape {expected_output.shape}'", "assert tf.reduce_all(A == expected_output), 'Output values do not match the expected result'", "assert tf.reduce_all(tf.reduce_any(A != 0, 
axis=0)), 'Output still contains columns filled with zeros'"]}} |
| {"question_id": 164, "pytorch_library": "import torch", "pytorch_start_code": "\nn = 2\nd = 5\nrand_mat = torch.rand(n, d)\npercent_ones = 0.25\n\n# Create an integer tensor where percent_ones of elements are 1s and the rest are 0s\n# k = \n# k_th_quant = \n# bool_tensor = \n# desired_tensor = \n", "pytorch_sol_code": "\nk = round(percent_ones * d)\nk_th_quant = torch.topk(rand_mat, k, largest=False)[0][:, -1:]\nbool_tensor = rand_mat <= k_th_quant\ndesired_tensor = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0))\n", "pytorch_test_code": {"setup_code": "\nnum_ones = torch.sum(desired_tensor)\ntol = 0.2 # Tolerance for percentage deviation\n", "test_cases": ["assert desired_tensor.dtype == torch.int64, f'Output should have dtype torch.int64, got {desired_tensor.dtype}'", "assert desired_tensor.shape == (n, d), f'Output shape should be ({n}, {d}), got {desired_tensor.shape}'", "assert desired_tensor.unique().tolist() == [0, 1], 'Output should contain only 0s and 1s'", "assert abs(num_ones / (n * d) - percent_ones) < tol, f'Percentage of 1s should be close to {percent_ones}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nn = 2\nd = 5\nrand_mat = tf.random.uniform(shape=(n, d))\npercent_ones = 0.25\n\n# Create an integer tensor where percent_ones of elements are 1s and the rest are 0s\n# k = \n# k_th_quant = \n# bool_tensor = \n# desired_tensor = \n", "tensorflow_sol_code": "\nk = round(percent_ones * d)\nk_th_quant = tf.sort(rand_mat, direction='ASCENDING')[..., k - 1:k]\nbool_tensor = rand_mat <= k_th_quant\ndesired_tensor = tf.cast(bool_tensor, tf.int32)\n", "tensorflow_test_code": {"setup_code": "tf.experimental.numpy.experimental_enable_numpy_behavior()\nnum_ones = tf.reduce_sum(desired_tensor)\ntol = 0.2 # Tolerance for percentage deviation\n", "test_cases": ["assert desired_tensor.dtype == tf.int32, f'Output should have dtype int32, got {desired_tensor.dtype}'", "assert desired_tensor.shape == (n, d), f'Output shape 
should be ({n}, {d}), got {desired_tensor.shape}'", "assert sorted(tf.unique(tf.squeeze(desired_tensor.reshape(n*d,-1))).y.numpy()) == [0, 1], 'Output should contain only 0s and 1s'", "assert abs(num_ones / (n * d) - percent_ones) < tol, f'Percentage of 1s should be close to {percent_ones}'"]}} |
| {"question_id": 165, "pytorch_library": "import torch", "pytorch_start_code": "\nx = torch.arange(24).view(4, 3, 2)\nids = torch.randint(0, 3, size=(4, 1))\n\n# Gather rows from x using indices in ids\n# idx = \n# result = \n", "pytorch_sol_code": "\nidx = ids.repeat(1, 2).view(4, 1, 2)\nresult = torch.gather(x, 1, idx)\n", "pytorch_test_code": {"setup_code": "\nexpected_output = x[torch.arange(4), ids.squeeze()]\n", "test_cases": ["assert result.shape == (4, 1, 2), f'Output shape should be (4, 1, 2), got {result.shape}'", "assert torch.allclose(result.squeeze(), expected_output), 'Output values do not match the expected result'", "assert result.dtype == x.dtype, f'Output dtype {result.dtype} does not match input dtype {x.dtype}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nx = tf.reshape(tf.range(24), (4, 3, 2))\nids = tf.random.uniform(shape=(4, 1), maxval=3, dtype=tf.int32)\n\n# Gather rows from x using indices in ids\n# idx = \n# result = \n", "tensorflow_sol_code": "\nidx = tf.tile(ids, [1, 2])\nresult = tf.gather(x, idx, batch_dims=1)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.gather(x, ids, batch_dims=1)\n", "test_cases": ["assert tf.squeeze(result).shape == (4, 2, 2), f'Output shape should be (4, 2, 2), got {result.shape}'", "assert tf.reduce_all(result == expected_output), 'Output values do not match the expected result'", "assert result.dtype == x.dtype, f'Output dtype {result.dtype} does not match input dtype {x.dtype}'"]}} |
| {"question_id": 166, "pytorch_library": "import torch", "pytorch_start_code": "\nA = torch.randint(2, (10,))\nB = torch.randint(2, (10,))\n\n# Count the number of equal elements in A and B\n# num_equal = \n", "pytorch_sol_code": "\nnum_equal = torch.eq(A, B).sum()\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.sum(A == B)\n", "test_cases": ["assert isinstance(num_equal, torch.Tensor), f'Output should be a torch.Tensor, got {type(num_equal)}'", "assert num_equal.dtype == torch.int64, f'Output dtype should be torch.int64, got {num_equal.dtype}'", "assert num_equal.item() == expected_output.item(), f'Expected {expected_output.item()} equal elements, got {num_equal.item()}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nA = tf.random.uniform(shape=(10,), maxval=2, dtype=tf.int32)\nB = tf.random.uniform(shape=(10,), maxval=2, dtype=tf.int32)\n\n# Count the number of equal elements in A and B\n# num_equal = \n", "tensorflow_sol_code": "\nnum_equal = tf.reduce_sum(tf.cast(tf.equal(A, B), tf.int32))\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.reduce_sum(tf.cast(tf.equal(A, B), tf.int32))\n", "test_cases": ["assert isinstance(num_equal, tf.Tensor), f'Output should be a tf.Tensor, got {type(num_equal)}'", "assert num_equal.dtype == tf.int32, f'Output dtype should be tf.int32, got {num_equal.dtype}'", "assert num_equal.numpy() == expected_output.numpy(), f'Expected {expected_output.numpy()} equal elements, got {num_equal.numpy()}'"]}} |
| {"question_id": 167, "pytorch_library": "import torch", "pytorch_start_code": "\nA = torch.zeros(2, dtype=torch.float32) # sum vector\nB = torch.tensor([0, 0, 1, 1]) # contribution vector\nC = torch.tensor([20.0, 30.0, 40.0, 10.0]) # value vector\n\n# Sum values in C according to indices in B and place results in A\n# A.index_add_(...)\n", "pytorch_sol_code": "\nA.index_add_(0, B, C)\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([50.0, 50.0])\n", "test_cases": ["assert isinstance(A, torch.Tensor), f'A should be a torch.Tensor, got {type(A)}'", "assert A.shape == (2,), f'A should have shape (2,), got {A.shape}'", "assert torch.allclose(A, expected_output), f'Expected A to be {expected_output}, got {A}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nA = tf.zeros(2, dtype=tf.float32) # sum vector\nB = tf.constant([0, 0, 1, 1]) # contribution vector\nC = tf.constant([20.0, 30.0, 40.0, 10.0]) # value vector\n\n# Sum values in C according to indices in B and place results in A\n# A = \n", "tensorflow_sol_code": "\nA = tf.tensor_scatter_nd_add(A, tf.expand_dims(B, axis=-1), C)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([50.0, 50.0])\n", "test_cases": ["assert isinstance(A, tf.Tensor), f'A should be a tf.Tensor, got {type(A)}'", "assert A.shape == (2,), f'A should have shape (2,), got {A.shape}'", "assert tf.reduce_all(tf.equal(A, expected_output)), f'Expected A to be {expected_output}, got {A}'"]}} |
| {"question_id": 168, "pytorch_library": "import torch", "pytorch_start_code": "\nx = torch.tensor([\n [1, 2, 3, 4, 3, 3, 4],\n [1, 6, 3, 5, 3, 5, 4]\n], dtype=torch.long)\n\n# Zero out duplicate values in each row of x\n# result = \n", "pytorch_sol_code": "\ny, indices = x.sort(dim=-1)\ny[:, 1:] *= ((y[:, 1:] - y[:, :-1]) != 0).long()\nindices = indices.sort(dim=-1)[1]\nresult = torch.gather(y, 1, indices)\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([\n [1, 2, 3, 4, 0, 0, 0],\n [1, 6, 0, 0, 3, 5, 4]\n], dtype=torch.long)\n", "test_cases": ["assert isinstance(result, torch.Tensor), f'result should be a torch.Tensor, got {type(result)}'", "assert result.shape == x.shape, f'result shape {result.shape} does not match input shape {x.shape}'", "assert torch.all(result == expected_output), f'Expected result:\\n{expected_output}\\nGot:\\n{result}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nx = tf.constant([\n [1, 2, 3, 4, 3, 3, 4],\n [1, 6, 3, 5, 3, 5, 4]\n], dtype=tf.int64)\n\n# Zero out duplicate values in each row of x\n# result = \n", "tensorflow_sol_code": "\ny = tf.sort(x, axis=-1)\nindices = tf.argsort(x, axis=-1)\nmask = tf.cast(tf.not_equal(y[:, 1:], y[:, :-1]), tf.int64)\ny = tf.concat([y[:, :1], y[:, 1:] * mask], axis=-1)\nresult = tf.gather(y, indices, batch_dims=1)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([\n [1, 2, 3, 0, 4, 0, 0],\n [1, 0, 5, 6, 4, 0, 3]\n], dtype=tf.int64)\n", "test_cases": ["assert isinstance(result, tf.Tensor), f'result should be a tf.Tensor, got {type(result)}'", "assert result.shape == x.shape, f'result shape {result.shape} does not match input shape {x.shape}'", "assert tf.reduce_all(tf.equal(result, expected_output)), f'Expected result:\\n{expected_output}\\nGot:\\n{result}'"]}} |
| {"question_id": 169, "pytorch_library": "import torch", "pytorch_start_code": "\nold_values = torch.tensor([1, 2, 3, 4, 5, 5, 2, 3, 3, 2], dtype=torch.int32)\nold_new_value = torch.tensor([[2, 22], [3, 33], [6, 66]], dtype=torch.int32)\n\n# Replace old values with new values in old_values\n# result = \n", "pytorch_sol_code": "\nold_new_dict = dict(old_new_value.tolist())\nresult = old_values.clone()\nfor old_val, new_val in old_new_dict.items():\n result[old_values == old_val] = new_val\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([1, 22, 33, 4, 5, 5, 22, 33, 33, 22], dtype=torch.int32)\n", "test_cases": ["assert isinstance(result, torch.Tensor), f'result should be a torch.Tensor, got {type(result)}'", "assert result.shape == old_values.shape, f'result shape {result.shape} does not match input shape {old_values.shape}'", "assert torch.all(result == expected_output), f'Expected result:\\n{expected_output}\\nGot:\\n{result}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nold_values = tf.constant([1, 2, 3, 4, 5, 5, 2, 3, 3, 2], dtype=tf.int32)\nold_new_value = tf.constant([[2, 22], [3, 33], [6, 66]], dtype=tf.int32)\n\n# Replace old values with new values in old_values\n# result = \n", "tensorflow_sol_code": "\nold_new_dict = dict(old_new_value.numpy())\nresult = tf.Variable(old_values)\nfor old_val, new_val in old_new_dict.items():\n result.assign(tf.where(old_values == old_val, new_val, result))\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([1, 22, 33, 4, 5, 5, 22, 33, 33, 22], dtype=tf.int32)\n", "test_cases": ["assert isinstance(result, tf.Variable), f'result should be a tf.Variable, got {type(result)}'", "assert result.shape == old_values.shape, f'result shape {result.shape} does not match input shape {old_values.shape}'", "assert tf.reduce_all(tf.equal(result, expected_output)), f'Expected result:\\n{expected_output}\\nGot:\\n{result}'"]}} |
| {"question_id": 170, "pytorch_library": "import torch\nimport torch.nn as nn\nimport numpy as np\nimport random", "pytorch_start_code": "\nrandom_image = []\nfor x in range(1, 946):\n random_image.append(random.randint(0, 255))\n\nrandom_image_arr = np.array(random_image)\n\n# Create a Conv2d layer\n# conv2 = \n\n# Reshape the image to the required input shape\n# image_d = \n\n# Apply the convolution operation\n# fc = \n", "pytorch_sol_code": "\nconv2 = nn.Conv2d(1, 18, kernel_size=3, stride=1, padding=1)\nimage_d = torch.FloatTensor(np.asarray(random_image_arr.reshape(1, 1, 27, 35)))\nfc = conv2(image_d)\n", "pytorch_test_code": {"setup_code": "\nexpected_output_shape = (1, 18, 27, 35)\n", "test_cases": ["assert isinstance(conv2, nn.Conv2d), f'conv2 should be an instance of nn.Conv2d, got {type(conv2)}'", "assert isinstance(image_d, torch.Tensor), f'image_d should be a torch.Tensor, got {type(image_d)}'", "assert image_d.shape == (1, 1, 27, 35), f'Expected image_d shape (1, 1, 27, 35), got {image_d.shape}'", "assert isinstance(fc, torch.Tensor), f'fc should be a torch.Tensor, got {type(fc)}'", "assert fc.shape == expected_output_shape, f'Expected output shape {expected_output_shape}, got {fc.shape}'"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np\nimport random", "tensorflow_start_code": "\nrandom_image = []\nfor x in range(1, 946):\n random_image.append(random.randint(0, 255))\n\nrandom_image_arr = np.array(random_image, dtype=np.float32)\n\n# Create a Conv2D layer\n# conv2 = \n\n# Reshape the image to the required input shape\n# image_d = \n\n# Apply the convolution operation\n# fc = \n", "tensorflow_sol_code": "\nconv2 = tf.keras.layers.Conv2D(18, kernel_size=3, strides=1, padding='same')\nimage_d = tf.reshape(random_image_arr, [1, 27, 35, 1])\nfc = conv2(image_d)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output_shape = (1, 27, 35, 18)\n", "test_cases": ["assert isinstance(conv2, tf.keras.layers.Conv2D), f'conv2 should be 
an instance of tf.keras.layers.Conv2D, got {type(conv2)}'", "assert isinstance(image_d, tf.Tensor), f'image_d should be a tf.Tensor, got {type(image_d)}'", "assert image_d.shape == (1, 27, 35, 1), f'Expected image_d shape (1, 27, 35, 1), got {image_d.shape}'", "assert isinstance(fc, tf.Tensor), f'fc should be a tf.Tensor, got {type(fc)}'", "assert fc.shape == expected_output_shape, f'Expected output shape {expected_output_shape}, got {fc.shape}'"]}} |
| {"question_id": 171, "pytorch_library": "import torch", "pytorch_start_code": "\nt = torch.rand(2, 3)\n\n# Calculate the 2-norm along dimension 1\n# norm_2 = \n", "pytorch_sol_code": "\nnorm_2 = t.norm(dim=1, p=2)\n# Alternative solutions:\n# norm_2 = torch.linalg.norm(t, dim=1, ord=2)\n# norm_2 = t.pow(2).sum(dim=1).sqrt()\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.sqrt(torch.sum(t**2, dim=1))\n", "test_cases": ["assert isinstance(norm_2, torch.Tensor), f'norm_2 should be a torch.Tensor, got {type(norm_2)}'", "assert norm_2.shape == (2,), f'Expected norm_2 shape: (2,), got: {norm_2.shape}'", "assert torch.allclose(norm_2, expected_output), f'Expected output: {expected_output}, got: {norm_2}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nt = tf.random.uniform(shape=(2, 3))\n\n# Calculate the 2-norm along dimension 1\n# norm_2 = \n", "tensorflow_sol_code": "\nnorm_2 = tf.norm(t, ord=2, axis=1)\n# Alternative solution:\n# norm_2 = tf.sqrt(tf.reduce_sum(tf.square(t), axis=1))\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.sqrt(tf.reduce_sum(tf.square(t), axis=1))\n", "test_cases": ["assert isinstance(norm_2, tf.Tensor), f'norm_2 should be a tf.Tensor, got {type(norm_2)}'", "assert norm_2.shape == (2,), f'Expected norm_2 shape: (2,), got: {norm_2.shape}'", "assert tf.reduce_all(tf.abs(norm_2 - expected_output) < 1e-6), f'Expected output: {expected_output}, got: {norm_2}'"]}} |
| {"question_id": 172, "pytorch_library": "import torch", "pytorch_start_code": "\nA = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\nB = torch.tensor([2, 5, 6, 8, 12, 15, 16])\n\n# Create a comparison matrix\n# comparison_matrix = \n\n# Count the number of elements from B that exist in A\n# exists = \n\n# Count the number of elements from B that do not exist in A\n# not_exist = \n", "pytorch_sol_code": "\ncomparison_matrix = B.unsqueeze(1) == A.unsqueeze(0)\nmatches_per_element = comparison_matrix.sum(dim=1)\n\nexists = (matches_per_element > 0).sum().item()\nnot_exist = len(B) - exists\n", "pytorch_test_code": {"setup_code": "\nexpected_exists = 4\nexpected_not_exist = 3\n", "test_cases": ["assert isinstance(exists, int), f'exists should be an int, got {type(exists)}'", "assert isinstance(not_exist, int), f'not_exist should be an int, got {type(not_exist)}'", "assert exists == expected_exists, f'Expected exists: {expected_exists}, got: {exists}'", "assert not_exist == expected_not_exist, f'Expected not_exist: {expected_not_exist}, got: {not_exist}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nA = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\nB = tf.constant([2, 5, 6, 8, 12, 15, 16])\n\n# Create a comparison matrix\n# comparison_matrix = \n\n# Count the number of elements from B that exist in A\n# exists = \n\n# Count the number of elements from B that do not exist in A\n# not_exist = \n", "tensorflow_sol_code": "\ncomparison_matrix = tf.equal(tf.expand_dims(B, 1), tf.expand_dims(A, 0))\nmatches_per_element = tf.reduce_sum(tf.cast(comparison_matrix, tf.int32), axis=1)\n\nexists = tf.reduce_sum(tf.cast(matches_per_element > 0, tf.int32)).numpy()\nnot_exist = len(B) - exists\n", "tensorflow_test_code": {"setup_code": "import numpy as np\nexpected_exists = 4\nexpected_not_exist = 3\n", "test_cases": ["assert isinstance(exists, np.int32), f'exists should be an int, got {type(exists)}'", "assert isinstance(not_exist, np.int64), 
f'not_exist should be an int, got {type(not_exist)}'", "assert exists == expected_exists, f'Expected exists: {expected_exists}, got: {exists}'", "assert not_exist == expected_not_exist, f'Expected not_exist: {expected_not_exist}, got: {not_exist}'"]}} |
| {"question_id": 173, "pytorch_library": "import torch", "pytorch_start_code": "\n# Create a 3-dimensional tensor of shape (a, b, c)\ntensor = torch.rand(4, 5, 6)\n\n# Create a list of indices of length a, each in the range [0, b)\nB = [1, 3, 2, 4]\n\n# Slice the tensor along the first axis using the indices\n# output = \n", "pytorch_sol_code": "\noutput = tensor[torch.arange(len(B)), B]\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.stack([tensor[i, B[i]] for i in range(len(B))])\n", "test_cases": ["assert isinstance(output, torch.Tensor), f'output should be a torch.Tensor, got {type(output)}'", "assert output.shape == (len(B), tensor.shape[-1]), f'Expected output shape: ({len(B)}, {tensor.shape[-1]}), got: {output.shape}'", "assert torch.allclose(output, expected_output), f'Expected output:\\n{expected_output}\\nGot:\\n{output}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n# Create a 3-dimensional tensor of shape (a, b, c)\ntensor = tf.random.uniform(shape=(4, 5, 6))\n\n# Create a list of indices of length a, each in the range [0, b)\nB = [1, 3, 2, 4]\n\n# Slice the tensor along the first axis using the indices\n# output = \n", "tensorflow_sol_code": "\noutput = tf.gather_nd(tensor, tf.stack([tf.range(len(B)), B], axis=1))\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.stack([tensor[i, B[i]] for i in range(len(B))])\n", "test_cases": ["assert isinstance(output, tf.Tensor), f'output should be a tf.Tensor, got {type(output)}'", "assert output.shape == (len(B), tensor.shape[-1]), f'Expected output shape: ({len(B)}, {tensor.shape[-1]}), got: {output.shape}'", "assert tf.reduce_all(tf.abs(output - expected_output) < 1e-6), f'Expected output:\\n{expected_output}\\nGot:\\n{output}'"]}} |
| {"question_id": 174, "pytorch_library": "import torch", "pytorch_start_code": "\nv1 = torch.tensor([2, 3, 3])\nv2 = torch.tensor([1, 2, 6, 2, 3, 10, 4, 6, 4])\n\n# Mask to find elements in v2 that are not in v1\nmask = ~torch.isin(v2, v1)\nv2_without_v1 = v2[mask]\n\n# Get unique elements and their first indices\n# unique_indices = \n", "pytorch_sol_code": "\ndef get_unique_elements_first_idx(tensor):\n # sort tensor\n sorted_tensor, indices = torch.sort(tensor)\n # find position of jumps\n unique_mask = torch.cat((torch.tensor([True]), sorted_tensor[1:] != sorted_tensor[:-1]))\n return indices[unique_mask]\n\nunique_indices = get_unique_elements_first_idx(v2_without_v1)\n", "pytorch_test_code": {"setup_code": "\nexpected_unique_indices = torch.tensor([0, 3, 1, 2])\nexpected_unique_values = v2[mask][expected_unique_indices]\n", "test_cases": ["assert isinstance(unique_indices, torch.Tensor), f'unique_indices should be a torch.Tensor, got {type(unique_indices)}'", "assert torch.all(unique_indices == expected_unique_indices), f'Expected unique_indices: {expected_unique_indices}, got: {unique_indices}'", "assert torch.all(v2[mask][unique_indices] == expected_unique_values), f'Expected unique values: {expected_unique_values}, got: {v2[mask][unique_indices]}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nv1 = tf.constant([2, 3, 3])\nv2 = tf.constant([1, 2, 6, 2, 3, 10, 4, 6, 4])\n\n# Mask to find elements in v2 that are not in v1\nmask = ~tf.reduce_any(tf.equal(tf.expand_dims(v2, 1), tf.expand_dims(v1, 0)), axis=1)\nv2_without_v1 = tf.boolean_mask(v2, mask)\n\n# Get unique elements and their first indices\n# unique_indices = \n", "tensorflow_sol_code": "import tensorflow as tf\n\n\ndef get_unique_elements_first_idx(tensor):\n # sort tensor\n sorted_tensor = tf.sort(tensor)\n # indices of sorted tensor\n indices = tf.argsort(tensor)\n # find position of jumps\n unique_mask = tf.concat(\n ([True], sorted_tensor[1:] != 
sorted_tensor[:-1]), axis=0)\n return tf.boolean_mask(indices, unique_mask)\n\n\nunique_indices = get_unique_elements_first_idx(v2_without_v1)", "tensorflow_test_code": {"setup_code": "\nexpected_unique_indices = tf.constant([0, 3, 1, 2])\nexpected_unique_values = tf.gather(v2_without_v1, expected_unique_indices)\n", "test_cases": ["assert isinstance(unique_indices, tf.Tensor), f'unique_indices should be a tf.Tensor, got {type(unique_indices)}'", "assert tf.reduce_all(unique_indices == expected_unique_indices), f'Expected unique_indices: {expected_unique_indices}, got: {unique_indices}'", "assert tf.reduce_all(tf.gather(v2_without_v1, unique_indices) == expected_unique_values), f'Expected unique values: {expected_unique_values}, got: {tf.gather(v2_without_v1, unique_indices)}'"]}} |
| {"question_id": 175, "pytorch_library": "import torch", "pytorch_start_code": "\nclass ValidatedArray:\n def __init__(self, array: torch.Tensor):\n self._array = array\n self._validate_array()\n \n def _validate_array(self):\n # Add validation logic here\n pass\n\n @property\n def array(self):\n self._validate_array()\n return self._array\n", "pytorch_sol_code": "\nclass ValidatedArray:\n def __init__(self, array: torch.Tensor):\n self._array = array\n self._validate_array()\n \n def _validate_array(self):\n assert torch.allclose(self._array.sum(dim=-1), torch.ones(self._array.shape[:-1])), f'The last dim represents a categorical distribution. It must sum to one.'\n\n @property\n def array(self):\n self._validate_array()\n return self._array\n", "pytorch_test_code": {"setup_code": "\nvalid_array = torch.tensor([[0.3, 0.7], [0.2, 0.8]])\ninvalid_array = torch.tensor([[0.3, 0.6], [0.2, 0.9]])\n", "test_cases": ["assert isinstance(ValidatedArray(valid_array).array, torch.Tensor), 'ValidatedArray.array should return a torch.Tensor'", "assert torch.allclose(ValidatedArray(valid_array).array, valid_array), 'ValidatedArray.array should return the original tensor if it is valid'", "try:\n ValidatedArray(invalid_array)\n assert False, 'ValidatedArray should raise an AssertionError for an invalid tensor'\nexcept AssertionError:\n pass"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nclass ValidatedArray:\n def __init__(self, array: tf.Tensor):\n self._array = array\n self._validate_array()\n \n def _validate_array(self):\n # Add validation logic here\n pass\n\n @property\n def array(self):\n self._validate_array()\n return self._array\n", "tensorflow_sol_code": "\nclass ValidatedArray:\n def __init__(self, array: tf.Tensor):\n self._array = array\n self._validate_array()\n \n def _validate_array(self):\n assert tf.reduce_all(tf.abs(tf.reduce_sum(self._array, axis=-1) - 1.0) < 1e-6), f'The last dim represents a categorical distribution. 
It must sum to one.'\n\n @property\n def array(self):\n self._validate_array()\n return self._array\n", "tensorflow_test_code": {"setup_code": "\nvalid_array = tf.constant([[0.3, 0.7], [0.2, 0.8]])\ninvalid_array = tf.constant([[0.3, 0.6], [0.2, 0.9]])\n", "test_cases": ["assert isinstance(ValidatedArray(valid_array).array, tf.Tensor), 'ValidatedArray.array should return a tf.Tensor'", "assert tf.reduce_all(ValidatedArray(valid_array).array == valid_array), 'ValidatedArray.array should return the original tensor if it is valid'", "try:\n ValidatedArray(invalid_array)\n assert False, 'ValidatedArray should raise an AssertionError for an invalid tensor'\nexcept AssertionError:\n pass"]}} |
| {"question_id": 176, "pytorch_library": "import torch", "pytorch_start_code": "\n# Create a 5D tensor\ntensor = torch.rand(1, 3, 10, 40, 1)\n\n# Specify the dimension and size for splitting\ndimension = 3\nsize=10\n# Split the tensor into 10 tensors\n# split_tensors = \n", "pytorch_sol_code": "split_tensors = torch.split(tensor, 10, dim=3)\n# convert the tuple of tensors to a tensor\nsplit_tensors = torch.stack(split_tensors)", "pytorch_test_code": {"setup_code": "\nexpected_shape = (4, 1, 3, 10, 10, 1)\n", "test_cases": ["assert isinstance(split_tensors, torch.Tensor), 'split_tensors should be a torch.Tensor'", "assert split_tensors.shape == expected_shape, f'Expected shape {expected_shape}, got {split_tensors.shape}'", "assert torch.allclose(split_tensors[0], tensor[:, :, :, 0:10,:]), 'First split tensor does not match expected values'", "assert torch.allclose(split_tensors[-1], tensor[:, :, :, 30:40,:]), 'Last split tensor does not match expected values'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n# Create a 5D tensor\ntensor = tf.random.uniform(shape=(1, 3, 10, 40, 1))\n\n# Specify the dimension and size for splitting\ndimension = 3\nsize = 10\n\n# Split the tensor into smaller tensors\n# split_tensors = \n", "tensorflow_sol_code": "\n# Create a 5D tensor\ntensor = tf.random.uniform(shape=(1, 3, 10, 40, 1))\n\n# Specify the dimension and size for splitting\ndimension = 3\nsize = 10\n\n# Split the tensor into smaller tensors\nsplit_tensors = tf.split(tensor, num_or_size_splits=tensor.shape[dimension]//size, axis=dimension)\nsplit_tensors = tf.stack(split_tensors)\n", "tensorflow_test_code": {"setup_code": "\nexpected_shape = (4, 1, 3, 10, 10, 1)\n", "test_cases": ["assert isinstance(split_tensors, tf.Tensor), 'split_tensors should be a tf.Tensor'", "assert split_tensors.shape == expected_shape, f'Expected shape {expected_shape}, got {split_tensors.shape}'", "assert tf.reduce_all(split_tensors[0] == tensor[:, :, :, :10]), 
'First split tensor does not match expected values'", "assert tf.reduce_all(split_tensors[-1] == tensor[:, :, :, 30:40]), 'Last split tensor does not match expected values'"]}} |
| {"question_id": 177, "pytorch_library": "import torch\nimport torch.nn.functional as F", "pytorch_start_code": "\n# Assume we have a batch of images and masks with different sizes\nimage_batch = [torch.rand(3, 32, 32), torch.rand(3, 64, 64), torch.rand(3, 48, 48)]\nmask_batch = [torch.rand(32, 32), torch.rand(64, 64), torch.rand(48, 48)]\n\n# Determine maximum height and width\n# max_height = \n# max_width = \n\n# Pad the image batch\n# image_batch = \n\n# Pad the mask batch\n# mask_batch = \n", "pytorch_sol_code": "# Determine maximum height and width\nmax_height = max([img.size(1) for img in image_batch])\nmax_width = max([img.size(2) for img in image_batch])\n\n# Pad the image batch\nimage_batch = [F.pad(img, [0, max_width - img.size(2), 0, max_height - img.size(1)]) for img in image_batch]\n\n# Pad the mask batch\nmask_batch = [F.pad(mask, [0, max_width - mask.size(1), 0, max_height - mask.size(0)]) for mask in mask_batch]\n", "pytorch_test_code": {"setup_code": "\n", "test_cases": ["assert isinstance(image_batch, list) and isinstance(mask_batch, list), 'image_batch and mask_batch should be lists'", "assert all(isinstance(img, torch.Tensor) for img in image_batch), 'image_batch should contain torch.Tensor objects'", "assert all(isinstance(mask, torch.Tensor) for mask in mask_batch), 'mask_batch should contain torch.Tensor objects'", "assert len(image_batch) == len(mask_batch), 'image_batch and mask_batch should have the same length'", "assert all(img.size() == (3, max_height, max_width) for img in image_batch), 'All images should have the same padded size'", "assert all(mask.size() == (max_height, max_width) for mask in mask_batch), 'All masks should have the same padded size'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n# Assume we have a batch of images and masks with different sizes\nimage_batch = [tf.random.uniform((32, 32, 3)), tf.random.uniform((64, 64, 3)), tf.random.uniform((48, 48, 3))]\nmask_batch = 
[tf.random.uniform((32, 32)), tf.random.uniform((64, 64)), tf.random.uniform((48, 48))]\n\n# Determine maximum height and width\n# max_height = \n# max_width = \n\n# Pad the image batch\n# image_batch = \n\n# Pad the mask batch\n# mask_batch = \n", "tensorflow_sol_code": "# Determine maximum height and width\nmax_height = max([img.shape[0] for img in image_batch])\nmax_width = max([img.shape[1] for img in image_batch])\n\n# Pad the image batch\nimage_batch = [tf.pad(img, [[0, max_height - img.shape[0]], [0, max_width - img.shape[1]], [0, 0]]) for img in image_batch]\n\n# Pad the mask batch\nmask_batch = [tf.pad(mask, [[0, max_height - mask.shape[0]], [0, max_width - mask.shape[1]]]) for mask in mask_batch]\n", "tensorflow_test_code": {"setup_code": "\n", "test_cases": ["assert isinstance(image_batch, list) and isinstance(mask_batch, list), 'image_batch and mask_batch should be lists'", "assert all(isinstance(img, tf.Tensor) for img in image_batch), 'image_batch should contain tf.Tensor objects'", "assert all(isinstance(mask, tf.Tensor) for mask in mask_batch), 'mask_batch should contain tf.Tensor objects'", "assert len(image_batch) == len(mask_batch), 'image_batch and mask_batch should have the same length'", "assert all(img.shape == (max_height, max_width, 3) for img in image_batch), 'All images should have the same padded size'", "assert all(mask.shape == (max_height, max_width) for mask in mask_batch), 'All masks should have the same padded size'"]}} |
| {"question_id": 178, "pytorch_library": "import torch", "pytorch_start_code": "\na = torch.tensor([1, 234, 54, 6543, 55, 776])\nb = torch.tensor([234, 54])\nc = torch.tensor([55, 776])\n\n# Check if values in a are contained in b or c and create a boolean mask\n# a_masked = \n", "pytorch_sol_code": "\na_masked = sum(a == i for i in b).bool() + sum(a == i for i in c).bool()\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([False, True, True, False, True, True])\n", "test_cases": ["assert isinstance(a_masked, torch.Tensor), 'a_masked should be a torch.Tensor'", "assert a_masked.dtype == torch.bool, 'a_masked should have dtype torch.bool'", "assert a_masked.shape == a.shape, f'a_masked should have the same shape as a. Expected {a.shape}, got {a_masked.shape}'", "assert torch.all(a_masked == expected_output), f'\\nExpected:\\n{expected_output}\\nGot:\\n{a_masked}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\na = tf.constant([1, 234, 54, 6543, 55, 776])\nb = tf.constant([234, 54])\nc = tf.constant([55, 776])\n\n# Check if values in a are contained in b or c and create a boolean mask\n# a_masked = \n", "tensorflow_sol_code": "\na_masked = tf.reduce_any(tf.equal(a[:, tf.newaxis], b), axis=1) | tf.reduce_any(tf.equal(a[:, tf.newaxis], c), axis=1)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([False, True, True, False, True, True])\n", "test_cases": ["assert isinstance(a_masked, tf.Tensor), 'a_masked should be a tf.Tensor'", "assert a_masked.dtype == tf.bool, 'a_masked should have dtype tf.bool'", "assert a_masked.shape == a.shape, f'a_masked should have the same shape as a. Expected {a.shape}, got {a_masked.shape}'", "assert tf.reduce_all(tf.equal(a_masked, expected_output)), f'\\nExpected:\\n{expected_output}\\nGot:\\n{a_masked}'"]}} |
| {"question_id": 179, "pytorch_library": "import torch", "pytorch_start_code": "\nx = torch.rand(20, 30, 40, 50)\ny = torch.rand(20, 30, 40, 50)\n\n# Merge x and y based on their channel norms\n# B, C, H, W = x.size()\n# z = torch.zeros_like(x)\n\n# x_norm = \n# y_norm = \n\n# condition = \n\n# Assign values to z based on the condition\n# z[condition] = \n# z[~condition] = \n", "pytorch_sol_code": "\nB, C, H, W = x.size()\nz = torch.zeros_like(x)\n\nx_norm = torch.norm(x, dim=(2, 3))\ny_norm = torch.norm(y, dim=(2, 3))\n\ncondition = x_norm >= y_norm\n\nz[condition] = x[condition]\nz[~condition] = y[~condition]\n", "pytorch_test_code": {"setup_code": "\n", "test_cases": ["assert isinstance(z, torch.Tensor), 'z should be a torch.Tensor'", "assert z.shape == x.shape, f'z should have the same shape as x and y. Expected {x.shape}, got {z.shape}'", "assert torch.allclose(z[x_norm >= y_norm], x[x_norm >= y_norm]), 'z should contain values from x where x_norm >= y_norm'", "assert torch.allclose(z[x_norm < y_norm], y[x_norm < y_norm]), 'z should contain values from y where x_norm < y_norm'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nx = tf.random.uniform((20, 30, 40, 50))\ny = tf.random.uniform((20, 30, 40, 50))\n\n# Merge x and y based on their channel norms\n# B, C, H, W = x.shape\n# z = tf.zeros_like(x)\n\n# x_norm = \n# y_norm = \n\n# condition = \n\n# Assign values to z based on the condition\n# z = tf.where(condition, x, y)\n", "tensorflow_sol_code": "\nB, C, H, W = x.shape\nz = tf.zeros_like(x)\n\nx_norm = tf.norm(x, axis=(2, 3))\ny_norm = tf.norm(y, axis=(2, 3))\n\ncondition = x_norm >= y_norm\n\nz = tf.where(condition[:, :, tf.newaxis, tf.newaxis], x, y)\n", "tensorflow_test_code": {"setup_code": "\n", "test_cases": ["assert isinstance(z, tf.Tensor), 'z should be a tf.Tensor'", "assert z.shape == x.shape, f'z should have the same shape as x and y. Expected {x.shape}, got {z.shape}'", "assert tf.reduce_all(z[condition] == x[condition]), 'z should contain values from x where condition is True'", "assert tf.reduce_all(z[~condition] == y[~condition]), 'z should contain values from y where condition is False'"]}} |
| {"question_id": 180, "pytorch_library": "import torch", "pytorch_start_code": "\na = torch.FloatTensor([[[1, 1, 1], [2, 2, 2]], [[9, 9, 9], [5, 5, 5]]])\nb = torch.IntTensor([1, 0])\n\n# Select rows from a based on indices in b\n# out = \n", "pytorch_sol_code": "\nout = a[torch.arange(a.size(0)), b]\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.FloatTensor([[2, 2, 2], [9, 9, 9]])\n", "test_cases": ["assert isinstance(out, torch.Tensor), 'out should be a torch.Tensor'", "assert out.shape == (a.size(0), a.size(2)), f'out should have shape ({a.size(0)}, {a.size(2)}), got {out.shape}'", "assert torch.allclose(out, expected_output), f'\\nExpected:\\n{expected_output}\\nGot:\\n{out}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\na = tf.constant([[[1, 1, 1], [2, 2, 2]], [[9, 9, 9], [5, 5, 5]]], dtype=tf.float32)\nb = tf.constant([1, 0], dtype=tf.int32)\n\n# Select rows from a based on indices in b\n# out = \n", "tensorflow_sol_code": "\nout = tf.gather_nd(a, tf.stack([tf.range(tf.shape(a)[0]), b], axis=1))\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([[2, 2, 2], [9, 9, 9]], dtype=tf.float32)\n", "test_cases": ["assert isinstance(out, tf.Tensor), 'out should be a tf.Tensor'", "assert out.shape == (a.shape[0], a.shape[2]), f'out should have shape ({a.shape[0]}, {a.shape[2]}), got {out.shape}'", "assert tf.reduce_all(tf.equal(out, expected_output)), f'\\nExpected:\\n{expected_output}\\nGot:\\n{out}'"]}} |
| {"question_id": 181, "pytorch_library": "import torch", "pytorch_start_code": "\nindexes = torch.tensor([[0, 2, 1, 3], \n [1, 0, 3, 2]])\nt1 = torch.zeros_like(indexes).float()\nt2 = torch.tensor([[0.1, 0.2, 0.3, 0.4],\n [0.5, 0.6, 0.7, 0.8]])\n\n# Re-assign values in t1 based on indexes and t2\n# t1[...] = ...\n", "pytorch_sol_code": "\nt1.scatter_(1, indexes, t2)\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([[0.1, 0.3, 0.2, 0.4],\n [0.6, 0.5, 0.8, 0.7]])\n", "test_cases": ["assert isinstance(t1, torch.Tensor), 't1 should be a torch.Tensor'", "assert t1.shape == indexes.shape == t2.shape, 't1, indexes, and t2 should have the same shape'", "assert torch.allclose(t1, expected_output), f'Expected:\\n{expected_output}\\nGot:\\n{t1}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nindexes = tf.constant([[0, 2, 1, 3],\n [1, 0, 3, 2]])\nt1 = tf.zeros_like(indexes, dtype=tf.float32)\nt2 = tf.constant([[0.1, 0.2, 0.3, 0.4],\n [0.5, 0.6, 0.7, 0.8]])\n\n# Re-assign values in t1 based on indexes and t2\n# t1 = ...\n", "tensorflow_sol_code": "\nt1 = tf.gather(t2, indexes, batch_dims=1)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([[0.1, 0.3, 0.2, 0.4],\n [0.6, 0.5, 0.8, 0.7]])\n", "test_cases": ["assert isinstance(t1, tf.Tensor), 't1 should be a tf.Tensor'", "assert t1.shape == indexes.shape == t2.shape, 't1, indexes, and t2 should have the same shape'", "assert tf.reduce_all(tf.equal(t1, expected_output)), f'Expected:\\n{expected_output}\\nGot:\\n{t1}'"]}} |
| {"question_id": 182, "pytorch_library": "import torch", "pytorch_start_code": "\ntensor1 = torch.rand((4, 2, 3, 100))\ntensor2 = torch.rand((4, 2, 3, 100))\n\n# Calculate the Euclidean distance between corresponding vectors\n# dist = \n", "pytorch_sol_code": "\ndist = (tensor1 - tensor2).pow(2).sum(3).sqrt()\n", "pytorch_test_code": {"setup_code": "\nexpected_shape = (4, 2, 3)\ntol = 1e-6\n", "test_cases": ["assert isinstance(dist, torch.Tensor), 'dist should be a torch.Tensor'", "assert dist.shape == expected_shape, f'dist should have shape {expected_shape}, got {dist.shape}'", "assert torch.allclose(dist, torch.norm(tensor1 - tensor2, dim=3), atol=tol), 'dist values do not match expected values'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\ntensor1 = tf.random.uniform((4, 2, 3, 100))\ntensor2 = tf.random.uniform((4, 2, 3, 100))\n\n# Calculate the Euclidean distance between corresponding vectors\n# dist = \n", "tensorflow_sol_code": "\ndist = tf.sqrt(tf.reduce_sum(tf.square(tensor1 - tensor2), axis=3))\n", "tensorflow_test_code": {"setup_code": "\nexpected_shape = (4, 2, 3)\ntol = 1e-6\n", "test_cases": ["assert isinstance(dist, tf.Tensor), 'dist should be a tf.Tensor'", "assert dist.shape == expected_shape, f'dist should have shape {expected_shape}, got {dist.shape}'", "assert tf.reduce_all(tf.abs(dist - tf.norm(tensor1 - tensor2, axis=3)) < tol), 'dist values do not match expected values'"]}} |
| {"question_id": 183, "pytorch_library": "import torch", "pytorch_start_code": "# Create the target tensor\nv = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])\n\n# Create the source tensor\nw = torch.tensor([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0], [70.0, 80.0, 90.0]])\n\n# Create the index tensor\nindex = torch.tensor([[0, 1, 0], [1, 2, 1], [0, 1, 2]])\n# v = ...", "pytorch_sol_code": "\nv.scatter_add_(0, index, w)\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([[ 81., 2., 33.],\n [44., 105., 66.],\n [7., 58., 99.]])\n", "test_cases": ["assert isinstance(v, torch.Tensor), 'v should be a torch.Tensor'", "assert v.shape == (3, 3), 'v should have shape (3, 3)'", "assert torch.allclose(v, expected_output), f'Expected:\\n{expected_output}\\nGot:\\n{v}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n# Create the target tensor\nv = tf.constant([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]])\n\n# Create the source tensor\nw = tf.constant([[10., 20., 30.],\n [40., 50., 60.],\n [70., 80., 90.]])\n\n# Create the index tensor\nindex = tf.constant([0, 1, 0])\n\n# Add rows from w to v based on index\n# v = \n", "tensorflow_sol_code": "\nv = tf.tensor_scatter_nd_add(v, tf.expand_dims(index, axis=1), w)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([[ 81., 102., 123.],\n [44., 55., 66.],\n [7., 8., 9.]])\n", "test_cases": ["assert isinstance(v, tf.Tensor), 'v should be a tf.Tensor'", "assert v.shape == (3, 3), 'v should have shape (3, 3)'", "assert tf.reduce_all(tf.equal(v, expected_output)), f'Expected:\\n{expected_output}\\nGot:\\n{v}'"]}} |
| {"question_id": 184, "pytorch_library": "import torch", "pytorch_start_code": "\n# Create a sample tensor\na = torch.tensor([[4, 2, 5], \n [1, 3, 2],\n [9, 7, 8]])\n\n# Sort the rows based on the last column\n# ind = \n# sorted_a = \n", "pytorch_sol_code": "\nind = a[:, -1].argsort(dim=0)\nsorted_a = a[ind]\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([[1, 3, 2],\n [4, 2, 5],\n [9, 7, 8]])\n", "test_cases": ["assert isinstance(sorted_a, torch.Tensor), 'sorted_a should be a torch.Tensor'", "assert sorted_a.shape == a.shape, f'sorted_a should have the same shape as a. Expected {a.shape}, got {sorted_a.shape}'", "assert torch.allclose(sorted_a, expected_output), f'Expected:\\n{expected_output}\\nGot:\\n{sorted_a}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n# Create a sample tensor\na = tf.constant([[4, 2, 5], \n [1, 3, 2],\n [9, 7, 8]])\n\n# Sort the rows based on the last column\n# ind = \n# sorted_a = \n", "tensorflow_sol_code": "\nind = tf.argsort(a[:, -1])\nsorted_a = tf.gather(a, ind)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([[1, 3, 2],\n [4, 2, 5],\n [9, 7, 8]])\n", "test_cases": ["assert isinstance(sorted_a, tf.Tensor), 'sorted_a should be a tf.Tensor'", "assert sorted_a.shape == a.shape, f'sorted_a should have the same shape as a. Expected {a.shape}, got {sorted_a.shape}'", "assert tf.reduce_all(tf.equal(sorted_a, expected_output)), f'Expected:\\n{expected_output}\\nGot:\\n{sorted_a}'"]}} |
| {"question_id": 185, "pytorch_library": "import torch", "pytorch_start_code": "\nclass LR(torch.nn.Module):\n def __init__(self, n_features):\n super().__init__()\n self.lr = torch.nn.Sequential(\n # Add your layers here\n )\n\n def forward(self, x):\n return self.lr(x)\n", "pytorch_sol_code": "\nclass LR(torch.nn.Module):\n def __init__(self, n_features):\n super().__init__()\n self.lr = torch.nn.Sequential(\n torch.nn.Linear(n_features, 64),\n torch.nn.ReLU(),\n torch.nn.Linear(64, 36, bias=False),\n torch.nn.Softmax(dim=1),\n )\n\n def forward(self, x):\n return self.lr(x)\n", "pytorch_test_code": {"setup_code": "\nn_features = 384\nmodel = LR(n_features)\nsample_input = torch.randn(1, n_features)\n", "test_cases": ["assert isinstance(model, torch.nn.Module), 'model should be an instance of torch.nn.Module'", "assert len(list(model.parameters())) == 3, 'model should have 3 sets of parameters'", "output = model(sample_input)", "assert output.shape == (1, 36), f'Expected output shape (1, 36), got {output.shape}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nmodel = tf.keras.models.Sequential([\n # Add your layers here\n])\n", "tensorflow_sol_code": "\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Dense(64, input_shape=(384,), activation=\"relu\"),\n tf.keras.layers.Dense(36, activation=\"softmax\", use_bias=False)\n])\n", "tensorflow_test_code": {"setup_code": "\nn_features = 384\nsample_input = tf.random.normal((1, n_features))\n", "test_cases": ["assert isinstance(model, tf.keras.Sequential), 'model should be an instance of tf.keras.Sequential'", "assert len(model.layers) == 2, 'model should have 2 layers'", "output = model(sample_input)", "assert output.shape == (1, 36), f'Expected output shape (1, 36), got {output.shape}'"]}} |
| {"question_id": 186, "pytorch_library": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "pytorch_start_code": "\nclass FirstM(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(20, 2)\n \n def forward(self, x):\n x = self.fc1(x)\n return x\n\n\nclass SecondM(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(20, 2)\n \n def forward(self, x):\n x = self.fc1(x)\n return x\n\n\nclass Combined_model(nn.Module):\n def __init__(self, modelA, modelB):\n super(Combined_model, self).__init__()\n self.modelA = modelA\n self.modelB = modelB\n \n # Add a classifier layer to combine the outputs\n # self.classifier = ...\n \n def forward(self, x1, x2):\n # Pass inputs through modelA and modelB\n # x1 = ...\n # x2 = ...\n \n # Concatenate the outputs\n # x = ...\n \n # Pass the concatenated output through the classifier\n # x = ...\n \n return x\n", "pytorch_sol_code": "class Combined_model(nn.Module):\n def __init__(self, modelA, modelB):\n super().__init__()\n self.modelA = modelA\n self.modelB = modelB\n self.classifier = nn.Linear(4, 2)\n \n def forward(self, x1, x2):\n x1 = self.modelA(x1)\n x2 = self.modelB(x2)\n x = torch.cat((x1, x2), dim=1)\n x = self.classifier(F.relu(x))\n return x\n", "pytorch_test_code": {"setup_code": "\nmodelA = FirstM()\nmodelB = SecondM()\ncombined_model = Combined_model(modelA, modelB)\n\nx1 = torch.randn(1, 20)\nx2 = torch.randn(1, 20)\n", "test_cases": ["assert isinstance(combined_model, nn.Module), 'combined_model should be an instance of nn.Module'", "assert len(list(combined_model.parameters())) == 6, 'combined_model should have 6 learnable parameters'", "output = combined_model(x1, x2)", "assert output.shape == (1, 2), f'Expected output shape (1, 2), but got {output.shape}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nclass FirstM(tf.keras.Model):\n def __init__(self):\n super(FirstM, self).__init__()\n self.fc1 = tf.keras.layers.Dense(2, input_shape=(20,))\n \n def call(self, x):\n x = self.fc1(x)\n return x\n\n\nclass SecondM(tf.keras.Model):\n def __init__(self):\n super(SecondM, self).__init__()\n self.fc1 = tf.keras.layers.Dense(2, input_shape=(20,))\n \n def call(self, x):\n x = self.fc1(x)\n return x\n\n\nclass Combined_model(tf.keras.Model):\n def __init__(self, modelA, modelB):\n super(Combined_model, self).__init__()\n self.modelA = modelA\n self.modelB = modelB\n \n # Add a classifier layer to combine the outputs\n # self.classifier = ...\n \n def call(self, x1, x2):\n # Pass inputs through modelA and modelB\n # x1 = ...\n # x2 = ...\n \n # Concatenate the outputs\n # x = ...\n \n # Pass the concatenated output through the classifier\n # x = ...\n \n return x\n", "tensorflow_sol_code": "class Combined_model(tf.keras.Model):\n def __init__(self, modelA, modelB):\n super(Combined_model, self).__init__()\n self.modelA = modelA\n self.modelB = modelB\n self.classifier = tf.keras.layers.Dense(2, activation='relu')\n \n def call(self, x1, x2):\n x1 = self.modelA(x1)\n x2 = self.modelB(x2)\n x = tf.concat((x1, x2), axis=1)\n x = self.classifier(x)\n return x\n", "tensorflow_test_code": {"setup_code": "\nmodelA = FirstM()\nmodelB = SecondM()\ncombined_model = Combined_model(modelA, modelB)\n\nx1 = tf.random.normal((1, 20))\nx2 = tf.random.normal((1, 20))\ncombined_model(x1, x2)", "test_cases": ["assert isinstance(combined_model, tf.keras.Model), 'combined_model should be an instance of tf.keras.Model'", "assert len(combined_model.trainable_variables) == 6, 'combined_model should have 6 trainable variables'", "output = combined_model(x1, x2)", "assert output.shape == (1, 2), f'Expected output shape (1, 2), but got {output.shape}'"]}} |
| {"question_id": 187, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "\nmultiplier = torch.from_numpy(np.array([1.0, -2.0, 5.0]))\nsource = torch.from_numpy(\n np.array([[0.5, -0.6],\n [-3.0, -2.0],\n [-4.0, 2.3]]))\ntarget = torch.from_numpy(np.array([0, 1, 0]))\n\n# Create an instance of CrossEntropyLoss with reduction='none'\n# loss_fn = \n\n# Compute the batch-specific loss\n# batch_loss = \n\n# Weigh the batch loss using the multiplier\n# weighted_loss = \n\n# Compute the mean weighted loss\n# loss = \n", "pytorch_sol_code": "\nloss_fn = torch.nn.CrossEntropyLoss(reduction='none')\nbatch_loss = loss_fn(source, target)\nweighted_loss = multiplier * batch_loss\nloss = torch.sum(weighted_loss) / torch.sum(multiplier)\n", "pytorch_test_code": {"setup_code": "\nexpected_output = 7.7925\n", "test_cases": ["assert isinstance(loss_fn, torch.nn.CrossEntropyLoss), 'loss_fn should be an instance of torch.nn.CrossEntropyLoss'", "assert loss_fn.reduction == 'none', 'loss_fn should have reduction set to \\'none\\''", "assert torch.is_tensor(batch_loss), 'batch_loss should be a torch.Tensor'", "assert batch_loss.shape == multiplier.shape, f'batch_loss should have the same shape as multiplier, got {batch_loss.shape} and {multiplier.shape}'", "assert torch.isclose(loss, torch.tensor(expected_output,dtype=torch.float64), atol=1e-4), f'Expected loss {expected_output}, but got {loss}'"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\nmultiplier = tf.constant([1.0, -2.0, 5.0])\nsource = tf.constant([[0.5, -0.6],\n [-3.0, -2.0],\n [-4.0, 2.3]])\ntarget = tf.constant([0, 1, 0])\n\n# Create an instance of SparseCategoricalCrossentropy with reduction='none'\n# loss_fn = \n\n# Compute the batch-specific loss\n# batch_loss = \n\n# Weigh the batch loss using the multiplier\n# weighted_loss = \n\n# Compute the mean weighted loss\n# loss = \n", "tensorflow_sol_code": "\nloss_fn = tf.keras.losses.SparseCategoricalCrossentropy(reduction='none')\nbatch_loss = loss_fn(target, source)\nweighted_loss = multiplier * batch_loss\nloss = tf.reduce_sum(weighted_loss) / tf.reduce_sum(multiplier)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = 19.801046\n", "test_cases": ["assert isinstance(loss_fn, tf.keras.losses.Loss), 'loss_fn should be an instance of tf.keras.losses.Loss'", "assert loss_fn.reduction == 'none', 'loss_fn should have reduction set to \\'none\\''", "assert tf.is_tensor(batch_loss), 'batch_loss should be a tf.Tensor'", "assert batch_loss.shape == multiplier.shape, f'batch_loss should have the same shape as multiplier, got {batch_loss.shape} and {multiplier.shape}'", "assert tf.math.abs(loss - expected_output) < 1e-4, f'Expected loss {expected_output}, but got {loss}'"]}} |
| {"question_id": 188, "pytorch_library": "import torch", "pytorch_start_code": "\n# Create a sample tensor A with shape [M, N]\nM, N = 3, 4\nA = torch.randn(M, N)\n\n# Specify the number of repetitions K\nK = 5\n\n# Repeat A along a new dimension to create B with shape [M, K, N]\n# B = \n", "pytorch_sol_code": "\nB = A.unsqueeze(1).repeat(1, K, 1)\n", "pytorch_test_code": {"setup_code": "\nexpected_shape = (M, K, N)\n", "test_cases": ["assert isinstance(B, torch.Tensor), 'B should be a torch.Tensor'", "assert B.shape == expected_shape, f'B should have shape {expected_shape}, but got {B.shape}'", "assert torch.allclose(B[:, 0, :], A), 'B[:, 0, :] should be equal to A'", "assert torch.allclose(B[:, -1, :], A), 'B[:, -1, :] should be equal to A'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n# Create a sample tensor A with shape [M, N]\nM, N = 3, 4\nA = tf.random.normal((M, N))\n\n# Specify the number of repetitions K\nK = 5\n\n# Repeat A along a new dimension to create B with shape [M, K, N]\n# B = \n", "tensorflow_sol_code": "\nB = tf.repeat(tf.expand_dims(A, 1), K, axis=1)\n", "tensorflow_test_code": {"setup_code": "\nexpected_shape = (M, K, N)\n", "test_cases": ["assert isinstance(B, tf.Tensor), 'B should be a tf.Tensor'", "assert B.shape == expected_shape, f'B should have shape {expected_shape}, but got {B.shape}'", "assert tf.reduce_all(B[:, 0, :] == A), 'B[:, 0, :] should be equal to A'", "assert tf.reduce_all(B[:, -1, :] == A), 'B[:, -1, :] should be equal to A'"]}} |
| {"question_id": 189, "pytorch_library": "import torch", "pytorch_start_code": "\ndef sum_by_index(labels, source):\n unique_labels, labels_inds, labels_counts = labels.unique(return_inverse=True, return_counts=True)\n out = torch.zeros((unique_labels.size(0), source.size(2)), dtype=source.dtype)\n \n # Add the embeddings from source to out based on labels_inds\n # out.index_add_(...)\n \n return out\n\nlabels = torch.tensor([[0, 1], [1, 2]])\nsource = torch.tensor(\n [[[0, 1], \n [1, 2]], \n\n [[2, 3],\n [3, 4]]], \n dtype=torch.float, \n requires_grad=True,\n)\n\n# Apply sum_by_index to get the result\n# result = \n", "pytorch_sol_code": "\ndef sum_by_index(labels, source):\n unique_labels, labels_inds, labels_counts = labels.unique(return_inverse=True, return_counts=True)\n out = torch.zeros((unique_labels.size(0), source.size(2)), dtype=source.dtype)\n out.index_add_(0, labels_inds.flatten(), source.flatten(0, 1))\n return out\n\nlabels = torch.tensor([[0, 1], [1, 2]])\nsource = torch.tensor(\n [[[0, 1], \n [1, 2]], \n\n [[2, 3],\n [3, 4]]], \n dtype=torch.float, \n requires_grad=True,\n)\nresult = sum_by_index(labels, source)\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([[0., 1.], [3., 5.], [3., 4.]], dtype=torch.float)\n", "test_cases": ["assert isinstance(result, torch.Tensor), 'result should be a torch.Tensor'", "assert result.shape == expected_output.shape, f'result should have shape {expected_output.shape}, but got {result.shape}'", "assert torch.allclose(result, expected_output), 'result does not match the expected output'", "assert result.requires_grad, 'result should have requires_grad=True'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\ndef sum_by_index(labels, source):\n unique_labels, _, labels_counts = tf.unique_with_counts(tf.reshape(labels, [-1]))\n labels_inds = tf.where(tf.equal(tf.expand_dims(labels, -1), tf.expand_dims(unique_labels, 0)))[..., -1]\n out = tf.zeros((tf.shape(unique_labels)[0], tf.shape(source)[-1]), dtype=source.dtype)\n \n # Add the embeddings from source to out based on labels_inds\n # out = ...\n \n return out\n\nlabels = tf.constant([[0, 1], [1, 2]])\nsource = tf.constant(\n [[[0, 1], \n [1, 2]], \n\n [[2, 3],\n [3, 4]]], \n dtype=tf.float32\n)\n\n# Apply sum_by_index to get the result\n# result = \n", "tensorflow_sol_code": "\ndef sum_by_index(labels, source):\n unique_labels, _, labels_counts = tf.unique_with_counts(tf.reshape(labels, [-1]))\n labels_inds = tf.where(tf.equal(tf.expand_dims(labels, -1), tf.expand_dims(unique_labels, 0)))[..., -1]\n out = tf.zeros((tf.shape(unique_labels)[0], tf.shape(source)[-1]), dtype=source.dtype)\n out = tf.tensor_scatter_nd_add(out, tf.reshape(labels_inds, [-1, 1]), tf.reshape(source, [-1, tf.shape(source)[-1]]))\n return out\n\nlabels = tf.constant([[0, 1], [1, 2]])\nsource = tf.constant(\n [[[0, 1], \n [1, 2]], \n\n [[2, 3],\n [3, 4]]], \n dtype=tf.float32\n)\nresult = sum_by_index(labels, source)\n", "tensorflow_test_code": {"setup_code": "\nexpected_output = tf.constant([[0., 1.], [3., 5.], [3., 4.]], dtype=tf.float32)\n", "test_cases": ["assert isinstance(result, tf.Tensor), 'result should be a tf.Tensor'", "assert result.shape == expected_output.shape, f'result should have shape {expected_output.shape}, but got {result.shape}'", "assert tf.reduce_all(tf.abs(result - expected_output) < 1e-6), 'result does not match the expected output'"]}} |
| {"question_id": 190, "pytorch_library": "import torch", "pytorch_start_code": "\noriginal = torch.randn(size=(4, 3, 2))\nrow_exclude = 2\n\n# Delete the row from x at index row_exclude\n# x = \n", "pytorch_sol_code": "\nx = torch.cat((original[:row_exclude], original[row_exclude+1:]))\n", "pytorch_test_code": {"setup_code": "\noriginal_shape = (4, 3, 2)\nexpected_shape = (3, 3, 2)\n", "test_cases": ["assert isinstance(x, torch.Tensor), 'x should be a torch.Tensor'", "assert x.shape == expected_shape, f'x should have shape {expected_shape}, but got {x.shape}'", "assert torch.allclose(x[:row_exclude], original[:row_exclude]), 'Rows before row_exclude should be unchanged'", "assert torch.allclose(x[row_exclude:], original[row_exclude+1:]), 'Rows after row_exclude should be shifted by 1'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\noriginal = tf.random.normal(shape=(4, 3, 2))\nrow_exclude = 2\n\n# Delete the row from x at index row_exclude\n# x = \n", "tensorflow_sol_code": "\nx = tf.concat([original[:row_exclude], original[row_exclude+1:]], axis=0)\n", "tensorflow_test_code": {"setup_code": "\noriginal_shape = (4, 3, 2)\nexpected_shape = (3, 3, 2)\n", "test_cases": ["assert isinstance(x, tf.Tensor), 'x should be a tf.Tensor'", "assert x.shape == expected_shape, f'x should have shape {expected_shape}, but got {x.shape}'", "assert tf.reduce_all(x[:row_exclude] == original[:row_exclude]), 'Rows before row_exclude should be unchanged'", "assert tf.reduce_all(x[row_exclude:] == original[row_exclude+1:]), 'Rows after row_exclude should be shifted by 1'"]}} |
| {"question_id": 191, "pytorch_library": "import torch\nimport torch.nn as nn", "pytorch_start_code": "\nclass TinyVGG(nn.Module):\n def __init__(self, input_shape: int, hidden_units: int, output_shape: int) -> None:\n super().__init__()\n \n # Define the convolutional block\n # self.conv_block1 = ...\n \n # Define the dropout layer\n # self.dropout = ...\n \n # Define the classifier block\n # self.classifier = ...\n \n def forward(self, x: torch.Tensor):\n # Forward pass through the convolutional block\n # x = ...\n \n # Apply dropout\n # x = ...\n \n # Forward pass through the classifier block\n # x = ...\n \n return x\n", "pytorch_sol_code": "\nclass TinyVGG(nn.Module):\n def __init__(self, input_shape: int, hidden_units: int, output_shape: int) -> None:\n super().__init__()\n self.conv_block1 = nn.Sequential(\n nn.Conv2d(in_channels=input_shape, out_channels=hidden_units, kernel_size=3, stride=1, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(in_channels=hidden_units, out_channels=hidden_units, kernel_size=3, stride=1, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(in_channels=hidden_units, out_channels=hidden_units, kernel_size=3, stride=1, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n \n self.dropout = nn.Dropout(0.4)\n self.classifier = nn.Sequential(nn.Flatten(), nn.Linear(in_features=hidden_units*18*18, out_features=output_shape))\n \n def forward(self, x: torch.Tensor):\n x = self.conv_block1(x)\n x = self.dropout(x)\n x = self.classifier(x)\n return x\n", "pytorch_test_code": {"setup_code": "\ninput_shape = 3\nhidden_units = 64\noutput_shape = 10\n\nmodel = TinyVGG(input_shape, hidden_units, output_shape)\nsample_input = torch.randn(1, input_shape, 150, 150)\n", "test_cases": ["assert isinstance(model, nn.Module), 'TinyVGG should inherit from nn.Module'", "assert isinstance(model.conv_block1, nn.Sequential), 'conv_block1 should be an instance of nn.Sequential'", "assert isinstance(model.dropout, nn.Dropout), 'dropout should be an instance of nn.Dropout'", "assert isinstance(model.classifier, nn.Sequential), 'classifier should be an instance of nn.Sequential'", "output = model(sample_input)\nassert output.shape == (1, output_shape), f'Output shape should be (1, {output_shape}), but got {output.shape}'"]}, "tensorflow_library": "import tensorflow as tf\nfrom tensorflow.keras import layers", "tensorflow_start_code": "\nclass TinyVGG(tf.keras.Model):\n def __init__(self, input_shape: int, hidden_units: int, output_shape: int) -> None:\n super().__init__()\n \n # Define the convolutional block\n # self.conv_block1 = ...\n \n # Define the dropout layer\n # self.dropout = ...\n \n # Define the classifier block\n # self.classifier = ...\n \n def call(self, x: tf.Tensor):\n # Forward pass through the convolutional block\n # x = ...\n \n # Apply dropout\n # x = ...\n \n # Forward pass through the classifier block\n # x = ...\n \n return x\n", "tensorflow_sol_code": "\nclass TinyVGG(tf.keras.Model):\n def __init__(self, input_shape: int, hidden_units: int, output_shape: int) -> None:\n super().__init__()\n self.conv_block1 = tf.keras.Sequential([\n layers.Conv2D(filters=hidden_units, kernel_size=3, strides=1, padding='same', activation='relu', input_shape=(None, None, input_shape)),\n layers.MaxPooling2D(pool_size=2, strides=2),\n layers.Conv2D(filters=hidden_units, kernel_size=3, strides=1, padding='same', activation='relu'),\n layers.MaxPooling2D(pool_size=2, strides=2),\n layers.Conv2D(filters=hidden_units, kernel_size=3, strides=1, padding='same', activation='relu'),\n layers.MaxPooling2D(pool_size=2, strides=2)\n ])\n \n self.dropout = layers.Dropout(0.4)\n self.classifier = tf.keras.Sequential([layers.Flatten(), layers.Dense(units=output_shape)])\n \n def call(self, x: tf.Tensor):\n x = self.conv_block1(x)\n x = self.dropout(x)\n x = self.classifier(x)\n return x\n", "tensorflow_test_code": {"setup_code": "\ninput_shape = 3\nhidden_units = 64\noutput_shape = 10\n\nmodel = TinyVGG(input_shape, hidden_units, output_shape)\nsample_input = tf.random.normal(shape=(1, 150, 150, input_shape))\n", "test_cases": ["assert isinstance(model, tf.keras.Model), 'TinyVGG should inherit from tf.keras.Model'", "assert isinstance(model.conv_block1, tf.keras.Sequential), 'conv_block1 should be an instance of tf.keras.Sequential'", "assert isinstance(model.dropout, layers.Dropout), 'dropout should be an instance of layers.Dropout'", "assert isinstance(model.classifier, tf.keras.Sequential), 'classifier should be an instance of tf.keras.Sequential'", "output = model(sample_input)\nassert output.shape == (1, output_shape), f'Output shape should be (1, {output_shape}), but got {output.shape}'"]}} |
| {"question_id": 192, "pytorch_library": "import torch\nimport numpy as np", "pytorch_start_code": "\na = torch.Tensor(np.array([0.0917, -0.0006, 0.1825, -0.2484]))\n\n# Create a one-hot tensor based on the index of the maximum value in a\n# one_hot = \n", "pytorch_sol_code": "\none_hot = torch.nn.functional.one_hot(a.argmax(), num_classes=a.numel())\n", "pytorch_test_code": {"setup_code": "\nexpected_output = torch.tensor([0, 0, 1, 0])\n", "test_cases": ["assert isinstance(one_hot, torch.Tensor), 'one_hot should be a torch.Tensor'", "assert one_hot.shape == a.shape, f'one_hot should have the same shape as a, but got {one_hot.shape}'", "assert torch.sum(one_hot) == 1, 'one_hot should have only one non-zero element'", "assert torch.all(one_hot == expected_output), f'one_hot does not match the expected output:\\n{expected_output}'"]}, "tensorflow_library": "import tensorflow as tf\nimport numpy as np", "tensorflow_start_code": "\na = tf.constant([0.0917, -0.0006, 0.1825, -0.2484], dtype=tf.float32)\n\n# Create a one-hot tensor based on the index of the maximum value in a\n# one_hot = \n", "tensorflow_sol_code": "\none_hot = tf.one_hot(tf.argmax(a), depth=tf.size(a))\n", "tensorflow_test_code": {"setup_code": "import numpy as np\nexpected_output = tf.constant([0, 0, 1, 0], dtype=tf.int32)\n", "test_cases": ["assert isinstance(one_hot, tf.Tensor), 'one_hot should be a tf.Tensor'", "assert one_hot.shape == a.shape, f'one_hot should have the same shape as a, but got {one_hot.shape}'", "assert tf.reduce_sum(one_hot) == 1, 'one_hot should have only one non-zero element'", "assert np.all(one_hot.numpy() == expected_output.numpy()), f'one_hot does not match the expected output:\\n{expected_output}'"]}} |
| {"question_id": 193, "pytorch_library": "import torch\nimport torchvision.transforms as T", "pytorch_start_code": "\n# Compose the transformations\n# trans_comp = \n", "pytorch_sol_code": "\ntrans_comp = T.Compose([\n T.Resize([224, 224]),\n T.ToTensor()\n])\n", "pytorch_test_code": {"setup_code": "from PIL import Image\nimport numpy as np\nrandom_image_array = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)\nimage = Image.fromarray(random_image_array)\ntransformed_image = trans_comp(image)\n", "test_cases": ["assert isinstance(transformed_image, torch.Tensor), 'transformed_image should be a torch.Tensor'", "assert transformed_image.shape[1:] == (224, 224), f'transformed_image should have shape (C, 224, 224), but got {transformed_image.shape}'", "assert transformed_image.min() >= 0 and transformed_image.max() <= 1, 'transformed_image should have values in the range [0, 1]'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\n# Define the resize function\n# def resize(image):\n# return tf.image.resize(image, [224, 224])\n\n# Define the normalize function\n# def normalize(image):\n# return image / 255.0\n\n# Apply the transformations to the dataset\n# def transform(image, label):\n# image = resize(image)\n# image = normalize(image)\n# return image, label\n# \n# transformed_dataset = dataset.map(transform)\n", "tensorflow_sol_code": "\ndef resize(image):\n return tf.image.resize(image, [224, 224])\n\ndef normalize(image):\n return image / 255.0\n\ndef transform(image, label):\n image = resize(image)\n image = normalize(image)\n return image, label\n", "tensorflow_test_code": {"setup_code": "\nimport numpy as np\n\nimage = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)\nlabel = 0\nimage, label = transform(image, label)\n", "test_cases": ["assert isinstance(image, tf.Tensor), 'image should be a tf.Tensor'", "assert image.shape == (224, 224, 3), f'image should have shape (224, 224, 3), but got {image.shape}'", "assert tf.reduce_min(image) >= 0 and tf.reduce_max(image) <= 1, 'image should have values in the range [0, 1]'"]}} |
| {"question_id": 194, "pytorch_library": "import torch", "pytorch_start_code": "\na = torch.rand(8, 2)\nb = torch.rand(8, 4)\nc = torch.rand(8, 6)\n\n# Concatenate the tensors along a new dimension\n# d = \n\n# Reshape the concatenated tensor into a 3D tensor\n# e = \n", "pytorch_sol_code": "\nd = torch.cat((a, b, c), dim=1)\ne = torch.reshape(d, (8, 3, -1))\n", "pytorch_test_code": {"setup_code": "\nexpected_shape = (8, 3, 4)\n", "test_cases": ["assert isinstance(d, torch.Tensor), 'd should be a torch.Tensor'", "assert d.shape == (8, 12), f'd should have shape (8, 12), but got {d.shape}'", "assert isinstance(e, torch.Tensor), 'e should be a torch.Tensor'", "assert e.shape == expected_shape, f'e should have shape {expected_shape}, but got {e.shape}'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\na = tf.random.uniform((8, 2))\nb = tf.random.uniform((8, 4))\nc = tf.random.uniform((8, 6))\n\n# Concatenate the tensors along a new dimension\n# d = \n\n# Reshape the concatenated tensor into a 3D tensor\n# e = \n", "tensorflow_sol_code": "\nd = tf.concat((a, b, c), axis=1)\ne = tf.reshape(d, (8, 3, -1))\n", "tensorflow_test_code": {"setup_code": "\nexpected_shape = (8, 3, 4)\n", "test_cases": ["assert isinstance(d, tf.Tensor), 'd should be a tf.Tensor'", "assert d.shape == (8, 12), f'd should have shape (8, 12), but got {d.shape}'", "assert isinstance(e, tf.Tensor), 'e should be a tf.Tensor'", "assert e.shape == expected_shape, f'e should have shape {expected_shape}, but got {e.shape}'"]}} |
| {"question_id": 195, "pytorch_library": "import torch", "pytorch_start_code": "\nn = 5\na = torch.randn(1, 3, 8) # Random (1, 3, 8) tensor\n\n# Increase the first dimension to n and pad with zeros\n# result = \n", "pytorch_sol_code": "\nresult = torch.cat((a, torch.zeros(n - 1, a.shape[1], a.shape[2])))\n", "pytorch_test_code": {"setup_code": "\nexpected_shape = (n, 3, 8)\n", "test_cases": ["assert isinstance(a, torch.Tensor), 'a should be a torch.Tensor'", "assert a.shape == (1, 3, 8), f'a should have shape (1, 3, 8), but got {a.shape}'", "assert isinstance(result, torch.Tensor), 'result should be a torch.Tensor'", "assert result.shape == expected_shape, f'result should have shape {expected_shape}, but got {result.shape}'", "assert torch.allclose(result[0], a), 'The first slice of result should be equal to a'", "assert torch.allclose(result[1:], torch.zeros(n - 1, 3, 8)), 'The remaining slices of result should be zero'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nn = 5\na = tf.random.normal((1, 3, 8)) # Random (1, 3, 8) tensor\n\n# Increase the first dimension to n and pad with zeros\n# result = \n", "tensorflow_sol_code": "\nresult = tf.concat((a, tf.zeros((n - 1, a.shape[1], a.shape[2]))), axis=0)\n", "tensorflow_test_code": {"setup_code": "\nexpected_shape = (n, 3, 8)\n", "test_cases": ["assert isinstance(a, tf.Tensor), 'a should be a tf.Tensor'", "assert a.shape == (1, 3, 8), f'a should have shape (1, 3, 8), but got {a.shape}'", "assert isinstance(result, tf.Tensor), 'result should be a tf.Tensor'", "assert result.shape == expected_shape, f'result should have shape {expected_shape}, but got {result.shape}'", "assert tf.reduce_all(tf.equal(result[0], a)), 'The first slice of result should be equal to a'", "assert tf.reduce_all(tf.equal(result[1:], tf.zeros((n - 1, 3, 8)))), 'The remaining slices of result should be zero'"]}} |
| {"question_id": 196, "pytorch_library": "import torch", "pytorch_start_code": "\ndata = torch.arange(10)\nstarts = torch.tensor([0, 3, 4, 1])\nends = starts + 2\n\n# Slice the data tensor using starts and ends\n# result = \n", "pytorch_sol_code": "\nindices = torch.stack((starts, ends), axis=1)\nresult = torch.stack([data[slice(idx[0], idx[1])] for idx in indices])\n", "pytorch_test_code": {"setup_code": "\nexpected_result = torch.tensor([[0, 1], [3, 4], [4, 5], [1, 2]])\n", "test_cases": ["assert isinstance(result, torch.Tensor), 'result should be a torch.Tensor'", "assert result.shape == (4, 2), f'result should have shape (4, 2), but got {result.shape}'", "assert torch.all(torch.eq(result, expected_result)), 'result does not match expected output'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\ndata = tf.range(10)\nstarts = tf.constant([0, 3, 4, 1])\nends = starts + 2\n\n# Slice the data tensor using starts and ends\n# result = \n", "tensorflow_sol_code": "\nindices = tf.stack([starts, ends], axis=1)\nresult = tf.stack([tf.slice(data, [start], [end - start]) for start, end in indices])\n", "tensorflow_test_code": {"setup_code": "\nexpected_result = tf.constant([[0, 1], [3, 4], [4, 5], [1, 2]],dtype=tf.int32)\n", "test_cases": ["assert isinstance(result, tf.Tensor), 'result should be a tf.Tensor'", "assert result.shape == (4, 2), f'result should have shape (4, 2), but got {result.shape}'", "assert tf.reduce_all(tf.equal(result, expected_result)), 'result does not match expected output'"]}} |
| {"question_id": 197, "pytorch_library": "import torch", "pytorch_start_code": "\nx = torch.tensor([1, 2, 3, 4])\n\n# Subtract elements of every possible pair\n# result = \n", "pytorch_sol_code": "\nresult = x[:, None] - x[None, :]\n", "pytorch_test_code": {"setup_code": "\nexpected_result = torch.tensor([[ 0, -1, -2, -3],\n [ 1, 0, -1, -2],\n [ 2, 1, 0, -1],\n [ 3, 2, 1, 0]])\n", "test_cases": ["assert isinstance(result, torch.Tensor), 'result should be a torch.Tensor'", "assert result.shape == (4, 4), f'result should have shape (4, 4), but got {result.shape}'", "assert torch.all(torch.eq(result, expected_result)), 'result does not match expected output'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nx = tf.constant([1, 2, 3, 4])\n\n# Subtract elements of every possible pair\n# result = \n", "tensorflow_sol_code": "\nresult = x[:, tf.newaxis] - x[tf.newaxis, :]\n", "tensorflow_test_code": {"setup_code": "\nexpected_result = tf.constant([[ 0, -1, -2, -3],\n [ 1, 0, -1, -2],\n [ 2, 1, 0, -1],\n [ 3, 2, 1, 0]])\n", "test_cases": ["assert isinstance(result, tf.Tensor), 'result should be a tf.Tensor'", "assert result.shape == (4, 4), f'result should have shape (4, 4), but got {result.shape}'", "assert tf.reduce_all(tf.equal(result, expected_result)), 'result does not match expected output'"]}} |
| {"question_id": 198, "pytorch_library": "import torch", "pytorch_start_code": "\na = torch.as_tensor([\n [[1, 3, 7, 6], [9, 0, 6, 2], [3, 0, 5, 8]],\n [[1, 0, 1, 0], [2, 1, 0, 3], [0, 0, 6, 1]]\n])\n\nsorted_dim = 1 # sort by rows, preserving each row\nsorted_column = 2 # sort rows on value of 3rd column of each row\n\n# Sort the tensor a row-wise based on the sorted_column\n# sorted_a = \n", "pytorch_sol_code": "\nb = torch.argsort(a[:, :, sorted_column], dim=sorted_dim)\nsorted_a = torch.stack([a[i, b[i], :] for i in range(a.shape[0])])\n", "pytorch_test_code": {"setup_code": "\nexpected_result = torch.as_tensor([\n [[3, 0, 5, 8], [9, 0, 6, 2], [1, 3, 7, 6]],\n [[2, 1, 0, 3], [1, 0, 1, 0], [0, 0, 6, 1]]\n])\n", "test_cases": ["assert isinstance(sorted_a, torch.Tensor), 'sorted_a should be a torch.Tensor'", "assert sorted_a.shape == a.shape, f'sorted_a should have the same shape as a, but got {sorted_a.shape} instead of {a.shape}'", "assert torch.all(torch.eq(sorted_a, expected_result)), 'sorted_a does not match the expected result'"]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\na = tf.constant([\n [[1, 3, 7, 6], [9, 0, 6, 2], [3, 0, 5, 8]],\n [[1, 0, 1, 0], [2, 1, 0, 3], [0, 0, 6, 1]]\n])\n\nsorted_dim = 1 # sort by rows, preserving each row\nsorted_column = 2 # sort rows on value of 3rd column of each row\n\n# Sort the tensor a row-wise based on the sorted_column\n# sorted_a = \n", "tensorflow_sol_code": "\nb = tf.argsort(a[:, :, sorted_column], axis=sorted_dim)\nsorted_a = tf.stack([tf.gather(a[i], b[i], axis=0) for i in range(a.shape[0])])\n", "tensorflow_test_code": {"setup_code": "\nexpected_result = tf.constant([\n [[3, 0, 5, 8], [9, 0, 6, 2], [1, 3, 7, 6]],\n [[2, 1, 0, 3], [1, 0, 1, 0], [0, 0, 6, 1]]\n])\n", "test_cases": ["assert isinstance(sorted_a, tf.Tensor), 'sorted_a should be a tf.Tensor'", "assert sorted_a.shape == a.shape, f'sorted_a should have the same shape as a, but got {sorted_a.shape} instead of 
{a.shape}'", "assert tf.reduce_all(tf.equal(sorted_a, expected_result)), 'sorted_a does not match the expected result'"]}} |
| {"question_id": 199, "pytorch_library": "import torch\nimport torch.nn.functional as F", "pytorch_start_code": "\nin_dict = {\n \"train\": [\n {\n \"input\": [[3, 1, 2], [3, 1, 2], [3, 1, 2]],\n \"output\": [[4, 5, 6], [4, 5, 6], [4, 5, 6]]\n },\n {\n \"input\": [[2, 3, 8], [2, 3, 8], [2, 3, 8]],\n \"output\": [[6, 4, 9], [6, 4, 9], [6, 4, 9]]\n }\n ]\n}\n\n# Convert the dictionary to a list of tensors\n# train_examples = ...\n\n# Apply padding to each input and output tensor\n# for item in train_examples:\n# item[0] = ...\n# item[1] = ...\n\n# Convert the tensors back to a dictionary\n# out_dict = {'train': []}\n# for item in train_examples:\n# out_dict['train'].append(...)\n", "pytorch_sol_code": "\ntrain_examples = []\nfor item in in_dict['train']:\n in_tensor = torch.Tensor(item['input'])\n out_tensor = torch.Tensor(item['output'])\n train_examples.append([in_tensor, out_tensor])\n\nfor item in train_examples:\n item[0] = F.pad(item[0], (1, 1, 1, 1))\n item[1] = F.pad(item[1], (1, 1, 1, 1))\n\nout_dict = {'train': []}\nfor item in train_examples:\n out_dict['train'].append({\n 'input': item[0].tolist(),\n 'output': item[1].tolist()\n })\n", "pytorch_test_code": {"setup_code": "\nexpected_input = [\n [\n [0, 0, 0, 0, 0],\n [0, 3, 1, 2, 0],\n [0, 3, 1, 2, 0],\n [0, 3, 1, 2, 0],\n [0, 0, 0, 0, 0]\n ],\n [\n [0, 0, 0, 0, 0],\n [0, 2, 3, 8, 0],\n [0, 2, 3, 8, 0],\n [0, 2, 3, 8, 0],\n [0, 0, 0, 0, 0]\n ]\n]\n\nexpected_output = [\n [\n [0, 0, 0, 0, 0],\n [0, 4, 5, 6, 0],\n [0, 4, 5, 6, 0],\n [0, 4, 5, 6, 0],\n [0, 0, 0, 0, 0]\n ],\n [\n [0, 0, 0, 0, 0],\n [0, 6, 4, 9, 0],\n [0, 6, 4, 9, 0],\n [0, 6, 4, 9, 0],\n [0, 0, 0, 0, 0]\n ]\n]\n", "test_cases": ["assert isinstance(train_examples, list), 'train_examples should be a list'", "assert len(train_examples) == 2, f'Expected 2 training examples, got {len(train_examples)}'", "assert all(isinstance(item, list) and len(item) == 2 for item in train_examples), 'Each item in train_examples should be a list of length 2'", 
"assert all(isinstance(tensor, torch.Tensor) for item in train_examples for tensor in item), 'Each item in train_examples should contain torch.Tensor objects'", "assert out_dict['train'][0]['input'] == expected_input[0], f\"First input tensor does not match the expected padded tensor:\\nExpected: {expected_input[0]}\\nGot: {out_dict['train'][0]['input']}\"", "assert out_dict['train'][1]['input'] == expected_input[1], f\"Second input tensor does not match the expected padded tensor:\\nExpected: {expected_input[1]}\\nGot: {out_dict['train'][1]['input']}\"", "assert out_dict['train'][0]['output'] == expected_output[0], f\"First output tensor does not match the expected padded tensor:\\nExpected: {expected_output[0]}\\nGot: {out_dict['train'][0]['output']}\"", "assert out_dict['train'][1]['output'] == expected_output[1], f\"Second output tensor does not match the expected padded tensor:\\nExpected: {expected_output[1]}\\nGot: {out_dict['train'][1]['output']}\""]}, "tensorflow_library": "import tensorflow as tf", "tensorflow_start_code": "\nin_dict = {\n \"train\": [\n {\n \"input\": [[3, 1, 2], [3, 1, 2], [3, 1, 2]],\n \"output\": [[4, 5, 6], [4, 5, 6], [4, 5, 6]]\n },\n {\n \"input\": [[2, 3, 8], [2, 3, 8], [2, 3, 8]],\n \"output\": [[6, 4, 9], [6, 4, 9], [6, 4, 9]]\n }\n ]\n}\n\n# Convert the dictionary to a list of tensors\n# train_examples = ...\n\n# Apply padding to each input and output tensor\n# for item in train_examples:\n# item[0] = ...\n# item[1] = ...\n\n# Convert the tensors back to a dictionary\n# out_dict = {'train': []}\n# for item in train_examples:\n# out_dict['train'].append(...)\n", "tensorflow_sol_code": "\ntrain_examples = []\nfor item in in_dict['train']:\n in_tensor = tf.convert_to_tensor(item['input'], dtype=tf.float32)\n out_tensor = tf.convert_to_tensor(item['output'], dtype=tf.float32)\n train_examples.append([in_tensor, out_tensor])\n\nfor item in train_examples:\n item[0] = tf.pad(item[0], [[1, 1], [1, 1]])\n item[1] = tf.pad(item[1], [[1, 
1], [1, 1]])\n\nout_dict = {'train': []}\nfor item in train_examples:\n out_dict['train'].append({\n 'input': item[0].numpy().tolist(),\n 'output': item[1].numpy().tolist()\n })\n", "tensorflow_test_code": {"setup_code": "\nexpected_input = [\n [\n [0, 0, 0, 0, 0],\n [0, 3, 1, 2, 0],\n [0, 3, 1, 2, 0],\n [0, 3, 1, 2, 0],\n [0, 0, 0, 0, 0]\n ],\n [\n [0, 0, 0, 0, 0],\n [0, 2, 3, 8, 0],\n [0, 2, 3, 8, 0],\n [0, 2, 3, 8, 0],\n [0, 0, 0, 0, 0]\n ]\n]\n\nexpected_output = [\n [\n [0, 0, 0, 0, 0],\n [0, 4, 5, 6, 0],\n [0, 4, 5, 6, 0],\n [0, 4, 5, 6, 0],\n [0, 0, 0, 0, 0]\n ],\n [\n [0, 0, 0, 0, 0],\n [0, 6, 4, 9, 0],\n [0, 6, 4, 9, 0],\n [0, 6, 4, 9, 0],\n [0, 0, 0, 0, 0]\n ]\n]\n", "test_cases": ["assert isinstance(train_examples, list), 'train_examples should be a list'", "assert len(train_examples) == 2, f'Expected 2 training examples, got {len(train_examples)}'", "assert all(isinstance(item, list) and len(item) == 2 for item in train_examples), 'Each item in train_examples should be a list of length 2'", "assert all(isinstance(tensor, tf.Tensor) for item in train_examples for tensor in item), 'Each item in train_examples should contain tf.Tensor objects'", "assert out_dict['train'][0]['input'] == expected_input[0], f\"First input tensor does not match the expected padded tensor:\\nExpected: {expected_input[0]}\\nGot: {out_dict['train'][0]['input']}\"", "assert out_dict['train'][1]['input'] == expected_input[1], f\"Second input tensor does not match the expected padded tensor:\\nExpected: {expected_input[1]}\\nGot: {out_dict['train'][1]['input']}\"", "assert out_dict['train'][0]['output'] == expected_output[0], f\"First output tensor does not match the expected padded tensor:\\nExpected: {expected_output[0]}\\nGot: {out_dict['train'][0]['output']}\"", "assert out_dict['train'][1]['output'] == expected_output[1], f\"Second output tensor does not match the expected padded tensor:\\nExpected: {expected_output[1]}\\nGot: {out_dict['train'][1]['output']}\""]}} |
|
|