v3.0.0: Tests
tests/__pycache__/test_quantum_layers.cpython-312-pytest-9.0.3.pyc
ADDED: binary file (13.1 kB)
tests/__pycache__/test_tensor_layers.cpython-312-pytest-9.0.3.pyc
ADDED: binary file (18.2 kB)
tests/test_quantum_layers.py
ADDED
@@ -0,0 +1,99 @@
"""
Tests for quantum layers (PennyLane required).

Verifies:
- Angle embedding output range
- Entanglement monitor behavior
- Classical fallback when no PennyLane
"""

import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import pytest

try:
    import pennylane as qml
    HAS_PENNYLANE = True
except ImportError:
    HAS_PENNYLANE = False


@pytest.mark.skipif(not HAS_PENNYLANE, reason="PennyLane not installed")
class TestQuantumAngleEmbedding:
    def test_output_shape(self):
        from src.quantum_layers import QuantumAngleEmbedding
        layer = QuantumAngleEmbedding(n_qubits=4, n_layers=2, n_outputs=4)
        x = torch.randn(8, 4)
        y = layer(x)
        assert y.shape == (8, 4)

    def test_output_range(self):
        from src.quantum_layers import QuantumAngleEmbedding
        layer = QuantumAngleEmbedding(n_qubits=4, n_layers=2, n_outputs=4)
        layer.eval()
        with torch.no_grad():
            x = torch.randn(16, 4)
            y = layer(x)
        # Expectation values of PauliZ are in [-1, 1]
        assert y.min() >= -1.1
        assert y.max() <= 1.1

    def test_batch_dimensions(self):
        from src.quantum_layers import QuantumAngleEmbedding
        layer = QuantumAngleEmbedding(n_qubits=4, n_layers=2, n_outputs=3)
        x = torch.randn(2, 10, 4)  # batch, seq, n_qubits
        y = layer(x)
        assert y.shape == (2, 10, 3)

    def test_gradient_flow(self):
        from src.quantum_layers import QuantumAngleEmbedding
        layer = QuantumAngleEmbedding(n_qubits=4, n_layers=1)
        x = torch.randn(4, 4, requires_grad=False)
        y = layer(x)
        loss = y.sum()
        loss.backward()
        for p in layer.qlayer.parameters():
            assert p.grad is not None


class TestEntanglementMonitor:
    def test_output_shape(self):
        from src.quantum_layers import EntanglementMonitor
        monitor = EntanglementMonitor(n_qubits=4)
        # Simulated attention weights: (batch, heads, seq, seq)
        attn = torch.randn(2, 4, 16, 16).softmax(dim=-1)
        entropy = monitor(attn)
        assert entropy.shape == (2, 4)

    def test_uniform_attention(self):
        from src.quantum_layers import EntanglementMonitor
        monitor = EntanglementMonitor(n_qubits=4)
        # Perfectly uniform attention → maximum entropy
        attn = torch.ones(2, 2, 8, 8) / 8
        entropy = monitor(attn)
        expected = -torch.log(torch.tensor(1.0 / 8))
        assert torch.allclose(entropy, expected, atol=0.2)


class TestFallback:
    def test_classical_fallback(self):
        from src.quantum_layers import ClassicalQuantumFallback
        layer = ClassicalQuantumFallback(n_qubits=4, n_layers=2, n_outputs=4)
        x = torch.randn(8, 4)
        y = layer(x)
        assert y.shape == (8, 4)
        # Tanh output in [-1, 1]
        assert y.min() >= -1.0
        assert y.max() <= 1.0


class TestCreateQuantumEmbedding:
    def test_factory(self):
        from src.quantum_layers import create_quantum_embedding
        layer = create_quantum_embedding(128, n_qubits=4, n_layers=2)
        x = torch.randn(4, 128)
        y = layer(x)
        assert y.shape[0] == 4
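The expected value in test_uniform_attention is just the entropy of a uniform distribution: each attention row assigns probability 1/8 to 8 keys, so H = -Σ (1/8)·ln(1/8) = ln 8 ≈ 2.079, which is exactly -torch.log(torch.tensor(1.0 / 8)).

For context, the shapes and ranges these tests pin down match a thin PennyLane TorchLayer wrapper. The following is a minimal sketch of the interface the tests assume, not the actual src.quantum_layers implementation; the AngleEmbedding/BasicEntanglerLayers ansatz and the choice to measure only the first n_outputs wires are assumptions.

```python
# Hypothetical sketch of the module exercised by TestQuantumAngleEmbedding.
import torch
import torch.nn as nn
import pennylane as qml


class QuantumAngleEmbedding(nn.Module):
    """Encode features as rotation angles, entangle, read out PauliZ.

    PauliZ expectation values lie in [-1, 1], which is what
    test_output_range checks (with a small numerical margin).
    """

    def __init__(self, n_qubits=4, n_layers=2, n_outputs=None):
        super().__init__()
        self.n_qubits = n_qubits
        self.n_outputs = n_outputs if n_outputs is not None else n_qubits
        dev = qml.device("default.qubit", wires=n_qubits)

        @qml.qnode(dev, interface="torch")
        def circuit(inputs, weights):
            qml.AngleEmbedding(inputs, wires=range(n_qubits))
            qml.BasicEntanglerLayers(weights, wires=range(n_qubits))
            # Assumption: n_outputs <= n_qubits; measure the first wires.
            return [qml.expval(qml.PauliZ(w)) for w in range(self.n_outputs)]

        # One rotation angle per qubit per entangling layer. TorchLayer
        # registers `weights` as a trainable nn.Parameter, which is what
        # test_gradient_flow inspects via layer.qlayer.parameters().
        self.qlayer = qml.qnn.TorchLayer(circuit, {"weights": (n_layers, n_qubits)})

    def forward(self, x):
        # Flatten leading dims (test_batch_dimensions feeds
        # (batch, seq, n_qubits)), run the circuit batched, then restore them.
        lead = x.shape[:-1]
        out = self.qlayer(x.reshape(-1, self.n_qubits))
        return out.reshape(*lead, self.n_outputs)
```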
tests/test_tensor_layers.py
ADDED
@@ -0,0 +1,120 @@
"""
Tests for tensor decomposition layers.

Verifies:
- Correct output shapes
- Rank truncation preserves structure
- Compression ratio computation
- Gradient flow
"""

import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import pytest
from src.tensor_layers import TTLinear, TTFeedForward, factorize_dim


class TestFactorizeDim:
    def test_power_of_two(self):
        factors = factorize_dim(64)
        assert all(f >= 2 for f in factors), f"Got dead factor in {factors}"

    def test_prime(self):
        factors = factorize_dim(7)
        # Some factors may be 1 for primes (unavoidable)
        product = 1
        for f in factors:
            product *= f
        assert product == 7, f"Prime 7 product: {factors} = {product}"

    def test_one(self):
        factors = factorize_dim(1)
        assert factors == (1,)

    def test_large(self):
        for dim in [128, 256, 512, 1024]:
            factors = factorize_dim(dim)
            product = 1
            for f in factors:
                product *= f
            assert product == dim, f"Product mismatch: {factors} = {product} != {dim}"


class TestTTLinear:
    def test_output_shape(self):
        layer = TTLinear(64, 128, rank=8)
        x = torch.randn(4, 64)
        y = layer(x)
        assert y.shape == (4, 128)

    def test_batched(self):
        layer = TTLinear(64, 128, rank=8)
        x = torch.randn(3, 5, 64)
        y = layer(x)
        assert y.shape == (3, 5, 128)

    def test_gradient_flow(self):
        layer = TTLinear(64, 128, rank=8)
        x = torch.randn(4, 64, requires_grad=False)
        y = layer(x)
        loss = y.sum()
        loss.backward()
        for core in layer.cores:
            assert core.grad is not None
            assert not torch.isnan(core.grad).any()

    def test_set_rank_smaller(self):
        layer = TTLinear(64, 128, rank=8)
        x = torch.randn(4, 64)
        y_before = layer(x)

        layer.set_rank(4)
        y_after = layer(x)

        assert y_after.shape == y_before.shape
        assert layer.rank == 4

    def test_set_rank_larger(self):
        layer = TTLinear(64, 128, rank=4)
        layer.set_rank(8)
        assert layer.rank == 8

    def test_compression_ratio(self):
        layer = TTLinear(128, 256, rank=8)
        assert layer.compression_ratio > 1.0

    def test_bias(self):
        layer = TTLinear(64, 128, rank=8, bias=True)
        assert layer.bias is not None

        layer_nb = TTLinear(64, 128, rank=8, bias=False)
        assert layer_nb.bias is None


class TestTTFeedForward:
    def test_output_shape(self):
        ffn = TTFeedForward(128, ff_multiplier=4, rank=8)
        x = torch.randn(4, 128)
        y = ffn(x)
        assert y.shape == (4, 128)

    def test_set_rank(self):
        ffn = TTFeedForward(128, ff_multiplier=4, rank=8)
        x = torch.randn(4, 128)
        y_before = ffn(x)

        ffn.set_rank(4)
        y_after = ffn(x)

        assert y_after.shape == y_before.shape

    def test_total_params(self):
        ffn = TTFeedForward(128, ff_multiplier=4, rank=8)
        params = ffn.total_params
        assert params > 0
        # Should be fewer than dense equivalent
        dense = 128 * 512 + 512 * 128  # up + down
        assert params < dense
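The TestFactorizeDim cases pin down a small contract: the factors must multiply back to dim, composite dims get no wasted 1-factors, dim=1 returns (1,), and primes are allowed trailing 1s. A minimal sketch that satisfies this contract, assuming a three-factor default (the real signature in src.tensor_layers is not shown in this diff):

```python
# Hypothetical sketch consistent with the TestFactorizeDim contract.
def factorize_dim(dim: int, n_factors: int = 3) -> tuple:
    """Split dim into n_factors integers whose product is dim.

    Greedily picks the divisor closest to the balanced target
    remaining ** (1 / factors_left); primes inevitably yield 1s.
    """
    if dim == 1:
        return (1,)  # test_one expects exactly (1,)
    factors = []
    remaining = dim
    for i in range(n_factors - 1):
        target = remaining ** (1.0 / (n_factors - i))
        # Divisor of `remaining` closest to the balanced target.
        best = min((f for f in range(1, remaining + 1) if remaining % f == 0),
                   key=lambda f: abs(f - target))
        factors.append(best)
        remaining //= best
    factors.append(remaining)
    return tuple(factors)
```

To see why test_compression_ratio should pass, compare parameter counts: a dense 128-to-256 layer stores 128 × 256 = 32,768 weights. With the (assumed) factorizations 128 = 4·4·8 and 256 = 4·8·8 and TT-ranks (1, 8, 8, 1), the three cores have shapes (1, 4, 4, 8), (8, 4, 8, 8), and (8, 8, 8, 1), for 128 + 2048 + 512 = 2,688 parameters, roughly 12× compression.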