File size: 3,223 Bytes
bcadbf4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
"""
Tests for quantum layers (PennyLane required).

Verifies:
  - Angle embedding output range
  - Entanglement monitor behavior
  - Classical fallback when no PennyLane
"""

import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import pytest

try:
    import pennylane as qml
    HAS_PENNYLANE = True
except ImportError:
    HAS_PENNYLANE = False


@pytest.mark.skipif(not HAS_PENNYLANE, reason="PennyLane not installed")
class TestQuantumAngleEmbedding:
    """Behavioral checks for the quantum angle-embedding layer (PennyLane only)."""

    def test_output_shape(self):
        """A (batch, n_qubits) input maps to a (batch, n_outputs) tensor."""
        from src.quantum_layers import QuantumAngleEmbedding

        embedding = QuantumAngleEmbedding(n_qubits=4, n_layers=2, n_outputs=4)
        batch = torch.randn(8, 4)
        out = embedding(batch)
        assert out.shape == (8, 4)

    def test_output_range(self):
        """PauliZ expectation values lie in [-1, 1]; allow a small tolerance."""
        from src.quantum_layers import QuantumAngleEmbedding

        embedding = QuantumAngleEmbedding(n_qubits=4, n_layers=2, n_outputs=4)
        embedding.eval()
        with torch.no_grad():
            out = embedding(torch.randn(16, 4))
        assert out.min() >= -1.1
        assert out.max() <= 1.1

    def test_batch_dimensions(self):
        """Leading dimensions beyond the qubit axis are preserved."""
        from src.quantum_layers import QuantumAngleEmbedding

        embedding = QuantumAngleEmbedding(n_qubits=4, n_layers=2, n_outputs=3)
        inputs = torch.randn(2, 10, 4)  # (batch, seq, n_qubits)
        assert embedding(inputs).shape == (2, 10, 3)

    def test_gradient_flow(self):
        """Backprop through the circuit populates grads on the qlayer params."""
        from src.quantum_layers import QuantumAngleEmbedding

        embedding = QuantumAngleEmbedding(n_qubits=4, n_layers=1)
        inputs = torch.randn(4, 4, requires_grad=False)
        embedding(inputs).sum().backward()
        assert all(p.grad is not None for p in embedding.qlayer.parameters())


class TestEntanglementMonitor:
    """Checks for the attention-entropy monitor (runs without PennyLane)."""

    def test_output_shape(self):
        """One entropy value per (batch, head) pair."""
        from src.quantum_layers import EntanglementMonitor

        monitor = EntanglementMonitor(n_qubits=4)
        # Simulated attention weights: (batch, heads, seq, seq)
        weights = torch.randn(2, 4, 16, 16).softmax(dim=-1)
        assert monitor(weights).shape == (2, 4)

    def test_uniform_attention(self):
        """Uniform attention over 8 positions should reach max entropy log(8)."""
        from src.quantum_layers import EntanglementMonitor

        monitor = EntanglementMonitor(n_qubits=4)
        uniform = torch.full((2, 2, 8, 8), 1.0 / 8)
        expected = -torch.log(torch.tensor(1.0 / 8))
        assert torch.allclose(monitor(uniform), expected, atol=0.2)


class TestFallback:
    """The classical substitute must mimic the quantum layer's interface."""

    def test_classical_fallback(self):
        """Same output shape as the quantum layer; tanh keeps values in [-1, 1]."""
        from src.quantum_layers import ClassicalQuantumFallback

        fallback = ClassicalQuantumFallback(n_qubits=4, n_layers=2, n_outputs=4)
        out = fallback(torch.randn(8, 4))
        assert out.shape == (8, 4)
        assert out.min() >= -1.0
        assert out.max() <= 1.0


class TestCreateQuantumEmbedding:
    """Smoke test for the embedding factory helper."""

    def test_factory(self):
        """Factory output preserves the batch dimension for a 128-dim input."""
        from src.quantum_layers import create_quantum_embedding

        embedding = create_quantum_embedding(128, n_qubits=4, n_layers=2)
        out = embedding(torch.randn(4, 128))
        # Only the batch axis is pinned here; output width depends on config.
        assert out.shape[0] == 4