"""
Quantum Feature Encoding Layers.
PennyLane-based quantum circuits wrapped as PyTorch nn.Module layers.
Components:
- QuantumAngleEmbedding: Classical data → rotation angles on qubits
- EntanglementMonitor: Estimates entanglement via attention patterns
- ClassicalQuantumFallback: MLP-based fallback when PennyLane unavailable
- create_quantum_embedding: Factory for angle (quantum) or amplitude
  (classical stand-in) embedding stacks
"""

import warnings
from typing import List, Optional

import torch
import torch.nn as nn

try:
import pennylane as qml
HAS_PENNYLANE = True
except ImportError:
HAS_PENNYLANE = False


class QuantumAngleEmbedding(nn.Module):
"""
Encodes classical features into quantum states via angle encoding.
Circuit: RX(input) → [RY(θ) → CNOT ladder] × n_layers → ⟨Z_i⟩
Parameters
----------
n_qubits : int
Number of qubits (4-8 for NISQ compatibility).
n_layers : int
Number of variational circuit layers.
n_outputs : int or None
Number of expectation values to measure. Default: n_qubits.
diff_method : str
Differentiation method. 'backprop' for batched inputs,
'parameter-shift' for hardware compatibility.
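
    Examples
    --------
    A minimal usage sketch (shapes illustrative; assumes PennyLane is
    installed)::

        >>> layer = QuantumAngleEmbedding(n_qubits=4, n_layers=2)
        >>> x = torch.randn(8, 4)   # (batch, n_qubits) rotation angles
        >>> z = layer(x)            # (batch, n_outputs), values in [-1, 1]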
"""

    def __init__(self, n_qubits: int = 4, n_layers: int = 2,
                 n_outputs: Optional[int] = None, diff_method: str = "backprop"):
super().__init__()
if not HAS_PENNYLANE:
raise ImportError(
"PennyLane is required for quantum layers. "
"Install with: pip install pennylane"
)
        self.n_qubits = n_qubits
        self.n_layers = n_layers
        self.n_outputs = n_outputs or n_qubits
        if self.n_outputs > n_qubits:
            raise ValueError(
                f"n_outputs ({self.n_outputs}) must not exceed n_qubits ({n_qubits})"
            )
        dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev, interface="torch", diff_method=diff_method)
def circuit(inputs, weights):
# Angle encoding
for i in range(n_qubits):
qml.RX(inputs[..., i], wires=i)
# Variational layers with entanglement
for layer in range(n_layers):
for i in range(n_qubits):
qml.RY(weights[layer, i], wires=i)
# Nearest-neighbor CNOT ladder
for i in range(n_qubits - 1):
qml.CNOT(wires=[i, i + 1])
# Cyclic entanglement for >2 qubits
if n_qubits > 2:
qml.CNOT(wires=[n_qubits - 1, 0])
# Measure PauliZ expectation values
return [qml.expval(qml.PauliZ(i)) for i in range(self.n_outputs)]
        weight_shapes = {"weights": (n_layers, n_qubits)}
        # TorchLayer registers `weights` as a trainable torch Parameter
        # and dispatches batched inputs through the QNode.
        self.qlayer = qml.qnn.TorchLayer(circuit, weight_shapes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x: (*batch, n_qubits) — classical inputs mapped to rotation angles
Returns:
(*batch, n_outputs) — PauliZ expectation values in [-1, 1]
"""
return self.qlayer(x)


class EntanglementMonitor(nn.Module):
"""
Estimates entanglement entropy from attention patterns.
Uses attention distribution entropy as a classical proxy
for quantum entanglement entropy. Avoids expensive quantum
state tomography during training.
Parameters
----------
n_qubits : int
Number of qubits in the simulated quantum system.
subsystem_a : list of ints or None
Qubit indices for subsystem A (bipartition).
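
    Examples
    --------
    A minimal usage sketch with random softmax-normalized attention::

        >>> monitor = EntanglementMonitor(n_qubits=4)
        >>> attn = torch.softmax(torch.randn(2, 8, 16, 16), dim=-1)
        >>> entropy = monitor(attn)  # (2, 8), one estimate per head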
"""

    def __init__(self, n_qubits: int = 4,
subsystem_a: Optional[List[int]] = None):
super().__init__()
self.n_qubits = n_qubits
        if subsystem_a is None:
            subsystem_a = list(range(n_qubits // 2))
        # Reserved for future bipartite-entropy estimates; the attention-entropy
        # proxy in forward() does not use the bipartition yet.
        self.subsystem_a = subsystem_a

    def forward(self, attention_weights: torch.Tensor) -> torch.Tensor:
"""
Estimate entanglement from attention distributions.
Args:
attention_weights: (batch, heads, seq_len, seq_len)
Softmax-normalized attention weights.
Returns:
(batch, heads) — estimated entanglement entropy per head
"""
eps = 1e-8
entropy = -torch.sum(
attention_weights * torch.log(attention_weights + eps),
dim=-1
) # (batch, heads, seq_len)
return entropy.mean(dim=-1) # (batch, heads)


class ClassicalQuantumFallback(nn.Module):
"""
Classical MLP fallback when PennyLane is unavailable.
    Uses smooth SiLU activations and a Tanh output bound to loosely mimic
    the smooth, bounded behavior of quantum expectation values.
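
    A minimal usage sketch (shapes illustrative)::

        >>> fallback = ClassicalQuantumFallback(n_qubits=4, n_layers=2)
        >>> y = fallback(torch.randn(8, 4))  # (8, 4), values in [-1, 1]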
"""

    def __init__(self, n_qubits: int = 4, n_layers: int = 2,
                 n_outputs: Optional[int] = None):
super().__init__()
n_outputs = n_outputs or n_qubits
layers = []
in_dim = n_qubits
for _ in range(n_layers):
layers.extend([
nn.Linear(in_dim, n_qubits * 2),
nn.SiLU(), # Smooth activation like quantum gates
])
in_dim = n_qubits * 2
layers.append(nn.Linear(in_dim, n_outputs))
layers.append(nn.Tanh()) # Bound output to [-1, 1] like expectation values
self.net = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.net(x)


def create_quantum_embedding(input_dim: int, n_qubits: int = 4,
                             n_layers: int = 2, output_dim: Optional[int] = None,
                             embedding_type: str = "angle") -> nn.Module:
"""
Factory for quantum embedding layers.
Args:
input_dim: Input feature dimension.
n_qubits: Number of qubits.
n_layers: Circuit depth.
        output_dim: Output dimension. Defaults to n_qubits.
embedding_type: 'angle' or 'amplitude'.
Returns:
Quantum embedding nn.Module (or classical fallback if no PennyLane).
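
    Example:
        A minimal sketch; returns the classical fallback stack when
        PennyLane is unavailable::

            >>> emb = create_quantum_embedding(input_dim=32, n_qubits=4)
            >>> z = emb(torch.randn(8, 32))  # (8, 4)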
"""
output_dim = output_dim or n_qubits
    if not HAS_PENNYLANE:
        warnings.warn("PennyLane not installed; using classical fallback.")
return nn.Sequential(
nn.Linear(input_dim, n_qubits),
ClassicalQuantumFallback(n_qubits, n_layers, output_dim),
nn.Linear(output_dim, output_dim),
)
if embedding_type == "angle":
return nn.Sequential(
nn.Linear(input_dim, n_qubits),
QuantumAngleEmbedding(n_qubits, n_layers, output_dim),
)
    elif embedding_type == "amplitude":
        # Classical stand-in: a true amplitude embedding would L2-normalize
        # the 2**n_qubits features and load them with qml.AmplitudeEmbedding.
        # Softmax yields a probability vector here, not amplitudes.
        return nn.Sequential(
            nn.Linear(input_dim, 2 ** n_qubits),
            nn.Softmax(dim=-1),
            nn.Linear(2 ** n_qubits, output_dim),
        )
else:
raise ValueError(f"Unknown embedding type: {embedding_type}")
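

if __name__ == "__main__":
    # Minimal smoke test (illustrative only): exercises the quantum angle
    # path when PennyLane is installed, otherwise the classical fallback.
    x = torch.randn(8, 16)
    embed = create_quantum_embedding(input_dim=16, n_qubits=4, n_layers=2)
    print(f"embedding output shape: {tuple(embed(x).shape)}")  # expect (8, 4)

    monitor = EntanglementMonitor(n_qubits=4)
    attn = torch.softmax(torch.randn(2, 4, 10, 10), dim=-1)
    print(f"entropy estimate shape: {tuple(monitor(attn).shape)}")  # expect (2, 4)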