| """ |
| Quantum Feature Encoding Layers. |
| |
| PennyLane-based quantum circuits wrapped as PyTorch nn.Module layers. |
| |
| Components: |
| - QuantumAngleEmbedding: Classical data → rotation angles on qubits |
| - QuantumAmplitudeEmbedding: Encodes data as quantum amplitudes |
| - EntanglementMonitor: Estimates entanglement via attention patterns |
| - ClassicalQuantumFallback: MLP-based fallback when PennyLane unavailable |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import math |
| from typing import Optional, Tuple, List |


try:
    import pennylane as qml
    HAS_PENNYLANE = True
except ImportError:
    HAS_PENNYLANE = False


class QuantumAngleEmbedding(nn.Module):
    """
    Encodes classical features into quantum states via angle encoding.

    Circuit: RX(input) → [RY(θ) → CNOT ladder] × n_layers → ⟨Z_i⟩

    Parameters
    ----------
    n_qubits : int
        Number of qubits (4-8 for NISQ compatibility).
    n_layers : int
        Number of variational circuit layers.
    n_outputs : int or None
        Number of PauliZ expectation values to measure; must not exceed
        n_qubits. Default: n_qubits.
    diff_method : str
        Differentiation method: 'backprop' for fast simulation with
        batched inputs, 'parameter-shift' for hardware compatibility.
    """

    def __init__(self, n_qubits: int = 4, n_layers: int = 2,
                 n_outputs: Optional[int] = None,
                 diff_method: str = "backprop"):
        super().__init__()
        if not HAS_PENNYLANE:
            raise ImportError(
                "PennyLane is required for quantum layers. "
                "Install with: pip install pennylane"
            )

        self.n_qubits = n_qubits
        self.n_layers = n_layers
        self.n_outputs = n_outputs or n_qubits
        if self.n_outputs > n_qubits:
            raise ValueError(
                f"n_outputs ({self.n_outputs}) cannot exceed n_qubits "
                f"({n_qubits}): each output is a single-wire PauliZ "
                f"measurement."
            )

        dev = qml.device("default.qubit", wires=n_qubits)

        @qml.qnode(dev, interface="torch", diff_method=diff_method)
        def circuit(inputs, weights):
            # Angle encoding: one input feature per qubit.
            for i in range(n_qubits):
                qml.RX(inputs[..., i], wires=i)

            # Variational block: trainable RY rotations followed by a
            # CNOT ladder; the wrap-around CNOT closes the entangling
            # ring (skipped for n_qubits <= 2, where the two wires are
            # already coupled by the ladder).
            for layer in range(n_layers):
                for i in range(n_qubits):
                    qml.RY(weights[layer, i], wires=i)
                for i in range(n_qubits - 1):
                    qml.CNOT(wires=[i, i + 1])
                if n_qubits > 2:
                    qml.CNOT(wires=[n_qubits - 1, 0])

            return [qml.expval(qml.PauliZ(i)) for i in range(self.n_outputs)]

        weight_shapes = {"weights": (n_layers, n_qubits)}
        self.qlayer = qml.qnn.TorchLayer(circuit, weight_shapes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: (*batch, n_qubits) — classical inputs mapped to rotation angles
        Returns:
            (*batch, n_outputs) — PauliZ expectation values in [-1, 1]
        """
        return self.qlayer(x)
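

# Usage sketch for QuantumAngleEmbedding (assumes PennyLane is installed).
# The RX encoding is periodic (⟨Z⟩ = cos x per qubit before the variational
# layers), so inputs are typically scaled into [0, π]; the nn.Linear that
# create_quantum_embedding places in front of this layer can learn that
# scaling.
#
#   layer = QuantumAngleEmbedding(n_qubits=4, n_layers=2)
#   x = torch.rand(8, 4) * torch.pi  # (batch, n_qubits) angles
#   z = layer(x)                     # (batch, 4) PauliZ values in [-1, 1]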


class EntanglementMonitor(nn.Module):
    """
    Estimates entanglement entropy from attention patterns.

    Uses attention distribution entropy as a classical proxy for quantum
    entanglement entropy, avoiding expensive quantum state tomography
    during training.

    Parameters
    ----------
    n_qubits : int
        Number of qubits in the simulated quantum system.
    subsystem_a : list of int or None
        Qubit indices for subsystem A of the bipartition. Defaults to the
        first half of the qubits. Stored for bookkeeping; the attention
        proxy in forward() does not depend on it.
    """

    def __init__(self, n_qubits: int = 4,
                 subsystem_a: Optional[List[int]] = None):
        super().__init__()
        self.n_qubits = n_qubits
        if subsystem_a is None:
            subsystem_a = list(range(n_qubits // 2))
        self.subsystem_a = subsystem_a

    def forward(self, attention_weights: torch.Tensor) -> torch.Tensor:
        """
        Estimate entanglement from attention distributions.

        Args:
            attention_weights: (batch, heads, seq_len, seq_len)
                Softmax-normalized attention weights.

        Returns:
            (batch, heads) — estimated entanglement entropy per head
        """
        eps = 1e-8
        # Shannon entropy of each attention row: (batch, heads, seq_len).
        entropy = -torch.sum(
            attention_weights * torch.log(attention_weights + eps),
            dim=-1,
        )
        # Average over query positions to get one score per head.
        return entropy.mean(dim=-1)
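

# Usage sketch for EntanglementMonitor. torch.log is the natural log, so
# the per-row entropy is bounded by log(seq_len) (uniform attention) and
# approaches 0 for sharply peaked attention.
#
#   monitor = EntanglementMonitor(n_qubits=4)
#   attn = torch.softmax(torch.randn(2, 8, 16, 16), dim=-1)
#   ent = monitor(attn)  # (batch, heads) == (2, 8)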


class ClassicalQuantumFallback(nn.Module):
    """
    Classical MLP fallback when PennyLane is unavailable.

    A small SiLU MLP with a final Tanh, so outputs share the [-1, 1]
    range of the PauliZ expectation values produced by
    QuantumAngleEmbedding.
    """

    def __init__(self, n_qubits: int = 4, n_layers: int = 2,
                 n_outputs: Optional[int] = None):
        super().__init__()
        n_outputs = n_outputs or n_qubits
        layers = []
        in_dim = n_qubits
        for _ in range(n_layers):
            layers.extend([
                nn.Linear(in_dim, n_qubits * 2),
                nn.SiLU(),
            ])
            in_dim = n_qubits * 2
        layers.append(nn.Linear(in_dim, n_outputs))
        layers.append(nn.Tanh())
        self.net = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)
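

# Usage sketch for ClassicalQuantumFallback: a shape-compatible stand-in
# for QuantumAngleEmbedding whose final Tanh matches the [-1, 1] range of
# PauliZ expectation values.
#
#   fallback = ClassicalQuantumFallback(n_qubits=4, n_layers=2)
#   z = fallback(torch.randn(8, 4))  # (8, 4), values in (-1, 1)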


def create_quantum_embedding(input_dim: int, n_qubits: int = 4,
                             n_layers: int = 2,
                             output_dim: Optional[int] = None,
                             embedding_type: str = "angle") -> nn.Module:
    """
    Factory for quantum embedding layers.

    Args:
        input_dim: Input feature dimension.
        n_qubits: Number of qubits.
        n_layers: Circuit depth.
        output_dim: Output dimension (for 'angle', must not exceed
            n_qubits). Default: n_qubits.
        embedding_type: 'angle' (quantum circuit) or 'amplitude'
            (classical surrogate, see below).

    Returns:
        Quantum embedding nn.Module (or classical fallback if PennyLane
        is not installed).
    """
    output_dim = output_dim or n_qubits

    if not HAS_PENNYLANE:
        warnings.warn("PennyLane not installed; using classical fallback.")
        return nn.Sequential(
            nn.Linear(input_dim, n_qubits),
            ClassicalQuantumFallback(n_qubits, n_layers, output_dim),
            nn.Linear(output_dim, output_dim),
        )

    if embedding_type == "angle":
        return nn.Sequential(
            nn.Linear(input_dim, n_qubits),
            QuantumAngleEmbedding(n_qubits, n_layers, output_dim),
        )
    elif embedding_type == "amplitude":
        # Classical surrogate: softmax maps features to a probability
        # vector over the 2**n_qubits basis states (a stand-in for a
        # normalized amplitude vector); no quantum circuit is involved.
        return nn.Sequential(
            nn.Linear(input_dim, 2 ** n_qubits),
            nn.Softmax(dim=-1),
            nn.Linear(2 ** n_qubits, output_dim),
        )
    else:
        raise ValueError(f"Unknown embedding type: {embedding_type}")
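

if __name__ == "__main__":
    # Minimal smoke test (illustrative, not part of the public API): build
    # the default angle-encoding stack and run a random batch through it.
    # Falls back to the classical MLP automatically if PennyLane is missing.
    emb = create_quantum_embedding(input_dim=16, n_qubits=4, n_layers=2)
    x = torch.randn(8, 16)
    out = emb(x)
    print(f"output shape: {tuple(out.shape)}")  # expected: (8, 4)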
|