| """ |
| NeuralQuantum NQLM Configuration for Hugging Face Transformers |
| """ |

from transformers import PretrainedConfig


class NeuralQuantumNQLMConfig(PretrainedConfig):
    """Configuration class for the NeuralQuantum NQLM model.

    Stores the transformer backbone hyperparameters together with the
    quantum-enhancement settings, following the standard ``PretrainedConfig``
    interface so instances can be serialized with ``save_pretrained`` and
    restored with ``from_pretrained``.
    """

    model_type = "neuralquantum_nqlm"

    def __init__(
        self,
        vocab_size=50257,
        hidden_size=768,
        num_attention_heads=12,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        quantum_enhancement=True,
        quantum_layers=4,
        quantum_circuit_depth=8,
        quantum_optimization="vqe",
        hybrid_mode=True,
        torch_dtype="float16",
        **kwargs,
    ):
        # PretrainedConfig consumes shared kwargs; passing torch_dtype through
        # lets it normalize the string "float16" into a torch.dtype.
        super().__init__(torch_dtype=torch_dtype, **kwargs)

        # Transformer backbone hyperparameters
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        # Quantum-enhancement settings
        self.quantum_enhancement = quantum_enhancement
        self.quantum_layers = quantum_layers
        self.quantum_circuit_depth = quantum_circuit_depth
        self.quantum_optimization = quantum_optimization
        self.hybrid_mode = hybrid_mode
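

# --- Usage sketch ---
# Illustrative only, not part of the original module: a minimal example that
# registers the config with AutoConfig under its model_type and round-trips it
# through the standard save_pretrained/from_pretrained API. The
# "./nqlm-config" directory name is hypothetical.
if __name__ == "__main__":
    from transformers import AutoConfig

    # Map model_type "neuralquantum_nqlm" to this class so that
    # AutoConfig.from_pretrained can resolve saved checkpoints to it.
    AutoConfig.register("neuralquantum_nqlm", NeuralQuantumNQLMConfig)

    config = NeuralQuantumNQLMConfig(quantum_layers=6, quantum_circuit_depth=12)
    config.save_pretrained("./nqlm-config")  # writes config.json

    # Custom attributes survive the round trip via the config's to_dict().
    reloaded = AutoConfig.from_pretrained("./nqlm-config")
    assert isinstance(reloaded, NeuralQuantumNQLMConfig)
    assert reloaded.quantum_layers == 6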