| """ |
| Graph Neural Network models for Ruby code complexity prediction. |
| |
| This module contains PyTorch Geometric models for learning from |
| Ruby AST structures with performance optimizations. |
| """ |
|
|
| import torch |
| import torch.nn.functional as F |
| from torch_geometric.nn import GCNConv, SAGEConv, GATConv, GINConv, GraphConv, global_mean_pool |
| from torch_geometric.data import Data, Batch |
| import torch_geometric |
| from typing import Dict |
| try: |
| from sentence_transformers import SentenceTransformer |
| SENTENCE_TRANSFORMERS_AVAILABLE = True |
| except ImportError: |
| SENTENCE_TRANSFORMERS_AVAILABLE = False |
|
|
| |
# Probe CUDA support once at import time and cache the result.
# NOTE(review): not referenced anywhere in this module — presumably consumed
# by importers; confirm before removing.
CUDA_AVAILABLE = torch.cuda.is_available()
|
|
|
|
class RubyComplexityGNN(torch.nn.Module):
    """
    Graph Neural Network for predicting Ruby method complexity.

    Learns from Abstract Syntax Tree representations of Ruby methods using a
    configurable stack of graph-convolution layers, mean pooling over nodes,
    and a linear regression head that emits a scalar complexity score.
    """

    def __init__(self, input_dim: int, hidden_dim: int = 64, num_layers: int = 3,
                 conv_type: str = 'GCN', dropout: float = 0.1):
        """
        Build the convolution stack and prediction head.

        Args:
            input_dim: Dimension of input node features.
            hidden_dim: Hidden layer dimension.
            num_layers: Number of convolutional layers.
            conv_type: Type of convolution ('GCN', 'SAGE', 'GAT', 'GIN', 'GraphConv').
            dropout: Dropout probability for regularization.

        Raises:
            ValueError: If conv_type is not one of the supported types.
        """
        super().__init__()

        supported = ['GCN', 'SAGE', 'GAT', 'GIN', 'GraphConv']
        if conv_type not in supported:
            raise ValueError(f"conv_type must be one of {supported}")

        self.num_layers = num_layers
        self.conv_type = conv_type
        self.dropout = dropout

        # Dispatch table: conv_type -> factory producing a layer in_dim -> out_dim.
        builders = {
            'GCN': lambda i, o: GCNConv(i, o),
            'SAGE': lambda i, o: SAGEConv(i, o),
            'GAT': lambda i, o: GATConv(i, o, heads=1),
            'GraphConv': lambda i, o: GraphConv(i, o),
            'GIN': lambda i, o: GINConv(torch.nn.Sequential(
                torch.nn.Linear(i, o),
                torch.nn.ReLU(),
                torch.nn.Linear(o, o),
            )),
        }
        build = builders[conv_type]

        # First layer maps input features to hidden_dim; each remaining layer
        # operates entirely within hidden_dim.
        self.convs = torch.nn.ModuleList([build(input_dim, hidden_dim)])
        for _ in range(num_layers - 1):
            self.convs.append(build(hidden_dim, hidden_dim))

        # Scalar complexity regression head.
        self.predictor = torch.nn.Linear(hidden_dim, 1)

    def forward(self, data: Data, return_embedding: bool = False) -> torch.Tensor:
        """
        Forward pass through the network.

        Args:
            data: PyTorch Geometric Data object containing the graph batch.
            return_embedding: If True, return the pooled graph embedding
                instead of the complexity prediction.

        Returns:
            Complexity prediction of shape (batch_size, 1), or the graph
            embedding of shape (batch_size, hidden_dim) when
            return_embedding=True.
        """
        x, edge_index, batch = data.x, data.edge_index, data.batch

        last = len(self.convs) - 1
        for idx, layer in enumerate(self.convs):
            x = layer(x, edge_index)
            # No nonlinearity/dropout after the final convolution.
            if idx != last:
                x = F.relu(x)
                x = F.dropout(x, p=self.dropout, training=self.training)

        # Aggregate node states into one vector per graph.
        pooled = global_mean_pool(x, batch)

        if return_embedding:
            return pooled
        return self.predictor(pooled)

    def get_model_info(self) -> str:
        """
        Get information about the model configuration.

        Returns:
            String describing the model architecture.
        """
        details = ", ".join([
            self.conv_type,
            f"layers={self.num_layers}",
            f"dropout={self.dropout}",
        ])
        return f"RubyComplexityGNN({details})"
|
|
|
|
class ASTDecoder(torch.nn.Module):
    """
    GNN-based decoder for reconstructing Abstract Syntax Trees from embeddings.

    Takes a graph embedding and produces per-node features plus a parent-slot
    distribution for every node. Message passing runs over a synthetic
    per-graph chain (node i connected to node i+1) rather than the true tree
    topology.
    """

    def __init__(self, embedding_dim: int, output_node_dim: int, hidden_dim: int = 256,
                 num_layers: int = 5, max_nodes: int = 100, conv_type: str = 'GCN',
                 gradient_checkpointing: bool = False):
        """
        Initialize the AST decoder.

        Args:
            embedding_dim: Dimension of input graph embedding.
            output_node_dim: Dimension of output node features.
            hidden_dim: Hidden layer dimension for GNN layers.
            num_layers: Number of decoder GNN layers.
            max_nodes: Maximum number of nodes to generate (width of the
                parent-prediction head).
            conv_type: The type of GNN layer to use ('GCN', 'SAGE', 'GAT',
                'GIN', 'GraphConv').
            gradient_checkpointing: Whether to use gradient checkpointing for
                memory efficiency.

        Raises:
            ValueError: If conv_type is not supported.
        """
        super().__init__()

        self.embedding_dim = embedding_dim
        self.output_node_dim = output_node_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.max_nodes = max_nodes
        self.gradient_checkpointing = gradient_checkpointing

        # Maps the graph embedding into the initial per-node hidden state.
        self.embedding_transform = torch.nn.Linear(embedding_dim, hidden_dim)

        self.convs = torch.nn.ModuleList()
        # current_dim tracks the running feature width; GAT widens it by the
        # number of attention heads.
        current_dim = hidden_dim

        for _ in range(num_layers):
            if conv_type == 'GAT':
                heads = 4
                conv = GATConv(current_dim, hidden_dim, heads=heads)
                current_dim = hidden_dim * heads
            elif conv_type == 'GIN':
                mlp = torch.nn.Sequential(
                    torch.nn.Linear(current_dim, current_dim),
                    torch.nn.ReLU(),
                    torch.nn.Linear(current_dim, current_dim)
                )
                conv = GINConv(mlp)
            elif conv_type == 'SAGE':
                conv = SAGEConv(current_dim, current_dim)
            elif conv_type == 'GCN':
                conv = GCNConv(current_dim, current_dim)
            elif conv_type == 'GraphConv':
                conv = GraphConv(current_dim, current_dim)
            else:
                raise ValueError(f"Unsupported conv_type: {conv_type}")

            self.convs.append(conv)

        # Output heads: reconstructed node features and a distribution over
        # up to max_nodes candidate parent positions per node.
        self.node_output = torch.nn.Linear(current_dim, output_node_dim)
        self.parent_predictor = torch.nn.Linear(current_dim, max_nodes)

    def forward(self, embedding: torch.Tensor, num_nodes_per_graph: torch.Tensor) -> dict:
        """
        Forward pass to decode a batch of embeddings into AST structures.

        Args:
            embedding: Graph embedding tensor of shape [batch_size, embedding_dim].
            num_nodes_per_graph: Tensor of shape [batch_size] with the number
                of nodes for each graph.

        Returns:
            Dictionary with 'node_features' [total_nodes, output_node_dim] and
            'parent_logits' [total_nodes, max_nodes].
        """
        device = embedding.device

        # Seed every node of each graph with that graph's transformed embedding.
        node_features = self.embedding_transform(embedding)
        node_features = node_features.repeat_interleave(num_nodes_per_graph, dim=0)

        # Build chain edges i -> i+1 within each graph (vectorized across the
        # batch). A graph with n nodes contributes n-1 edges.
        num_edges_per_graph = torch.clamp(num_nodes_per_graph - 1, min=0)
        total_edges = int(num_edges_per_graph.sum().item())

        if total_edges == 0:
            edge_index = torch.empty((2, 0), dtype=torch.long, device=device)
        else:
            # Global index of each graph's first node.
            node_offsets = torch.cat([torch.zeros(1, device=device, dtype=num_nodes_per_graph.dtype),
                                      torch.cumsum(num_nodes_per_graph[:-1], dim=0)])

            # Which graph each edge belongs to.
            graph_indices = torch.repeat_interleave(torch.arange(len(num_nodes_per_graph), device=device), num_edges_per_graph)

            # Global index of each graph's first edge.
            edge_offsets = torch.cat([torch.zeros(1, device=device, dtype=num_edges_per_graph.dtype),
                                      torch.cumsum(num_edges_per_graph[:-1], dim=0)])

            # Source position of each edge within its own graph…
            src_in_graph = torch.arange(total_edges, device=device) - edge_offsets[graph_indices]

            # …globalized by that graph's node offset.
            src = node_offsets[graph_indices] + src_in_graph
            dst = src + 1

            edge_index = torch.stack([src, dst], dim=0)

        # Symmetrize so information flows both directions along the chain.
        edge_index = torch_geometric.utils.to_undirected(edge_index)

        x = node_features
        if self.gradient_checkpointing and self.training:
            # Trade compute for memory: re-run each conv during backward.
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)
                return custom_forward

            for conv in self.convs:
                x = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(conv), x, edge_index, use_reentrant=False
                )
                x = F.relu(x)
        else:
            for conv in self.convs:
                x = conv(x, edge_index)
                x = F.relu(x)

        output_node_features = self.node_output(x)
        parent_logits = self.parent_predictor(x)

        return {
            'node_features': output_node_features,
            'parent_logits': parent_logits
        }
|
|
|
|
class TreeAwareASTDecoder(torch.nn.Module):
    """
    Tree-topology-aware AST decoder.

    Unlike ASTDecoder which constructs sequential chain edges (0→1→2→…),
    this decoder uses the actual AST tree structure for GNN message passing.

    Three edge modes:
    - 'chain': Legacy sequential edges (same as ASTDecoder).
    - 'teacher_forced': Uses ground-truth AST edges during training.
    - 'iterative': Two-pass: chain edges → predict parents → rebuild
      tree edges → refine predictions. Fully feed-forward.
    """

    def __init__(self, embedding_dim: int, output_node_dim: int,
                 hidden_dim: int = 256, num_layers: int = 5,
                 max_nodes: int = 100, conv_type: str = 'GCN',
                 edge_mode: str = 'teacher_forced',
                 gradient_checkpointing: bool = False):
        """
        Initialize the tree-aware decoder.

        Args:
            embedding_dim: Dimension of the input graph embedding.
            output_node_dim: Dimension of reconstructed node features.
            hidden_dim: Hidden width of the GNN layers.
            num_layers: Number of first-pass GNN layers.
            max_nodes: Width of the parent-prediction head.
            conv_type: GNN layer type ('GCN', 'SAGE', 'GAT', 'GIN', 'GraphConv').
            edge_mode: Edge construction strategy ('chain', 'teacher_forced',
                or 'iterative').
            gradient_checkpointing: Recompute conv activations during backward
                to save memory.
        """
        super().__init__()
        self.embedding_dim = embedding_dim
        self.output_node_dim = output_node_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.max_nodes = max_nodes
        self.edge_mode = edge_mode
        self.gradient_checkpointing = gradient_checkpointing

        # Maps the graph embedding into the initial per-node hidden state.
        self.embedding_transform = torch.nn.Linear(embedding_dim, hidden_dim)

        # First-pass message-passing stack; current_dim tracks the running
        # feature width (GAT multiplies it by the number of heads).
        self.convs = torch.nn.ModuleList()
        current_dim = hidden_dim
        for _ in range(num_layers):
            conv, current_dim = self._make_conv(conv_type, current_dim, hidden_dim)
            self.convs.append(conv)

        # First-pass output heads.
        self.node_output = torch.nn.Linear(current_dim, output_node_dim)
        self.parent_predictor = torch.nn.Linear(current_dim, max_nodes)

        # Refinement stack and heads only exist in 'iterative' mode; the
        # second pass runs on edges rebuilt from predicted parents.
        if edge_mode == 'iterative':
            self.refine_convs = torch.nn.ModuleList()
            ref_dim = current_dim
            for _ in range(max(num_layers // 2, 1)):
                conv, ref_dim = self._make_conv(conv_type, ref_dim, hidden_dim)
                self.refine_convs.append(conv)
            self.refine_node_output = torch.nn.Linear(ref_dim, output_node_dim)
            self.refine_parent_predictor = torch.nn.Linear(ref_dim, max_nodes)

    @staticmethod
    def _make_conv(conv_type: str, in_dim: int, hidden_dim: int):
        """Build one conv layer; returns (layer, output_dim) so callers can
        chain dims (GAT concatenates heads, widening the output)."""
        if conv_type == 'GAT':
            heads = 4
            return GATConv(in_dim, hidden_dim, heads=heads), hidden_dim * heads
        elif conv_type == 'GIN':
            mlp = torch.nn.Sequential(
                torch.nn.Linear(in_dim, in_dim),
                torch.nn.ReLU(),
                torch.nn.Linear(in_dim, in_dim),
            )
            return GINConv(mlp), in_dim
        elif conv_type == 'SAGE':
            return SAGEConv(in_dim, in_dim), in_dim
        elif conv_type == 'GCN':
            return GCNConv(in_dim, in_dim), in_dim
        elif conv_type == 'GraphConv':
            return GraphConv(in_dim, in_dim), in_dim
        else:
            raise ValueError(f"Unsupported conv_type: {conv_type}")

    @staticmethod
    def _build_chain_edges(num_nodes_per_graph: torch.Tensor) -> torch.Tensor:
        """Build sequential chain edges (legacy behaviour)."""
        device = num_nodes_per_graph.device
        # A graph with n nodes contributes n-1 chain edges.
        num_edges_per_graph = torch.clamp(num_nodes_per_graph - 1, min=0)
        total_edges = num_edges_per_graph.sum().item()
        if total_edges == 0:
            return torch.empty((2, 0), dtype=torch.long, device=device)

        # Global index of each graph's first node.
        node_offsets = torch.cat([
            torch.zeros(1, device=device, dtype=num_nodes_per_graph.dtype),
            torch.cumsum(num_nodes_per_graph[:-1], dim=0),
        ])
        # Which graph each edge belongs to.
        graph_indices = torch.repeat_interleave(
            torch.arange(len(num_nodes_per_graph), device=device),
            num_edges_per_graph,
        )
        # Global index of each graph's first edge.
        edge_offsets = torch.cat([
            torch.zeros(1, device=device, dtype=num_edges_per_graph.dtype),
            torch.cumsum(num_edges_per_graph[:-1], dim=0),
        ])
        # Edge-local source position within its graph, then globalized by the
        # graph's node offset; dst is always the next node in the chain.
        src_in_graph = torch.arange(total_edges, device=device) - edge_offsets[graph_indices]
        edge_node_offsets = node_offsets[graph_indices]
        src = edge_node_offsets + src_in_graph
        dst = src + 1
        return torch.stack([src, dst], dim=0)

    @staticmethod
    def _parents_to_edges(parent_logits: torch.Tensor,
                          num_nodes_per_graph: torch.Tensor) -> torch.Tensor:
        """Convert per-node parent logits to a hard edge_index (argmax)."""
        device = parent_logits.device
        total_nodes = parent_logits.size(0)
        max_nodes = parent_logits.size(1)

        # batch_vec[i] = index of the graph that node i belongs to.
        batch_vec = torch.repeat_interleave(
            torch.arange(len(num_nodes_per_graph), device=device),
            num_nodes_per_graph,
        )
        # Global index of each graph's first node.
        node_offsets = torch.cat([
            torch.zeros(1, device=device, dtype=num_nodes_per_graph.dtype),
            torch.cumsum(num_nodes_per_graph[:-1], dim=0),
        ])

        # Mask parent slots beyond each graph's own node count so the argmax
        # can only pick a valid in-graph parent.
        mask = torch.arange(max_nodes, device=device).unsqueeze(0).expand(total_nodes, -1)
        graph_sizes = num_nodes_per_graph[batch_vec].unsqueeze(1)
        parent_logits = parent_logits.clone()  # avoid mutating the caller's tensor
        parent_logits[mask >= graph_sizes] = float('-inf')

        # Hard parent choice per node (graph-local index), then globalized.
        local_parent = parent_logits.argmax(dim=1)
        global_parent = local_parent + node_offsets[batch_vec]

        # Drop each graph's root (local index 0): it has no parent edge.
        local_idx = torch.arange(total_nodes, device=device) - node_offsets[batch_vec]
        is_root = local_idx == 0
        src = global_parent[~is_root]
        dst = torch.arange(total_nodes, device=device)[~is_root]
        return torch.stack([src, dst], dim=0).long()

    def _apply_convs(self, x, edge_index, convs):
        """Run a conv stack over (x, edge_index), optionally checkpointed."""
        # Symmetrize so messages flow both parent→child and child→parent.
        edge_index = torch_geometric.utils.to_undirected(edge_index)
        if self.gradient_checkpointing and self.training:
            # Trade compute for memory: re-run each conv during backward.
            def _make_fn(module):
                def fn(*inputs):
                    return module(*inputs)
                return fn
            for conv in convs:
                x = torch.utils.checkpoint.checkpoint(
                    _make_fn(conv), x, edge_index, use_reentrant=False,
                )
                x = F.relu(x)
        else:
            for conv in convs:
                x = conv(x, edge_index)
                x = F.relu(x)
        return x

    def forward(self, embedding: torch.Tensor,
                num_nodes_per_graph: torch.Tensor,
                gt_edge_index: torch.Tensor | None = None) -> dict:
        """
        Args:
            embedding: [batch_size, embedding_dim]
            num_nodes_per_graph: [batch_size]
            gt_edge_index: [2, num_edges] ground-truth AST edges (optional).
                Required for teacher_forced mode during training.

        Returns:
            Dict with 'node_features' [total_nodes, output_node_dim] and
            'parent_logits' [total_nodes, max_nodes].
        """
        device = embedding.device
        # Every node starts from its graph's transformed embedding.
        node_features = self.embedding_transform(embedding)
        node_features = node_features.repeat_interleave(num_nodes_per_graph, dim=0)

        # First pass: ground-truth edges when teacher forcing (and edges are
        # supplied); otherwise fall back to chain edges.
        if self.edge_mode == 'teacher_forced' and gt_edge_index is not None:
            first_pass_edges = gt_edge_index
        else:
            first_pass_edges = self._build_chain_edges(num_nodes_per_graph)

        x = self._apply_convs(node_features, first_pass_edges, self.convs)
        output_node_features = self.node_output(x)
        parent_logits = self.parent_predictor(x)

        # Second pass ('iterative' only): rebuild edges from the predicted
        # parents and refine both heads on the predicted topology.
        if self.edge_mode == 'iterative':
            predicted_edges = self._parents_to_edges(parent_logits, num_nodes_per_graph)
            if predicted_edges.size(1) > 0:
                x2 = self._apply_convs(x, predicted_edges, self.refine_convs)
                output_node_features = self.refine_node_output(x2)
                parent_logits = self.refine_parent_predictor(x2)

        return {
            'node_features': output_node_features,
            'parent_logits': parent_logits,
        }
|
|
|
|
class AutoregressiveASTDecoder(torch.nn.Module):
    """
    Autoregressive decoder for generating Abstract Syntax Trees sequentially.

    This decoder generates AST nodes one by one, maintaining state across generation
    steps and considering both text description and current partial graph context.
    """

    def __init__(self,
                 text_embedding_dim: int = 64,
                 graph_hidden_dim: int = 64,
                 state_hidden_dim: int = 128,
                 node_types: int = 74,
                 max_nodes: int = 100,
                 sequence_model: str = 'GRU'):
        """
        Initialize the AutoregressiveASTDecoder.

        Args:
            text_embedding_dim: Dimension of text embeddings (from alignment model)
            graph_hidden_dim: Hidden dimension for graph encoding
            state_hidden_dim: Hidden dimension for sequential state
            node_types: Number of possible node types (also node feature dimension)
            max_nodes: Maximum number of nodes for connection prediction
            sequence_model: Type of sequence model ('GRU', 'LSTM', 'Transformer')

        Raises:
            ValueError: If sequence_model is not one of the supported types.
        """
        super().__init__()

        self.text_embedding_dim = text_embedding_dim
        self.graph_hidden_dim = graph_hidden_dim
        self.state_hidden_dim = state_hidden_dim
        self.node_types = node_types
        self.max_nodes = max_nodes
        self.sequence_model = sequence_model

        # Two-layer GCN used to summarize the partial AST built so far.
        self.graph_gnn_layers = torch.nn.ModuleList([
            GCNConv(node_types, graph_hidden_dim),
            GCNConv(graph_hidden_dim, graph_hidden_dim)
        ])
        self.graph_layer_norm = torch.nn.LayerNorm(graph_hidden_dim)
        self.graph_dropout = torch.nn.Dropout(0.1)

        # Sequence model consumes [text embedding || pooled graph encoding].
        input_size = text_embedding_dim + graph_hidden_dim

        if sequence_model == 'GRU':
            self.state_encoder = torch.nn.GRU(
                input_size=input_size,
                hidden_size=state_hidden_dim,
                num_layers=2,
                batch_first=True,
                dropout=0.1
            )
        elif sequence_model == 'LSTM':
            self.state_encoder = torch.nn.LSTM(
                input_size=input_size,
                hidden_size=state_hidden_dim,
                num_layers=2,
                batch_first=True,
                dropout=0.1
            )
        elif sequence_model == 'Transformer':
            # The transformer is stateless; inputs are first projected to
            # d_model via input_projection (defined below for this mode only).
            encoder_layer = torch.nn.TransformerEncoderLayer(
                d_model=state_hidden_dim,
                nhead=8,
                dim_feedforward=256,
                dropout=0.1,
                batch_first=True
            )
            self.state_encoder = torch.nn.TransformerEncoder(
                encoder_layer=encoder_layer,
                num_layers=4
            )

            self.input_projection = torch.nn.Linear(input_size, state_hidden_dim)
        else:
            raise ValueError(f"Unknown sequence model: {sequence_model}. Choose from 'GRU', 'LSTM', 'Transformer'")

        # Prediction heads over the sequence-model output.
        self.node_type_predictor = torch.nn.Linear(state_hidden_dim, node_types)

        # Sigmoid head: independent connection probability per candidate slot.
        self.connection_predictor = torch.nn.Sequential(
            torch.nn.Linear(state_hidden_dim, max_nodes),
            torch.nn.Sigmoid()
        )

    def forward(self, text_embedding, partial_graph=None, hidden_state=None):
        """
        Forward pass for autoregressive AST generation.

        Args:
            text_embedding: (batch_size, text_embedding_dim) - Text description embedding
            partial_graph: Dict with keys 'x', 'edge_index', 'batch' - Current partial AST (optional)
            hidden_state: Previous hidden state for sequence model (optional)

        Returns:
            Dictionary containing:
                - node_type_logits: (batch_size, node_types) - Probabilities for next node type
                - connection_probs: (batch_size, max_nodes) - Connection probabilities
                - hidden_state: Updated hidden state
        """
        batch_size = text_embedding.size(0)
        device = text_embedding.device

        # --- Encode the partial graph (if any) into one vector per batch item.
        if partial_graph is not None and 'x' in partial_graph and len(partial_graph['x']) > 0:
            # Inputs may arrive as tensors or plain (possibly nested) lists;
            # normalize to a float tensor on the right device.
            graph_features = partial_graph['x']
            if isinstance(graph_features, list):
                # Nested lists form a node-feature matrix.
                if graph_features and isinstance(graph_features[0], list):
                    graph_features = torch.tensor(graph_features, dtype=torch.float32, device=device)
                else:
                    # Flat list: cannot form a 2-D feature matrix; use zeros.
                    # NOTE(review): graph_features remains a list on this path,
                    # so the .shape access below would raise AttributeError if
                    # control reaches it — confirm this path is unreachable or
                    # guard it.
                    graph_encoded = torch.zeros(batch_size, self.graph_hidden_dim, device=device)
            else:
                graph_features = graph_features.to(device)

            if len(graph_features.shape) == 2 and graph_features.size(0) > 0:
                # Normalize/synthesize the edge index.
                edge_index = partial_graph.get('edge_index', None)
                if edge_index is None:
                    # No edges supplied: connect consecutive nodes both ways.
                    num_nodes = graph_features.size(0)
                    if num_nodes > 1:
                        edge_list = []
                        for i in range(num_nodes - 1):
                            edge_list.extend([[i, i + 1], [i + 1, i]])
                        edge_index = torch.tensor(edge_list, dtype=torch.long, device=device).t()
                    else:
                        # Single node: empty edge set.
                        edge_index = torch.empty((2, 0), dtype=torch.long, device=device)
                else:
                    if isinstance(edge_index, list):
                        edge_index = torch.tensor(edge_index, dtype=torch.long, device=device)
                    else:
                        edge_index = edge_index.to(device)

                # Message passing over the partial graph; no activation or
                # dropout after the final GNN layer.
                x = graph_features
                for i, gnn_layer in enumerate(self.graph_gnn_layers):
                    x = gnn_layer(x, edge_index)
                    if i < len(self.graph_gnn_layers) - 1:
                        x = F.relu(x)
                        x = self.graph_dropout(x)

                x = self.graph_layer_norm(x)

                # Pool node states per graph when a batch vector is given.
                if 'batch' in partial_graph and partial_graph['batch'] is not None:
                    batch_indices = partial_graph['batch']
                    if isinstance(batch_indices, list):
                        batch_indices = torch.tensor(batch_indices, dtype=torch.long, device=device)
                    else:
                        batch_indices = batch_indices.to(device)

                    graph_encoded = global_mean_pool(x, batch_indices, size=batch_size)

                    # Defensive fix-ups if pooling disagrees with batch_size.
                    if graph_encoded.size(0) < batch_size:
                        # Pad missing rows with zeros.
                        padding = torch.zeros(batch_size - graph_encoded.size(0), self.graph_hidden_dim, device=device)
                        graph_encoded = torch.cat([graph_encoded, padding], dim=0)
                    elif graph_encoded.size(0) > batch_size:
                        # Truncate extra rows.
                        graph_encoded = graph_encoded[:batch_size]
                else:
                    # No batch vector: average all nodes, share across batch.
                    graph_encoded = x.mean(dim=0).unsqueeze(0).expand(batch_size, -1)
            else:
                # Unusable feature tensor: fall back to zero graph encoding.
                graph_encoded = torch.zeros(batch_size, self.graph_hidden_dim, device=device)
        else:
            # No partial graph yet (e.g. the first generation step).
            graph_encoded = torch.zeros(batch_size, self.graph_hidden_dim, device=device)

        # --- Combine text and graph context, advance the sequence model.
        combined_input = torch.cat([text_embedding, graph_encoded], dim=-1)

        if self.sequence_model == 'Transformer':
            # Stateless: project to d_model and run a length-1 sequence.
            sequence_input = self.input_projection(combined_input.unsqueeze(1))
            sequence_output = self.state_encoder(sequence_input)
            sequence_output = sequence_output.squeeze(1)
            new_hidden_state = None
        else:
            # GRU/LSTM: single-step update carrying recurrent state forward.
            sequence_input = combined_input.unsqueeze(1)
            sequence_output, new_hidden_state = self.state_encoder(sequence_input, hidden_state)
            sequence_output = sequence_output.squeeze(1)

        # Prediction heads.
        node_type_logits = self.node_type_predictor(sequence_output)
        connection_probs = self.connection_predictor(sequence_output)

        return {
            'node_type_logits': node_type_logits,
            'connection_probs': connection_probs,
            'hidden_state': new_hidden_state
        }

    def get_model_info(self) -> str:
        """
        Get information about the autoregressive decoder configuration.

        Returns:
            String describing the model architecture
        """
        return (f"AutoregressiveASTDecoder(\n"
                f" text_dim={self.text_embedding_dim}, "
                f" graph_dim={self.graph_hidden_dim}, "
                f" state_dim={self.state_hidden_dim}\n"
                f" node_types={self.node_types}, "
                f" sequence_model={self.sequence_model}\n"
                f")")
|
|
|
|
class ASTAutoencoder(torch.nn.Module):
    """
    Autoencoder for Abstract Syntax Trees using Graph Neural Networks.

    Combines the existing RubyComplexityGNN (as encoder) with a graph decoder
    (ASTDecoder or TreeAwareASTDecoder, chosen by decoder_edge_mode) to
    reconstruct ASTs from learned embeddings.
    """

    def __init__(self, encoder_input_dim: int, node_output_dim: int,
                 hidden_dim: int = 64, num_layers: int = 3,
                 conv_type: str = 'GCN', dropout: float = 0.1,
                 freeze_encoder: bool = False, encoder_weights_path: str = None,
                 max_nodes: int = 100, decoder_conv_type: str = 'GCN',
                 gradient_checkpointing: bool = False,
                 decoder_edge_mode: str = 'chain'):
        """
        Initialize the AST autoencoder.

        Args:
            encoder_input_dim: Input dimension for encoder (node feature dimension)
            node_output_dim: Output dimension for decoder node features
            hidden_dim: Hidden dimension for both encoder and decoder
            num_layers: Number of layers in both encoder and decoder
            conv_type: Encoder convolution type ('GCN', 'SAGE', 'GAT', 'GIN',
                'GraphConv')
            dropout: Dropout rate for encoder
            freeze_encoder: Whether to freeze encoder weights
            encoder_weights_path: Path to pre-trained encoder weights
            max_nodes: Maximum number of nodes for the decoder.
            decoder_conv_type: The GNN layer type for the decoder.
            gradient_checkpointing: Whether to enable gradient checkpointing for memory efficiency.
            decoder_edge_mode: Edge construction strategy for the decoder.
                'chain' uses the original ASTDecoder with sequential edges.
                'teacher_forced' or 'iterative' uses TreeAwareASTDecoder.
        """
        super().__init__()

        self.decoder_edge_mode = decoder_edge_mode

        self.encoder = RubyComplexityGNN(
            input_dim=encoder_input_dim,
            hidden_dim=hidden_dim,
            num_layers=num_layers,
            conv_type=conv_type,
            dropout=dropout
        )

        # Optionally warm-start the encoder from a checkpoint. Loading is
        # best-effort: any failure is reported and training proceeds with a
        # randomly initialized encoder.
        self.encoder_weights_path = encoder_weights_path
        if encoder_weights_path is not None:
            try:
                checkpoint = torch.load(encoder_weights_path, map_location='cpu', weights_only=True)

                if 'model_config' in checkpoint:
                    saved_config = checkpoint['model_config']
                    # Rebuild the encoder if the checkpoint was trained with a
                    # different architecture so state_dict shapes line up.
                    if (saved_config.get('conv_type', conv_type) != conv_type or
                            saved_config.get('hidden_dim', hidden_dim) != hidden_dim or
                            saved_config.get('num_layers', num_layers) != num_layers or
                            saved_config.get('dropout', dropout) != dropout):
                        print(f"Adjusting encoder config to match saved model: conv_type={saved_config.get('conv_type', conv_type)}")
                        self.encoder = RubyComplexityGNN(
                            input_dim=encoder_input_dim,
                            hidden_dim=saved_config.get('hidden_dim', hidden_dim),
                            num_layers=saved_config.get('num_layers', num_layers),
                            conv_type=saved_config.get('conv_type', conv_type),
                            dropout=saved_config.get('dropout', dropout)
                        )
                        # The decoder must consume whatever embedding width the
                        # (possibly rebuilt) encoder emits.
                        hidden_dim = saved_config.get('hidden_dim', hidden_dim)

                self.encoder.load_state_dict(checkpoint['model_state_dict'])
                print(f"Loaded encoder weights from {encoder_weights_path}")
            except FileNotFoundError:
                print(f"Warning: Could not find encoder weights at {encoder_weights_path}")
            except Exception as e:
                print(f"Warning: Could not load encoder weights: {e}")

        # Freezing prevents encoder updates during autoencoder training.
        if freeze_encoder:
            for param in self.encoder.parameters():
                param.requires_grad = False
            print("Encoder weights frozen")

        # Select the decoder implementation by edge-construction strategy.
        if decoder_edge_mode in ('teacher_forced', 'iterative'):
            self.decoder = TreeAwareASTDecoder(
                embedding_dim=hidden_dim,
                output_node_dim=node_output_dim,
                hidden_dim=hidden_dim,
                num_layers=num_layers,
                max_nodes=max_nodes,
                conv_type=decoder_conv_type,
                edge_mode=decoder_edge_mode,
                gradient_checkpointing=gradient_checkpointing,
            )
        else:
            self.decoder = ASTDecoder(
                embedding_dim=hidden_dim,
                output_node_dim=node_output_dim,
                hidden_dim=hidden_dim,
                num_layers=num_layers,
                max_nodes=max_nodes,
                conv_type=decoder_conv_type,
                gradient_checkpointing=gradient_checkpointing,
            )

        self.hidden_dim = hidden_dim
        self.freeze_encoder = freeze_encoder

    def forward(self, data: Data) -> dict:
        """
        Forward pass through the autoencoder.

        Args:
            data: PyTorch Geometric Data object containing a batch of input ASTs.

        Returns:
            Dict with 'embedding' (per-graph embeddings) and 'reconstruction'
            (the decoder's output dict) for the batch.
        """
        # Encode each graph to a fixed-size embedding.
        embedding = self.encoder(data, return_embedding=True)

        # Per-graph node counts drive how many nodes the decoder generates.
        num_nodes_per_graph = torch.bincount(data.batch)

        # Tree-aware decoders can exploit ground-truth edges (teacher
        # forcing); the chain decoder ignores the true topology.
        if self.decoder_edge_mode != 'chain':
            reconstruction = self.decoder(
                embedding, num_nodes_per_graph,
                gt_edge_index=data.edge_index,
            )
        else:
            reconstruction = self.decoder(embedding, num_nodes_per_graph)

        return {
            'embedding': embedding,
            'reconstruction': reconstruction
        }

    def get_model_info(self) -> str:
        """
        Get information about the autoencoder configuration.

        Returns:
            String describing the model architecture
        """
        encoder_info = self.encoder.get_model_info()
        # Report the actual decoder class; this was previously hard-coded to
        # "ASTDecoder" even when TreeAwareASTDecoder was selected.
        decoder_info = f"{type(self.decoder).__name__}(embedding_dim={self.hidden_dim})"
        freeze_status = " [FROZEN]" if self.freeze_encoder else ""

        return (f"ASTAutoencoder(\n"
                f" encoder: {encoder_info}{freeze_status}\n"
                f" decoder: {decoder_info}\n"
                f")")
|
|
|
|
class SimpleTextEncoder(torch.nn.Module):
    """
    Lightweight character-level text encoder.

    Fallback used when sentence-transformers is unavailable (e.g. offline
    testing): each string becomes a fixed-length sequence of byte codes, a
    bidirectional LSTM runs over learned character embeddings, and the final
    hidden states are projected to the requested output dimension.
    """

    def __init__(self, output_dim: int = 384, max_length: int = 100):
        """
        Initialize the simple text encoder.

        Args:
            output_dim: Output embedding dimension.
            max_length: Maximum number of characters considered per text.
        """
        super().__init__()
        self.output_dim = output_dim
        self.max_length = max_length

        # 256 possible byte values -> 64-dim learned embeddings.
        self.char_embedding = torch.nn.Embedding(256, 64)

        # Bidirectional LSTM with 128 units per direction.
        self.rnn = torch.nn.LSTM(64, 128, batch_first=True, bidirectional=True)

        # Concatenated forward+backward states (256) -> output_dim.
        self.output_proj = torch.nn.Linear(256, output_dim)

    def encode(self, texts: list, convert_to_tensor: bool = True) -> torch.Tensor:
        """
        Encode a batch of strings into embeddings.

        Args:
            texts: List of text strings.
            convert_to_tensor: Ignored; kept for SentenceTransformer API
                compatibility (a tensor is always returned).

        Returns:
            Tensor of shape (len(texts), output_dim).
        """
        limit = self.max_length

        # Lowercase, truncate, map chars to byte codes (clamped to 255),
        # and right-pad with zeros to a fixed length.
        sequences = []
        for text in texts:
            codes = [min(ord(ch), 255) for ch in text.lower()[:limit]]
            codes += [0] * (limit - len(codes))
            sequences.append(codes[:limit])

        device = next(self.parameters()).device
        char_batch = torch.tensor(sequences, dtype=torch.long).to(device)

        # Embed characters and run the BiLSTM.
        embedded = self.char_embedding(char_batch)
        _, (hidden, _) = self.rnn(embedded)

        # Concatenate final forward and backward hidden states per sequence.
        fused = torch.cat([hidden[0], hidden[1]], dim=1)

        return self.output_proj(fused)

    def get_sentence_embedding_dimension(self) -> int:
        """Return the embedding dimension (SentenceTransformer-compatible)."""
        return self.output_dim
|
|
|
|
| class AlignmentModel(torch.nn.Module): |
| """ |
| Dual-encoder model for aligning text descriptions with code embeddings. |
| |
| This model combines a frozen RubyComplexityGNN (code encoder) with a |
| sentence-transformers text encoder to create aligned embeddings in the |
| same 64-dimensional space. |
| """ |
| |
    def __init__(self, input_dim: int, hidden_dim: int = 64, num_layers: int = 3,
                 conv_type: str = 'GCN', dropout: float = 0.1,
                 text_model_name: str = 'all-MiniLM-L6-v2',
                 code_encoder_weights_path: str = 'models/best_model.pt'):
        """
        Initialize the alignment model.

        Builds a frozen graph (code) encoder plus a text encoder and a trainable
        projection head that maps text embeddings into the code embedding space.

        Args:
            input_dim: Input dimension for code encoder (node feature dimension)
            hidden_dim: Hidden dimension for both encoders (default: 64)
            num_layers: Number of layers in code encoder
            conv_type: Type of convolution for code encoder ('GCN' or 'SAGE')
            dropout: Dropout rate for code encoder
            text_model_name: Name of the sentence-transformers model to use
            code_encoder_weights_path: Path to pre-trained code encoder weights
                (default: 'models/best_model.pt'); pass None to skip loading.
        """
        super().__init__()

        self.hidden_dim = hidden_dim

        # Graph encoder for Ruby ASTs; weights are (optionally) loaded below
        # and then frozen so only the text side trains.
        self.code_encoder = RubyComplexityGNN(
            input_dim=input_dim,
            hidden_dim=hidden_dim,
            num_layers=num_layers,
            conv_type=conv_type,
            dropout=dropout
        )

        # Best-effort weight loading: a missing or incompatible checkpoint
        # only prints a warning and leaves the encoder randomly initialized.
        if code_encoder_weights_path is not None:
            try:
                checkpoint = torch.load(code_encoder_weights_path, map_location='cpu', weights_only=True)
                # Checkpoints may be a raw state dict or a training checkpoint
                # dict that wraps one under 'model_state_dict'.
                if 'model_state_dict' in checkpoint:
                    state_dict = checkpoint['model_state_dict']
                else:
                    state_dict = checkpoint

                # Drop the complexity-prediction head ('predictor*' keys);
                # only the encoder weights are wanted here.
                model_state = {}
                for key, value in state_dict.items():
                    if not key.startswith('predictor'):
                        model_state[key] = value

                # strict=False tolerates keys that do not match this architecture.
                self.code_encoder.load_state_dict(model_state, strict=False)
                print(f"Loaded code encoder weights from {code_encoder_weights_path}")
            except FileNotFoundError:
                print(f"Warning: Could not find code encoder weights at {code_encoder_weights_path}")
            except Exception as e:
                print(f"Warning: Could not load code encoder weights: {e}")

        # Freeze the code encoder: alignment training updates only the
        # text projection (and text encoder, if it has trainable params).
        for param in self.code_encoder.parameters():
            param.requires_grad = False
        print("Code encoder weights frozen")

        # Text encoder: prefer sentence-transformers, falling back to the
        # project's SimpleTextEncoder when the package or model is unavailable.
        if SENTENCE_TRANSFORMERS_AVAILABLE:
            try:
                self.text_encoder = SentenceTransformer(text_model_name)
                self.text_encoder_type = "sentence_transformers"
                print(f"Using SentenceTransformer: {text_model_name}")
            except Exception as e:
                print(f"Warning: Could not load SentenceTransformer ({e}), using fallback")
                self.text_encoder = SimpleTextEncoder(output_dim=384)
                self.text_encoder_type = "simple"
        else:
            print("SentenceTransformers not available, using simple text encoder")
            self.text_encoder = SimpleTextEncoder(output_dim=384)
            self.text_encoder_type = "simple"

        # assumes SimpleTextEncoder also implements
        # get_sentence_embedding_dimension() — TODO confirm
        text_dim = self.text_encoder.get_sentence_embedding_dimension()

        # Trainable MLP that projects text embeddings (e.g. 384-dim) into the
        # hidden_dim code-embedding space used for alignment.
        self.text_projection = torch.nn.Sequential(
            torch.nn.Linear(text_dim, 256),
            torch.nn.ReLU(),
            torch.nn.Linear(256, hidden_dim)
        )

        print(f"Text encoder output dim: {text_dim}, projecting to: {hidden_dim}")
| |
| def encode_code(self, data: Data) -> torch.Tensor: |
| """ |
| Encode graph data to embeddings using the frozen code encoder. |
| |
| Args: |
| data: PyTorch Geometric Data object containing graph |
| |
| Returns: |
| Code embeddings tensor of shape (batch_size, hidden_dim) |
| """ |
| with torch.no_grad(): |
| return self.code_encoder(data, return_embedding=True) |
| |
| def encode_text(self, texts: list) -> torch.Tensor: |
| """ |
| Encode text descriptions to embeddings using the text encoder. |
| |
| Args: |
| texts: List of text descriptions |
| |
| Returns: |
| Text embeddings tensor of shape (batch_size, hidden_dim) |
| """ |
| |
| text_embeddings = self.text_encoder.encode(texts, convert_to_tensor=True) |
| |
| |
| text_embeddings = text_embeddings.clone() |
| |
| |
| projected_embeddings = self.text_projection(text_embeddings) |
| |
| return projected_embeddings |
| |
| def forward(self, data: Data, texts: list) -> dict: |
| """ |
| Forward pass through both encoders. |
| |
| Args: |
| data: PyTorch Geometric Data object containing graphs |
| texts: List of text descriptions (same length as batch size) |
| |
| Returns: |
| Dictionary containing: |
| - 'code_embeddings': Code embeddings (batch_size, hidden_dim) |
| - 'text_embeddings': Text embeddings (batch_size, hidden_dim) |
| """ |
| |
| code_embeddings = self.encode_code(data) |
| |
| |
| text_embeddings = self.encode_text(texts) |
| |
| |
| if code_embeddings.device != text_embeddings.device: |
| text_embeddings = text_embeddings.to(code_embeddings.device) |
| |
| return { |
| 'code_embeddings': code_embeddings, |
| 'text_embeddings': text_embeddings |
| } |
| |
| def get_model_info(self) -> str: |
| """ |
| Get information about the alignment model configuration. |
| |
| Returns: |
| String describing the model architecture |
| """ |
| code_info = self.code_encoder.get_model_info() |
| |
| if self.text_encoder_type == "sentence_transformers": |
| |
| model_name = self.text_encoder._model_config.get('_name_or_path') |
| if model_name is None: |
| |
| try: |
| model_name = self.text_encoder[0].auto_model.config._name_or_path |
| except (AttributeError, IndexError): |
| model_name = "SentenceTransformer" |
| text_info = f"SentenceTransformer({model_name})" |
| else: |
| text_info = f"SimpleTextEncoder(dim={self.text_encoder.output_dim})" |
| |
| |
| if isinstance(self.text_projection, torch.nn.Sequential): |
| first_layer = self.text_projection[0] |
| last_layer = self.text_projection[2] |
| projection_info = f"MLP({first_layer.in_features} -> 256 -> {last_layer.out_features})" |
| else: |
| projection_info = f"Linear({self.text_projection.in_features} -> {self.text_projection.out_features})" |
| |
| return (f"AlignmentModel(\n" |
| f" code_encoder: {code_info} [FROZEN]\n" |
| f" text_encoder: {text_info}\n" |
| f" projection: {projection_info}\n" |
| f")") |
|
|
|
|
class HierarchicalASTDecoder(torch.nn.Module):
    """
    Hierarchical, coarse-to-fine decoder for generating ASTs level by level.

    This model takes a text embedding and progressively generates an AST from the
    root down, with each stage adding one level of depth to the tree. Uses proper
    GNN layers to process graph structures at each level.
    """

    def __init__(self, embedding_dim: int, hidden_dim: int, num_levels: int, node_feature_dim: int, conv_type: str = 'GCN'):
        """
        Initialize the HierarchicalASTDecoder.

        Args:
            embedding_dim: Dimension of the input text embedding.
            hidden_dim: Hidden dimension for the GNN layers.
            num_levels: The maximum depth of the AST to generate (number of stages).
            node_feature_dim: The dimension of the node features to be predicted.
            conv_type: The type of GNN convolution to use ('GCN' or 'SAGE').

        Raises:
            ValueError: If conv_type is neither 'GCN' nor 'SAGE'.
        """
        super().__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.num_levels = num_levels
        self.node_feature_dim = node_feature_dim
        self.conv_type = conv_type
        # Empty buffer whose .device tracks the module's device (see `device`).
        self.register_buffer('device_indicator', torch.empty(0))

        if conv_type == 'GCN':
            ConvLayer = GCNConv
        elif conv_type == 'SAGE':
            ConvLayer = SAGEConv
        else:
            raise ValueError(f"Unsupported conv_type: {conv_type}. Use 'GCN' or 'SAGE'.")

        # One generator (GNN + feature/adjacency predictors) per AST depth level.
        self.level_generators = torch.nn.ModuleList()

        for i in range(num_levels):
            # Level 0 consumes the raw text embedding; deeper levels consume
            # the previous level's hidden states (hidden_dim-sized).
            if i == 0:
                input_dim = self.embedding_dim
            else:
                input_dim = self.hidden_dim

            level_gnn = ConvLayer(input_dim, self.hidden_dim)
            node_predictor = torch.nn.Linear(self.hidden_dim, node_feature_dim)
            adjacency_predictor = torch.nn.Linear(self.hidden_dim, self.hidden_dim)

            self.level_generators.append(torch.nn.ModuleDict({
                'gnn': level_gnn,
                'node_predictor': node_predictor,
                'adjacency_predictor': adjacency_predictor,
            }))

    @property
    def device(self):
        """Returns the device the model is on."""
        return self.device_indicator.device

    def forward(self, input_data: Data, target_level: int) -> Dict[str, torch.Tensor]:
        """
        Performs a forward pass for a single level of generation.

        Args:
            input_data: PyG Data object with node features (x) and edge indices (edge_index).
                For level 0, x should be the text embedding repeated for initial node(s).
            target_level: The specific AST level to generate.

        Returns:
            Dictionary containing:
                - hidden_state: Hidden representations for the next level
                - pred_features: Predicted node features for the next level
                - pred_adjacency_diag: Per-node scalar score (squared norm of the
                  adjacency representation); only this diagonal term is computed,
                  for memory efficiency

        Raises:
            ValueError: If target_level is out of range for num_levels.
        """
        if target_level >= self.num_levels:
            raise ValueError(f"Target level {target_level} is out of bounds for {self.num_levels} levels.")

        generator = self.level_generators[target_level]

        # Message passing for this level followed by a non-linearity.
        hidden_state = F.relu(generator['gnn'](input_data.x, input_data.edge_index))

        # Node features (treated as node-type logits by `generate`).
        pred_features = generator['node_predictor'](hidden_state)

        # Squared L2 norm of each node's adjacency representation — used
        # downstream (after a sigmoid) as a child-spawn score.
        adjacency_repr = generator['adjacency_predictor'](hidden_state)

        pred_adjacency_diag = (adjacency_repr * adjacency_repr).sum(dim=1, keepdim=True)

        return {
            'hidden_state': hidden_state,
            'pred_features': pred_features,
            'pred_adjacency_diag': pred_adjacency_diag
        }

    def generate(self, embedding: torch.Tensor, max_levels: int = None, max_nodes_per_level: int = 10, max_total_nodes: int = 1000) -> list:
        """
        Generate a complete AST from a text embedding using hierarchical generation.

        Args:
            embedding: Text embedding tensor of shape (1, embedding_dim) or (embedding_dim,)
            max_levels: Maximum depth of AST to generate (default: self.num_levels)
            max_nodes_per_level: Maximum children per parent node (default: 10)
            max_total_nodes: Maximum total nodes in AST to prevent runaway growth (default: 1000)

        Returns:
            List representing AST in JSON format with 'type' and 'children' fields
        """
        if max_levels is None:
            max_levels = self.num_levels

        device = self.device
        # Accept both (embedding_dim,) and (1, embedding_dim) inputs.
        if embedding.dim() == 1:
            embedding = embedding.unsqueeze(0)
        embedding = embedding.to(device)

        all_nodes = []
        node_id_counter = 0

        # Level 0: a single root "graph" with no edges, seeded by the embedding.
        root_data = Data(
            x=embedding,
            edge_index=torch.empty((2, 0), dtype=torch.long, device=device)
        )

        with torch.no_grad():
            root_output = self.forward(root_data, target_level=0)
            root_features = root_output['pred_features']
            # The arg-max feature index is interpreted as the node type.
            root_type_idx = root_features.argmax(dim=1)[0].item()

        root_node = {
            'id': node_id_counter,
            'type_idx': root_type_idx,
            'features': root_features[0],
            'hidden': root_output['hidden_state'][0],
            'children': [],
            'level': 0
        }
        all_nodes.append(root_node)
        node_id_counter += 1

        current_level_nodes = [root_node]

        # Breadth-first expansion: each iteration adds one level of depth.
        for level in range(1, max_levels):
            if len(current_level_nodes) == 0 or len(all_nodes) >= max_total_nodes:
                break

            next_level_nodes = []

            if len(current_level_nodes) > 0:
                # Stack all parent hidden states into one edgeless batch graph
                # so the level generator runs once per level, not per parent.
                parent_hiddens = torch.stack([node['hidden'] for node in current_level_nodes])

                batch_indices = torch.arange(len(current_level_nodes), device=device).repeat_interleave(1)
                batched_data = Data(
                    x=parent_hiddens,
                    edge_index=torch.empty((2, 0), dtype=torch.long, device=device),
                    batch=batch_indices
                )

                with torch.no_grad():
                    output = self.forward(batched_data, target_level=level)
                    pred_features = output['pred_features']

                for parent_idx, parent_node in enumerate(current_level_nodes):
                    # Sigmoid of the adjacency score scales the child count.
                    # NOTE(review): the score is a squared norm (>= 0), so the
                    # sigmoid is always >= 0.5 and every parent spawns at least
                    # max_nodes_per_level // 2 children — confirm intended.
                    spawn_prob = torch.sigmoid(output['pred_adjacency_diag'][parent_idx, 0]).item()
                    num_children = min(int(spawn_prob * max_nodes_per_level), max_nodes_per_level)

                    for child_idx in range(num_children):
                        if len(all_nodes) >= max_total_nodes:
                            break

                        # All children of a parent share that parent's predicted
                        # features and hidden state at this level.
                        child_features = pred_features[parent_idx]
                        child_hidden = output['hidden_state'][parent_idx]
                        child_type_idx = child_features.argmax(dim=0).item()

                        child_node = {
                            'id': node_id_counter,
                            'type_idx': child_type_idx,
                            'features': child_features,
                            'hidden': child_hidden,
                            'children': [],
                            'level': level,
                            'parent_id': parent_node['id']
                        }

                        parent_node['children'].append(child_node)
                        all_nodes.append(child_node)
                        next_level_nodes.append(child_node)
                        node_id_counter += 1

            current_level_nodes = next_level_nodes

        # Convert the internal node dicts into the JSON-style AST structure.
        def node_to_ast_json(node):
            ast_node = {
                'type': f"type_{node['type_idx']}",
                'children': [node_to_ast_json(child) for child in node['children']]
            }
            return ast_node

        if len(all_nodes) > 0:
            return [node_to_ast_json(all_nodes[0])]
        else:
            return []