| """ |
| Data processing utilities for Ruby method datasets. |
| |
| This module provides functions to load, preprocess, and prepare Ruby method |
| data for GNN training. Includes custom Dataset class for AST to graph conversion. |
| """ |
|
|
| import json |
| import random |
| import os |
| import logging |
| from pathlib import Path |
| from typing import List, Dict, Any, Tuple, Optional, Union |
| try: |
| import torch |
| from torch_geometric.data import Data |
| TORCH_AVAILABLE = True |
| except ImportError: |
| TORCH_AVAILABLE = False |
|
|
|
|
def load_methods_json(filepath: str) -> List[Dict[str, Any]]:
    """
    Read a JSON file of Ruby method records.

    Args:
        filepath: Location of the JSON file with method data

    Returns:
        List of method dictionaries parsed from the file
    """
    with open(filepath, 'r') as handle:
        methods = json.load(handle)
    return methods
|
|
|
|
def methods_to_dataframe(methods: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Convert a list of method dictionaries into a structured format.

    Kept as an identity transform so callers written against an earlier
    DataFrame-based API keep working unchanged.

    Args:
        methods: List of method dictionaries

    Returns:
        The same list of method dictionaries, unmodified
    """
    return methods
|
|
|
|
def filter_methods_by_length(methods: List[Dict[str, Any]], min_lines: int = 5, max_lines: int = 100) -> List[Dict[str, Any]]:
    """
    Filter methods by source code length.

    Methods without a 'raw_source' key are dropped. Each kept method dict is
    annotated in place with a 'line_count' key.

    Args:
        methods: List of method dictionaries
        min_lines: Minimum number of lines (inclusive)
        max_lines: Maximum number of lines (inclusive)

    Returns:
        Filtered list of methods
    """
    # The original implementation carried an unreachable DataFrame-based
    # variant after the return statement; that dead code has been removed.
    filtered = []
    for method in methods:
        if 'raw_source' not in method:
            continue
        line_count = len(method['raw_source'].split('\n'))
        if min_lines <= line_count <= max_lines:
            # Annotate in place so downstream consumers can reuse the count.
            method['line_count'] = line_count
            filtered.append(method)
    return filtered
|
|
|
|
class ASTNodeEncoder:
    """
    Encoder that maps Ruby AST node types to one-hot feature vectors.

    Maintains a fixed vocabulary of common Ruby AST node types; any type
    outside the vocabulary is bucketed into a single "unknown" slot at the
    end of the vector.
    """

    def __init__(self):
        """Set up the vocabulary and lookup table for Ruby AST node types."""

        self.node_types = [
            'def', 'defs', 'args', 'arg', 'begin', 'end', 'lvasgn', 'ivasgn', 'gvasgn',
            'cvasgn', 'send', 'block', 'if', 'unless', 'while', 'until', 'for', 'case',
            'when', 'rescue', 'ensure', 'retry', 'break', 'next', 'redo', 'return',
            'yield', 'super', 'zsuper', 'lambda', 'proc', 'and', 'or', 'not', 'true',
            'false', 'nil', 'self', 'int', 'float', 'str', 'sym', 'regexp', 'array',
            'hash', 'pair', 'splat', 'kwsplat', 'block_pass', 'const', 'cbase',
            'lvar', 'ivar', 'gvar', 'cvar', 'casgn', 'masgn', 'mlhs', 'op_asgn',
            'and_asgn', 'or_asgn', 'back_ref', 'nth_ref', 'class', 'sclass', 'module',
            'defined?', 'alias', 'undef', 'range', 'irange', 'erange', 'regopt'
        ]

        # Reverse lookup: node type string -> vocabulary index.
        self.type_to_idx = {}
        for idx, node_type in enumerate(self.node_types):
            self.type_to_idx[node_type] = idx
        # Final slot is reserved for node types outside the vocabulary.
        self.unknown_idx = len(self.node_types)
        self.vocab_size = self.unknown_idx + 1

    def encode_node_type(self, node_type: str) -> int:
        """
        Encode a node type to its integer index.

        Args:
            node_type: The AST node type string

        Returns:
            Integer index for the node type (unknown slot if unrecognized)
        """
        if node_type in self.type_to_idx:
            return self.type_to_idx[node_type]
        return self.unknown_idx

    def create_node_features(self, node_type: str) -> List[float]:
        """
        Create a one-hot feature vector for a node type.

        Args:
            node_type: The AST node type string

        Returns:
            One-hot feature vector of length vocab_size
        """
        vector = [0.0 for _ in range(self.vocab_size)]
        vector[self.encode_node_type(node_type)] = 1.0
        return vector
|
|
|
|
class ASTGraphConverter:
    """
    Converter for transforming AST JSON to graph representation.

    Parses the AST JSON structure and converts it into node features,
    edge indices, and edge/positional attributes suitable for GNN
    processing.
    """

    def __init__(self):
        """Initialize the AST to graph converter."""
        self.node_encoder = ASTNodeEncoder()
        self.reset()

    def reset(self):
        """Reset the converter state for processing a new AST."""
        self.nodes = []               # one feature vector per node
        self.edges = []               # (parent_idx, child_idx) pairs
        self.edge_attrs = []          # [child_index, depth, num_siblings] per edge
        self.node_depths = []         # AST depth of each node
        self.node_child_indices = []  # sibling position of each node
        self.node_count = 0

    def parse_ast_json(self, ast_json: str) -> Dict[str, Any]:
        """
        Parse AST JSON string and convert to graph representation.

        Args:
            ast_json: JSON string representing the AST

        Returns:
            Dictionary containing node features ('x'), COO edge indices
            ('edge_index'), edge attributes ('edge_attr',
            [child_index, depth, num_siblings] per edge), node positions
            ('node_pos', (child_index, depth) per node), and 'num_nodes'.
            On any failure a minimal single-node fallback graph is returned
            so downstream batching never sees an empty sample.
        """
        self.reset()

        try:
            ast_data = json.loads(ast_json)
            self._process_node(ast_data, parent_idx=None, depth=0, child_index=0, num_siblings=1)

            if not self.nodes:
                # Parsed successfully but produced no nodes: emit a single
                # zero-feature placeholder node with no edges.
                node_features = [[0.0] * self.node_encoder.vocab_size]
                edge_index = [[], []]
                edge_attr = []
                node_pos = [[0, 0]]
            else:
                node_features = self.nodes
                # Convert (parent, child) pairs into COO [sources, targets].
                # (No separate empty-edge branch needed: iterating an empty
                # edge list yields the same [[], []].)
                edge_index = [[], []]
                for parent, child in self.edges:
                    edge_index[0].append(parent)
                    edge_index[1].append(child)
                edge_attr = self.edge_attrs
                node_pos = list(zip(self.node_child_indices, self.node_depths))

            return {
                'x': node_features,
                'edge_index': edge_index,
                'edge_attr': edge_attr,
                'node_pos': node_pos,
                'num_nodes': len(self.nodes) if self.nodes else 1
            }

        except Exception:
            # Deliberate catch-all (json.JSONDecodeError is already an
            # Exception subclass, so the former two-element tuple was
            # redundant): fall back to a minimal single-node graph rather
            # than failing the sample.
            return {
                'x': [[0.0] * self.node_encoder.vocab_size],
                'edge_index': [[], []],
                'edge_attr': [],
                'node_pos': [[0, 0]],
                'num_nodes': 1
            }

    def _process_node(self, node: Union[Dict, List, str, int, float, None],
                      parent_idx: Optional[int] = None, depth: int = 0,
                      child_index: int = 0, num_siblings: int = 1) -> int:
        """
        Recursively process an AST node and its children.

        Args:
            node: The AST node (dict, list, or primitive)
            parent_idx: Index of the parent node
            depth: Depth of the current node in the AST
            child_index: Position of this node among its siblings (0-based)
            num_siblings: Total number of siblings (including this node)

        Returns:
            Index of the current node (-1 if nothing was created)
        """
        if isinstance(node, dict) and 'type' in node:
            # Typed dict: becomes a real graph node.
            node_type = node['type']
            current_idx = self.node_count
            self.node_count += 1

            features = self.node_encoder.create_node_features(node_type)
            self.nodes.append(features)
            self.node_depths.append(depth)
            self.node_child_indices.append(child_index)

            # Link to parent (the root has no incoming edge).
            if parent_idx is not None:
                self.edges.append((parent_idx, current_idx))
                self.edge_attrs.append([child_index, depth, num_siblings])

            if 'children' in node:
                children = node['children']
                n_children = len(children)
                for i, child in enumerate(children):
                    self._process_node(child, current_idx, depth=depth + 1,
                                       child_index=i, num_siblings=n_children)

            return current_idx

        elif isinstance(node, list):
            # Bare list: transparent container — recurse without creating a
            # node and without increasing depth.
            n_items = len(node)
            for i, child in enumerate(node):
                self._process_node(child, parent_idx, depth=depth,
                                   child_index=i, num_siblings=n_items)
            return parent_idx if parent_idx is not None else -1

        else:
            # Primitive leaf (str/int/float/None): only materialized when it
            # has a parent; encoded as a synthetic 'leaf_<pytype>' node type.
            if parent_idx is not None:
                current_idx = self.node_count
                self.node_count += 1

                leaf_type = 'leaf_' + type(node).__name__
                features = self.node_encoder.create_node_features(leaf_type)
                self.nodes.append(features)
                self.node_depths.append(depth)
                self.node_child_indices.append(child_index)

                self.edges.append((parent_idx, current_idx))
                self.edge_attrs.append([child_index, depth, num_siblings])

                return current_idx
            return -1
|
|
|
|
def load_jsonl_file(filepath: str, limit: Optional[int] = None) -> List[Dict[str, Any]]:
    """
    Load records from a JSONL (one JSON object per line) file.

    Blank lines and lines that fail to parse are skipped silently. Note
    that `limit` bounds the number of lines read, not records returned.

    Args:
        filepath: Path to the JSONL file
        limit: Optional maximum number of lines to read

    Returns:
        List of dictionaries decoded from the file
    """
    records = []
    with open(filepath, 'r', encoding='utf-8') as handle:
        for line_no, raw_line in enumerate(handle):
            if limit is not None and line_no >= limit:
                break
            stripped = raw_line.strip()
            if not stripped:
                continue
            try:
                records.append(json.loads(stripped))
            except json.JSONDecodeError:
                # Skip malformed lines rather than aborting the whole load.
                continue
    return records
|
|
|
|
class RubyASTDataset:
    """
    Dataset of Ruby AST samples stored as JSONL, exposed as graph dicts.

    Each sample's AST JSON is converted on access into node features and
    edge indices suitable for GNN training, together with a complexity
    regression target and source metadata.
    """

    def __init__(self, jsonl_path: str, transform=None, limit: Optional[int] = None):
        """
        Initialize the dataset.

        Args:
            jsonl_path: Path to the JSONL file containing method data
            transform: Optional transform to apply to each sample
            limit: Optional maximum number of samples to load.
        """
        self.jsonl_path = jsonl_path
        self.transform = transform
        self.converter = ASTGraphConverter()

        self.data = load_jsonl_file(jsonl_path, limit=limit)
        print(f"Loaded {len(self.data)} samples from {jsonl_path}")

    def __len__(self) -> int:
        """Return the number of samples in the dataset."""
        return len(self.data)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """
        Fetch one sample as a graph dictionary.

        Args:
            idx: Index of the sample

        Returns:
            Dictionary with node features, edges, target 'y', and metadata
        """
        if idx < 0 or idx >= len(self.data):
            raise IndexError(f"Index {idx} out of range for dataset of size {len(self.data)}")

        sample = self.data[idx]
        graph = self.converter.parse_ast_json(sample['ast_json'])

        item = {
            'x': graph['x'],
            'edge_index': graph['edge_index'],
            # Missing complexity scores fall back to a neutral 5.0.
            'y': [sample.get('complexity_score', 5.0)],
            'num_nodes': graph['num_nodes'],
            'id': sample.get('id', f'sample_{idx}'),
            'repo_name': sample.get('repo_name', ''),
            'file_path': sample.get('file_path', '')
        }

        if self.transform:
            item = self.transform(item)

        return item

    def get_feature_dim(self) -> int:
        """Return the dimension of node features."""
        return self.converter.node_encoder.vocab_size
|
|
|
|
def collate_graphs(batch: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Merge a list of graph samples into one disconnected batch graph.

    Node features are concatenated and edge indices are shifted by a
    running node offset so each sample's edges stay internal to that
    sample. A per-node 'batch' vector records graph membership.

    Args:
        batch: List of graph data dictionaries

    Returns:
        Batched graph data

    Raises:
        ValueError: If the batch is empty.
    """
    if not batch:
        raise ValueError("Cannot collate empty batch")

    merged_x = []
    merged_edges = [[], []]
    merged_y = []
    graph_assignment = []
    offset = 0
    metadata = {'ids': [], 'repo_names': [], 'file_paths': []}

    for graph_id, sample in enumerate(batch):
        merged_x.extend(sample['x'])

        # Shift this sample's edges by the node offset accumulated so far.
        sources, targets = sample['edge_index']
        for src, tgt in zip(sources, targets):
            merged_edges[0].append(src + offset)
            merged_edges[1].append(tgt + offset)

        merged_y.extend(sample['y'])

        n = sample['num_nodes']
        graph_assignment.extend([graph_id] * n)
        offset += n

        metadata['ids'].append(sample['id'])
        metadata['repo_names'].append(sample['repo_name'])
        metadata['file_paths'].append(sample['file_path'])

    return {
        'x': merged_x,
        'edge_index': merged_edges,
        'y': merged_y,
        'batch': graph_assignment,
        'num_graphs': len(batch),
        'metadata': metadata
    }
|
|
|
|
class SimpleDataLoader:
    """
    Simple DataLoader implementation for batching data.

    Minimal stand-in for torch.utils.data.DataLoader: batches samples by
    index and applies a collate function. Shuffling happens once at
    construction time, not per epoch.
    """

    def __init__(self, dataset, batch_size: int = 1, shuffle: bool = False, collate_fn=None):
        """
        Initialize the DataLoader.

        Args:
            dataset: Dataset to load from (must support len() and indexing)
            batch_size: Number of samples per batch
            shuffle: Whether to shuffle the sample order once up front
            collate_fn: Function to collate samples into batches
                (defaults to collate_graphs)
        """
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.collate_fn = collate_fn or collate_graphs

        self.indices = list(range(len(dataset)))
        if shuffle:
            # Uses the module-level `random` import; the former redundant
            # function-local `import random` has been removed.
            random.shuffle(self.indices)

    def __len__(self) -> int:
        """Return number of batches (the last batch may be smaller)."""
        return (len(self.dataset) + self.batch_size - 1) // self.batch_size

    def __iter__(self):
        """Yield collated batches following the (possibly shuffled) index order."""
        for start in range(0, len(self.dataset), self.batch_size):
            batch_indices = self.indices[start:start + self.batch_size]
            yield self.collate_fn([self.dataset[i] for i in batch_indices])
|
|
|
|
class PairedDataset:
    """
    Dataset of Ruby AST graphs paired with natural-language descriptions.

    Loads the paired_data.jsonl file, converts each AST to a graph on
    access, and randomly samples one description per method, falling back
    to the method name when no descriptions exist.
    """

    def __init__(self, jsonl_path: str, transform=None, seed: Optional[int] = None, limit: Optional[int] = None):
        """
        Initialize the paired dataset.

        Args:
            jsonl_path: Path to the paired_data.jsonl file
            transform: Optional transform to apply to each sample
            seed: Random seed for consistent description sampling
            limit: Optional maximum number of samples to load.
        """
        self.jsonl_path = jsonl_path
        self.transform = transform
        self.converter = ASTGraphConverter()

        # Seeds the module-level RNG used by random.choice below.
        if seed is not None:
            random.seed(seed)

        self.data = load_jsonl_file(jsonl_path, limit=limit)
        print(f"Loaded {len(self.data)} samples from {jsonl_path}")

    def __len__(self) -> int:
        """Return the number of samples in the dataset."""
        return len(self.data)

    def __getitem__(self, idx: int) -> Tuple[Dict[str, Any], str]:
        """
        Fetch one (graph, description) pair.

        Args:
            idx: Index of the sample

        Returns:
            Tuple of (graph_data, text_description)
        """
        if idx < 0 or idx >= len(self.data):
            raise IndexError(f"Index {idx} out of range for dataset of size {len(self.data)}")

        sample = self.data[idx]
        graph_data = self.converter.parse_ast_json(sample['ast_json'])

        # Pick one description at random; fall back to the method name.
        descriptions = sample.get('descriptions', [])
        if descriptions:
            text_description = random.choice(descriptions)['text']
        else:
            text_description = sample.get('method_name', 'unknown_method')

        graph_result = {
            'x': graph_data['x'],
            'edge_index': graph_data['edge_index'],
            'num_nodes': graph_data['num_nodes'],
            'id': sample.get('id', f'sample_{idx}'),
            'repo_name': sample.get('repo_name', ''),
            'file_path': sample.get('file_path', '')
        }

        if self.transform:
            graph_result = self.transform(graph_result)

        return graph_result, text_description

    def get_feature_dim(self) -> int:
        """Return the dimension of node features."""
        return self.converter.node_encoder.vocab_size
|
|
|
|
def collate_paired_data(batch: List[Tuple[Dict[str, Any], str]]) -> Tuple[Dict[str, Any], List[str]]:
    """
    Collate (graph, text) pairs into one batched graph plus a text list.

    Node features are concatenated and edge indices shifted by a running
    node offset so each graph's edges remain internal to it.

    Args:
        batch: List of (graph_data, text_description) tuples

    Returns:
        Tuple of (batched_graph_data, list_of_text_descriptions)

    Raises:
        ValueError: If the batch is empty.
    """
    if not batch:
        raise ValueError("Cannot collate empty batch")

    texts = [text for _, text in batch]

    merged_x = []
    merged_edges = [[], []]
    assignment = []
    offset = 0
    metadata = {'ids': [], 'repo_names': [], 'file_paths': []}

    for graph_id, (graph, _) in enumerate(batch):
        merged_x.extend(graph['x'])

        # Shift this graph's edges by the node offset accumulated so far.
        sources, targets = graph['edge_index']
        for src, tgt in zip(sources, targets):
            merged_edges[0].append(src + offset)
            merged_edges[1].append(tgt + offset)

        n = graph['num_nodes']
        assignment.extend([graph_id] * n)
        offset += n

        metadata['ids'].append(graph['id'])
        metadata['repo_names'].append(graph['repo_name'])
        metadata['file_paths'].append(graph['file_path'])

    batched_graphs = {
        'x': merged_x,
        'edge_index': merged_edges,
        'batch': assignment,
        'num_graphs': len(batch),
        'metadata': metadata
    }
    return batched_graphs, texts
|
|
|
|
class PairedDataLoader:
    """
    DataLoader for paired graph and text data.

    Mirrors SimpleDataLoader but yields (batched_graphs, texts) tuples
    produced by collate_paired_data.
    """

    def __init__(self, dataset, batch_size: int = 1, shuffle: bool = False):
        """
        Initialize the PairedDataLoader.

        Args:
            dataset: PairedDataset to load from
            batch_size: Number of samples per batch
            shuffle: Whether to shuffle the data (once, at construction)
        """
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle

        self.indices = list(range(len(dataset)))
        if shuffle:
            random.shuffle(self.indices)

    def __len__(self) -> int:
        """Return number of batches (the last batch may be smaller)."""
        full, remainder = divmod(len(self.dataset), self.batch_size)
        return full + (1 if remainder else 0)

    def __iter__(self):
        """Yield collated (graphs, texts) batches."""
        for start in range(0, len(self.dataset), self.batch_size):
            selected = self.indices[start:start + self.batch_size]
            yield collate_paired_data([self.dataset[i] for i in selected])
|
|
|
|
|
|
class PrecomputedRubyASTDataset:
    """
    Dataset class for loading precomputed Ruby AST graph data.

    Loads .pt files containing pre-converted PyTorch Geometric Data objects
    for speed, or falls back to converting a .jsonl file on the fly.
    """

    def __init__(self, path: str, transform=None):
        """
        Initialize the dataset.

        Args:
            path: Path to the .pt or .jsonl file containing graph data.
            transform: Optional transform to apply to each sample.

        Raises:
            ImportError: If PyTorch / PyG are not installed.
            ValueError: If the file extension is neither .pt nor .jsonl.
        """
        self.path = path
        self.transform = transform

        if not TORCH_AVAILABLE:
            raise ImportError("PyTorch and PyG are required for this dataset.")

        if path.endswith('.pt'):
            # weights_only=False because the file stores full Data objects,
            # not bare tensors; only load trusted files.
            self.data = torch.load(path, weights_only=False)
            print(f"Loaded {len(self.data)} precomputed graphs from {path}")
        elif path.endswith('.jsonl'):
            print(f"Processing JSONL file into graphs: {path}")
            jsonl_data = load_jsonl_file(path)
            converter = ASTGraphConverter()
            self.data = []
            for sample in jsonl_data:
                graph_data = converter.parse_ast_json(sample['ast_json'])

                x = torch.tensor(graph_data['x'], dtype=torch.float)
                edge_index = torch.tensor(graph_data['edge_index'], dtype=torch.long)
                # Missing complexity scores fall back to a neutral 5.0.
                y = torch.tensor([sample.get('complexity_score', 5.0)], dtype=torch.float)

                data_obj = Data(x=x, edge_index=edge_index, y=y)

                # Edge attributes are [child_index, depth, num_siblings] per
                # edge; use an empty (0, 3) tensor when there are no edges.
                # (The previous nested ternary had an unreachable
                # `ea if ea else []` branch — simplified to a plain if/else
                # with identical behavior.)
                ea = graph_data.get('edge_attr', [])
                if ea:
                    data_obj.edge_attr = torch.tensor(ea, dtype=torch.float).reshape(-1, 3)
                else:
                    data_obj.edge_attr = torch.zeros((0, 3), dtype=torch.float)

                # Node positions are (child_index, depth) pairs; default to a
                # single origin entry when missing.
                np_ = graph_data.get('node_pos', [])
                data_obj.node_pos = torch.tensor(np_ if np_ else [[0, 0]], dtype=torch.float)

                self.data.append(data_obj)
            print(f"Converted {len(self.data)} graphs from {path}")
        else:
            raise ValueError(f"Unsupported file type: {path}. Please provide a .pt or .jsonl file.")

    def __len__(self) -> int:
        """Return the number of samples in the dataset."""
        return len(self.data)

    def __getitem__(self, idx: int):
        """
        Get a sample from the dataset.

        Args:
            idx: Index of the sample

        Returns:
            PyTorch Geometric Data object (optionally transformed)
        """
        if idx < 0 or idx >= len(self.data):
            raise IndexError(f"Index {idx} out of range for dataset of size {len(self.data)}")

        sample = self.data[idx]

        if self.transform:
            sample = self.transform(sample)

        return sample
|
|
|
|
class PreCollatedDataset:
    """
    Dataset of pre-collated graph batches.

    Loads a .pt file where every item is an already-collated
    `torch_geometric.data.Batch` object, eliminating all real-time
    collation overhead during training.
    """
    def __init__(self, pt_path: str):
        """
        Initialize the dataset.

        Args:
            pt_path: Path to the .pt file containing pre-collated batches.
        """
        # weights_only=False because the file stores full Batch objects.
        self.batches = torch.load(pt_path, weights_only=False)
        print(f"Loaded {len(self.batches)} pre-collated batches from {pt_path}")

    def __len__(self):
        """Return the number of stored batches."""
        return len(self.batches)

    def __getitem__(self, idx):
        """Return the pre-collated batch at position idx."""
        return self.batches[idx]
|
|
|
|
def create_data_loaders(train_path: str, val_path: str, batch_size: int = 32, shuffle: bool = True, num_workers: Optional[int] = None, pre_collated: bool = False):
    """
    Create train and validation data loaders.

    Supports two modes:
    1. Standard loading from a dataset of individual graphs
       (`pre_collated=False`), using a PyG DataLoader for real-time batching.
    2. Pre-collated loading from a dataset of pre-batched graphs
       (`pre_collated=True`) — the most performant option, with near-zero
       CPU overhead.

    Args:
        train_path: Path to training .pt file.
        val_path: Path to validation .pt file.
        batch_size: Batch size (used only if `pre_collated=False`).
        shuffle: Whether to shuffle training data.
        num_workers: Number of workers for data loading (used only if `pre_collated=False`).
        pre_collated: Whether the dataset files contain pre-collated batches.

    Returns:
        Tuple of (train_loader, val_loader)

    Raises:
        ImportError: If PyTorch is unavailable.
    """
    if not TORCH_AVAILABLE:
        raise ImportError("PyTorch is required to create data loaders.")

    if pre_collated:
        from torch.utils.data import DataLoader

        train_dataset = PreCollatedDataset(train_path)
        val_dataset = PreCollatedDataset(val_path)

        # Each dataset item is already a full Batch, so the loader runs with
        # batch_size=1 and simply unwraps the singleton list.
        def unwrap(items):
            return items[0]

        train_loader = DataLoader(train_dataset, batch_size=1, shuffle=shuffle, num_workers=0, collate_fn=unwrap)
        val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=0, collate_fn=unwrap)

        print("✅ Using pre-collated data loader (maximum performance).")
        return train_loader, val_loader

    from torch_geometric.loader import DataLoader

    train_dataset = PrecomputedRubyASTDataset(train_path)
    val_dataset = PrecomputedRubyASTDataset(val_path)

    if num_workers is None:
        num_workers = os.cpu_count()

    pin = torch.cuda.is_available()
    persistent = num_workers > 0
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin,
        persistent_workers=persistent
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=pin,
        persistent_workers=persistent
    )

    print(f"✅ Using standard PyG DataLoader with {num_workers} workers.")

    return train_loader, val_loader
|
|
|
|
|
|
def create_paired_data_loaders(paired_data_path: str, batch_size: int = 32, shuffle: bool = True, seed: Optional[int] = None):
    """
    Build a PairedDataLoader over paired graph and text data.

    Args:
        paired_data_path: Path to paired_data.jsonl file
        batch_size: Batch size for the loader
        shuffle: Whether to shuffle the data
        seed: Random seed for consistent description sampling

    Returns:
        PairedDataLoader instance
    """
    dataset = PairedDataset(paired_data_path, seed=seed)
    return PairedDataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
|
|
|
|
class AutoregressiveASTDataset:
    """
    Dataset class for autoregressive AST generation training.

    This class loads paired Ruby AST and text description data and converts
    each AST into a sequence of (partial_graph, target_node) pairs for
    autoregressive training. Each method therefore yields multiple training
    examples — one per node in depth-first generation order.
    """

    def __init__(self, paired_data_path: str, max_sequence_length: int = 50, seed: Optional[int] = None,
                 precomputed_embeddings_path: Optional[str] = None):
        """
        Initialize the autoregressive dataset.

        Args:
            paired_data_path: Path to the paired_data.jsonl file
            max_sequence_length: Maximum number of nodes per sequence
            seed: Random seed for consistent description sampling
            precomputed_embeddings_path: Path to pre-computed text embeddings file (optional)
        """
        self.paired_data_path = paired_data_path
        self.max_sequence_length = max_sequence_length
        self.converter = ASTGraphConverter()

        # Seeds the module-level RNG, which is shared with other users of
        # `random` in this module.
        if seed is not None:
            random.seed(seed)

        # Optional cache keyed by the exact description string; lets training
        # skip re-encoding descriptions.
        self.precomputed_embeddings = {}
        if precomputed_embeddings_path and os.path.exists(precomputed_embeddings_path):
            try:
                if TORCH_AVAILABLE:
                    self.precomputed_embeddings = torch.load(precomputed_embeddings_path, map_location='cpu', weights_only=True)
                    print(f"✅ Loaded {len(self.precomputed_embeddings)} pre-computed text embeddings")
                else:
                    print("⚠️ PyTorch not available, skipping pre-computed embeddings")
            except Exception as e:
                # Best-effort: a broken embeddings file degrades to on-the-fly encoding.
                print(f"⚠️ Warning: Could not load pre-computed embeddings: {e}")
        elif precomputed_embeddings_path:
            print(f"⚠️ Warning: Pre-computed embeddings file not found: {precomputed_embeddings_path}")

        # Load all methods eagerly, then expand every AST up front into
        # per-step training pairs held in memory.
        self.paired_data = load_jsonl_file(paired_data_path)

        self.sequential_pairs = []
        self._generate_all_sequential_pairs()

        print(f"Loaded {len(self.paired_data)} methods from {paired_data_path}")
        print(f"Generated {len(self.sequential_pairs)} sequential training pairs")

    def _generate_all_sequential_pairs(self):
        """Generate sequential training pairs from all ASTs in the dataset."""
        for sample in self.paired_data:
            try:
                # One description per method, chosen at random; fall back to
                # the method name when no descriptions exist.
                descriptions = sample.get('descriptions', [])
                if descriptions:
                    description = random.choice(descriptions)
                    text_description = description['text']
                else:
                    text_description = sample.get('method_name', 'unknown_method')

                sequential_pairs = self._create_sequential_pairs(
                    sample['ast_json'],
                    text_description
                )

                self.sequential_pairs.extend(sequential_pairs)

            except Exception as e:
                # Skip malformed samples rather than failing the whole load.
                print(f"Warning: Skipping sample {sample.get('id', 'unknown')} due to error: {e}")
                continue

    def _create_sequential_pairs(self, ast_json: str, text_description: str) -> List[Dict[str, Any]]:
        """
        Convert single AST into sequence of (partial_graph, target_node) pairs.

        Args:
            ast_json: JSON string representing the AST
            text_description: Text description for this method

        Returns:
            List of sequential training pairs (empty on failure)
        """
        pairs = []

        try:
            # Nodes come back in depth-first order; connections are
            # (parent_idx, child_idx) pairs over those indices.
            nodes, connections = self._extract_nodes_and_connections_in_order(ast_json)

            # Truncate long methods and drop connections that touch
            # truncated nodes.
            if len(nodes) > self.max_sequence_length:
                nodes = nodes[:self.max_sequence_length]

                filtered_connections = []
                for src, tgt in connections:
                    if src < self.max_sequence_length and tgt < self.max_sequence_length:
                        filtered_connections.append((src, tgt))
                connections = filtered_connections

            # Reuse a pre-computed embedding keyed by the exact text, if present.
            text_embedding = None
            if text_description in self.precomputed_embeddings:
                text_embedding = self.precomputed_embeddings[text_description]

            # One training pair per node: predict nodes[i] from nodes[:i].
            for i in range(len(nodes)):
                partial_graph = self._build_partial_graph(nodes[:i])

                target_node = nodes[i]

                target_connections = self._create_target_connections(i, connections)

                pair = {
                    'text_description': text_description,
                    'text_embedding': text_embedding,  # None when not precomputed
                    'partial_graph': partial_graph,
                    'target_node': target_node,
                    'target_connections': target_connections,
                    'step': i,
                    'total_steps': len(nodes)
                }

                pairs.append(pair)

        except Exception as e:
            # Best-effort: an unparseable AST yields no pairs.
            print(f"Warning: Failed to create sequential pairs: {e}")

        return pairs

    def _extract_nodes_and_connections_in_order(self, ast_json: str) -> Tuple[List[Dict[str, Any]], List[Tuple[int, int]]]:
        """
        Extract nodes and their connections from AST in proper depth-first order.

        Args:
            ast_json: JSON string representing the AST

        Returns:
            Tuple of (nodes_list, connections_list) where connections are
            (parent_idx, child_idx) pairs; both empty on parse failure
        """
        try:
            ast_data = json.loads(ast_json)
            nodes = []
            connections = []
            self._traverse_ast_nodes_with_connections(ast_data, nodes, connections, parent_idx=None)
            return nodes, connections
        except (json.JSONDecodeError, Exception):
            # Broad fallback: any failure yields an empty AST.
            return [], []

    def _traverse_ast_nodes_with_connections(self, node: Union[Dict, List, str, int, float, None],
                                             nodes: List[Dict[str, Any]],
                                             connections: List[Tuple[int, int]],
                                             parent_idx: Optional[int] = None):
        """
        Recursively traverse AST and collect nodes and connections in depth-first order.

        Only dicts carrying a 'type' key become nodes; lists are transparent
        containers and primitives are ignored (unlike ASTGraphConverter,
        which materializes primitive leaves).

        Args:
            node: Current AST node
            nodes: List to collect nodes
            connections: List to collect connections as (parent_idx, child_idx) pairs
            parent_idx: Index of parent node
        """
        if isinstance(node, dict) and 'type' in node:
            # Index is assigned by arrival order, i.e. depth-first position.
            current_idx = len(nodes)
            node_info = {
                'node_type': node['type'],
                'features': self.converter.node_encoder.create_node_features(node['type']),
                'raw_node': node
            }
            nodes.append(node_info)

            if parent_idx is not None:
                connections.append((parent_idx, current_idx))

            if 'children' in node:
                for child in node['children']:
                    self._traverse_ast_nodes_with_connections(child, nodes, connections, current_idx)

        elif isinstance(node, list):
            # Lists don't create nodes; children inherit the same parent.
            for child in node:
                self._traverse_ast_nodes_with_connections(child, nodes, connections, parent_idx)

    def _create_target_connections(self, node_idx: int, all_connections: List[Tuple[int, int]]) -> List[float]:
        """
        Create target connection vector for a specific node being added.

        Args:
            node_idx: Index of the node being added to the graph
            all_connections: List of all connections in the full AST as (parent_idx, child_idx) pairs

        Returns:
            Binary vector of length 100 indicating which existing nodes to connect to
        """
        # NOTE(review): length is hardcoded to 100 and does not track
        # max_sequence_length (default 50) — confirm downstream models expect
        # a fixed 100-slot vector before changing either value.
        target_vector = [0.0] * 100

        # Mark every parent of node_idx that already exists in the partial
        # graph (parent_idx < node_idx) and fits in the fixed-size vector.
        for parent_idx, child_idx in all_connections:
            if child_idx == node_idx and parent_idx < node_idx and parent_idx < 100:
                target_vector[parent_idx] = 1.0

        return target_vector

    def _traverse_ast_nodes(self, node: Union[Dict, List, str, int, float, None], nodes: List[Dict[str, Any]]):
        """
        Recursively traverse AST and collect nodes in depth-first order.

        NOTE(review): connection-less variant of
        _traverse_ast_nodes_with_connections; not called anywhere in this
        module — possibly kept for external callers. Verify before removing.

        Args:
            node: Current AST node
            nodes: List to collect nodes
        """
        if isinstance(node, dict) and 'type' in node:
            # Typed dict entries become real nodes.
            node_info = {
                'node_type': node['type'],
                'features': self.converter.node_encoder.create_node_features(node['type']),
                'raw_node': node
            }
            nodes.append(node_info)

            if 'children' in node:
                for child in node['children']:
                    self._traverse_ast_nodes(child, nodes)

        elif isinstance(node, list):
            # Lists are transparent containers: recurse into the elements.
            for child in node:
                self._traverse_ast_nodes(child, nodes)

    def _build_partial_graph(self, nodes: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Build partial graph from first i nodes.

        Args:
            nodes: List of nodes to include in partial graph

        Returns:
            Partial graph representation with 'x', 'edge_index', 'num_nodes'
        """
        if not nodes:
            # Empty graph for the very first generation step.
            return {
                'x': [],
                'edge_index': [[], []],
                'num_nodes': 0
            }

        node_features = [node['features'] for node in nodes]

        # Connect consecutive nodes bidirectionally — a simple sequential
        # chain, NOT the true AST parent/child edges (those are only encoded
        # separately via target_connections).
        edge_list = []
        for i in range(len(nodes) - 1):
            edge_list.append([i, i + 1])
            edge_list.append([i + 1, i])

        if edge_list:
            edge_index = [[], []]
            for source, target in edge_list:
                edge_index[0].append(source)
                edge_index[1].append(target)
        else:
            edge_index = [[], []]

        return {
            'x': node_features,
            'edge_index': edge_index,
            'num_nodes': len(nodes)
        }

    def __len__(self) -> int:
        """Return the number of sequential training pairs."""
        return len(self.sequential_pairs)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """
        Get a sequential training pair.

        Args:
            idx: Index of the training pair

        Returns:
            Dictionary containing partial graph and target node data
        """
        if idx < 0 or idx >= len(self.sequential_pairs):
            raise IndexError(f"Index {idx} out of range for dataset of size {len(self.sequential_pairs)}")

        return self.sequential_pairs[idx]

    def get_feature_dim(self) -> int:
        """Return the dimension of node features."""
        return self.converter.node_encoder.vocab_size
|
|
|
|
def collate_autoregressive_data(batch: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Collate function for batching autoregressive training data.

    Args:
        batch: List of sequential training pairs

    Returns:
        Batched autoregressive training data

    Raises:
        ValueError: If the batch is empty.
    """
    if not batch:
        raise ValueError("Cannot collate empty batch")

    # Merge all partial graphs into one disjoint union: node features are
    # concatenated, edge indices are shifted by the running node offset, and
    # `batch_assignment` records which graph each node came from.
    merged_features: List[Any] = []
    merged_sources: List[int] = []
    merged_targets: List[int] = []
    batch_assignment: List[int] = []
    offset = 0

    for graph_pos, graph in enumerate(item['partial_graph'] for item in batch):
        if graph['x']:
            merged_features.extend(graph['x'])

        src, dst = graph['edge_index']
        merged_sources.extend(s + offset for s in src)
        merged_targets.extend(t + offset for t in dst)

        count = graph['num_nodes']
        batch_assignment.extend([graph_pos] * count)
        offset += count

    targets = [item['target_node'] for item in batch]

    return {
        'text_descriptions': [item['text_description'] for item in batch],
        'text_embeddings': [item.get('text_embedding') for item in batch],
        'partial_graphs': {
            'x': merged_features,
            'edge_index': [merged_sources, merged_targets],
            'batch': batch_assignment,
            'num_graphs': len(batch),
        },
        'target_node_types': [node['node_type'] for node in targets],
        'target_node_features': [node['features'] for node in targets],
        'target_connections': [item['target_connections'] for item in batch],
        'steps': [item['step'] for item in batch],
        'total_steps': [item['total_steps'] for item in batch],
    }
|
|
|
|
class AutoregressiveDataLoader:
    """
    DataLoader for autoregressive AST training data.
    """

    def __init__(self, dataset: AutoregressiveASTDataset, batch_size: int = 8, shuffle: bool = True):
        """
        Initialize the AutoregressiveDataLoader.

        Args:
            dataset: AutoregressiveASTDataset to load from
            batch_size: Number of sequential pairs per batch
            shuffle: Whether to shuffle the data
        """
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle

        # The index order is fixed once at construction time; shuffling
        # happens here rather than per epoch.
        self.indices = list(range(len(dataset)))
        if shuffle:
            random.shuffle(self.indices)

    def __len__(self) -> int:
        """Return number of batches (ceiling division of dataset size by batch size)."""
        full_batches, remainder = divmod(len(self.dataset), self.batch_size)
        return full_batches + (1 if remainder else 0)

    def __iter__(self):
        """Yield collated batches in the precomputed index order."""
        for start in range(0, len(self.dataset), self.batch_size):
            chosen = self.indices[start:start + self.batch_size]
            yield collate_autoregressive_data([self.dataset[i] for i in chosen])
|
|
|
|
def create_autoregressive_data_loader(paired_data_path: str, batch_size: int = 8, shuffle: bool = True,
                                      max_sequence_length: int = 50, seed: Optional[int] = None,
                                      precomputed_embeddings_path: Optional[str] = None,
                                      num_workers: Optional[int] = None, pin_memory: bool = True):
    """
    Create data loader for autoregressive AST training.

    Args:
        paired_data_path: Path to paired_data.jsonl file
        batch_size: Number of sequential pairs per batch
        shuffle: Whether to shuffle the data
        max_sequence_length: Maximum sequence length per method
        seed: Random seed for consistent description sampling
        precomputed_embeddings_path: Path to pre-computed text embeddings file
        num_workers: Number of worker processes for data loading (defaults to CPU count)
        pin_memory: Whether to use pinned memory for faster GPU transfer

    Returns:
        DataLoader instance (PyTorch DataLoader if available, otherwise AutoregressiveDataLoader)
    """
    dataset = AutoregressiveASTDataset(
        paired_data_path,
        max_sequence_length=max_sequence_length,
        seed=seed,
        precomputed_embeddings_path=precomputed_embeddings_path
    )

    if TORCH_AVAILABLE:
        # `os` is already imported at module level; no local import needed.
        if num_workers is None:
            num_workers = os.cpu_count()

        try:
            from torch.utils.data import DataLoader

            use_pin_memory = pin_memory and torch.cuda.is_available()

            # Bug fix: the previous code passed `prefetch_factor=2` even when
            # num_workers == 0 ("2 if num_workers > 0 else 2" was a no-op),
            # which makes torch.utils.data.DataLoader raise a ValueError and
            # silently forced the custom fallback loader. prefetch_factor and
            # persistent_workers are only valid with worker processes, so
            # build those kwargs conditionally.
            worker_kwargs = {}
            if num_workers > 0:
                worker_kwargs['persistent_workers'] = True
                worker_kwargs['prefetch_factor'] = 2

            loader = DataLoader(
                dataset,
                batch_size=batch_size,
                shuffle=shuffle,
                num_workers=num_workers,
                pin_memory=use_pin_memory,
                collate_fn=collate_autoregressive_data,
                **worker_kwargs
            )

            print(f"✅ Using optimized PyTorch DataLoader with {num_workers} workers, pin_memory={use_pin_memory}")
            return loader

        except Exception as e:
            print(f"⚠️ Warning: Could not create PyTorch DataLoader ({e}), falling back to custom loader")

    loader = AutoregressiveDataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
    print("ℹ️ Using custom AutoregressiveDataLoader")

    return loader
|
|
|
|
class HierarchicalASTDataset(RubyASTDataset):
    """
    Dataset exposing one level of a hierarchical AST dataset.

    Loads a single `_level_N.jsonl` file; all AST-to-graph conversion is
    delegated to the RubyASTDataset base class.
    """

    def __init__(self, jsonl_path: str, transform=None):
        """
        Initialize the dataset for one AST level.

        Args:
            jsonl_path: Path to the level-specific JSONL file.
            transform: Optional transform applied to each sample.
        """
        super().__init__(jsonl_path, transform)
|
|
|
|
def create_hierarchical_data_loader(dataset_path: str, batch_size: int, shuffle: bool, num_workers: Optional[int] = None):
    """
    Creates a data loader for a specific level of the hierarchical dataset.

    Args:
        dataset_path: The full path to the `_level_N.jsonl` file.
        batch_size: The batch size for the data loader.
        shuffle: Whether to shuffle the data.
        num_workers: The number of worker processes for data loading.

    Returns:
        A DataLoader instance for the specified dataset level.
    """
    dataset = HierarchicalASTDataset(dataset_path)

    if TORCH_AVAILABLE:
        try:
            from torch_geometric.loader import DataLoader

            # Default to all available CPU cores when not specified.
            worker_count = os.cpu_count() if num_workers is None else num_workers
            loader = DataLoader(
                dataset,
                batch_size=batch_size,
                shuffle=shuffle,
                num_workers=worker_count,
                pin_memory=torch.cuda.is_available(),
                persistent_workers=worker_count > 0,
                collate_fn=collate_graphs
            )
            logging.info(f"Created PyG DataLoader for {dataset_path} with {worker_count} workers.")
            return loader
        except ImportError:
            logging.warning("PyTorch Geometric not found. Falling back to SimpleDataLoader.")

    # Pure-Python fallback when torch / torch_geometric is unavailable.
    return SimpleDataLoader(dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=collate_graphs)
|
|
|
|
class HierarchicalPairedDataset(PairedDataset):
    """
    Dataset exposing one level of a hierarchical dataset with paired text.

    All graph processing and random sampling of text descriptions is
    delegated to the PairedDataset base class.
    """

    def __init__(self, jsonl_path: str, transform=None, seed: Optional[int] = None, limit: Optional[int] = None):
        """
        Initialize the dataset for one AST level.

        Args:
            jsonl_path: Path to the level-specific JSONL file
                (e.g., train_paired_data_level_0.jsonl).
            transform: Optional transform applied to each sample.
            seed: Random seed for consistent description sampling.
            limit: Optional maximum number of samples to load.
        """
        super().__init__(jsonl_path, transform, seed, limit)
|
|
|
|
def create_hierarchical_paired_data_loader(dataset_path: str, batch_size: int, shuffle: bool, num_workers: Optional[int] = None, limit: Optional[int] = None):
    """
    Creates a data loader for a specific level of the hierarchical paired dataset.

    Args:
        dataset_path: The full path to the `_level_N.jsonl` file.
        batch_size: The batch size for the data loader.
        shuffle: Whether to shuffle the data.
        num_workers: The number of worker processes for data loading.
        limit: Optional maximum number of samples to load.

    Returns:
        A DataLoader instance for the specified dataset level.
    """
    dataset = HierarchicalPairedDataset(dataset_path, limit=limit)

    if TORCH_AVAILABLE:
        try:
            from torch.utils.data import DataLoader
            # Default to single-process loading here (unlike the graph-level
            # loader, which defaults to cpu_count()).
            if num_workers is None:
                num_workers = 0

            loader = DataLoader(
                dataset,
                batch_size=batch_size,
                shuffle=shuffle,
                num_workers=num_workers,
                pin_memory=torch.cuda.is_available(),
                persistent_workers=num_workers > 0,
                collate_fn=collate_paired_data
            )
            logging.info(f"Created PyTorch DataLoader for {dataset_path} with {num_workers} workers.")
            return loader
        # Fix: `except (ImportError, Exception)` was redundant — Exception
        # already covers ImportError. Catch Exception broadly so any loader
        # construction failure falls back to the pure-Python loader.
        except Exception as e:
            logging.warning(f"PyTorch DataLoader creation failed ({e}). Falling back to PairedDataLoader.")

    return PairedDataLoader(dataset, batch_size=batch_size, shuffle=shuffle)