# /// script
# dependencies = [
#     "accelerate",
#     "datasets",
#     "huggingface-hub[hf_transfer]",
#     "torch",
#     "tqdm",
#     "transformers",
# ]
# ///
| """ |
| Generate responses with transparent reasoning using OpenAI's open source GPT OSS models. |
| |
| This implementation uses standard Transformers library for maximum compatibility. |
| The models output structured reasoning in separate channels, allowing you to |
| capture both the thinking process and final response. |
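
Each output row follows this schema (values are illustrative):

    {
        "prompt": "Write a haiku about autumn",
        "think": "<chain-of-thought from the analysis channel>",
        "content": "<final response from the final channel>",
        "raw_output": "<complete output, including channel markers>",
        "reasoning_level": "high",
        "model": "openai/gpt-oss-20b",
    }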

Example usage:
    # Generate haiku with reasoning
    uv run gpt_oss_transformers.py \\
        --input-dataset davanstrien/haiku_dpo \\
        --output-dataset username/haiku-reasoning \\
        --prompt-column question

    # Any prompt dataset with custom settings
    uv run gpt_oss_transformers.py \\
        --input-dataset username/prompts \\
        --output-dataset username/responses-with-reasoning \\
        --prompt-column prompt \\
        --reasoning-level high \\
        --max-samples 100

    # HF Jobs execution
    hf jobs uv run --flavor a10g-small \\
        https://huggingface.co/datasets/uv-scripts/openai-oss/raw/main/gpt_oss_transformers.py \\
        --input-dataset username/prompts \\
        --output-dataset username/responses-with-reasoning
"""

import argparse
import logging
import os
import re
import sys
from datetime import datetime
from typing import Dict, Optional

# Enable fast Hub downloads via hf_transfer; this must be set before
# huggingface_hub is imported to take effect.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

import torch
from datasets import Dataset, load_dataset
from huggingface_hub import DatasetCard, get_token, login
from tqdm.auto import tqdm
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    set_seed,
)

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


def check_gpu_availability() -> int:
    """Check that CUDA is available and return the number of GPUs."""
    if not torch.cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error(
            "Please run on a machine with an NVIDIA GPU or use HF Jobs with a GPU flavor."
        )
        sys.exit(1)

    num_gpus = torch.cuda.device_count()
    for i in range(num_gpus):
        gpu_name = torch.cuda.get_device_name(i)
        gpu_memory = torch.cuda.get_device_properties(i).total_memory / 1024**3
        logger.info(f"GPU {i}: {gpu_name} with {gpu_memory:.1f} GB memory")

    return num_gpus


def parse_channels(raw_output: str) -> Dict[str, str]:
    """
    Extract think/content from GPT OSS channel-based output.

    Expected format:
        <|start|>assistant<|channel|>analysis<|message|>CHAIN_OF_THOUGHT<|end|>
        <|start|>assistant<|channel|>final<|message|>ACTUAL_MESSAGE
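
    Example:
        >>> raw = (
        ...     "<|start|>assistant<|channel|>analysis<|message|>Count syllables.<|end|>"
        ...     "<|start|>assistant<|channel|>final<|message|>An old silent pond..."
        ... )
        >>> parse_channels(raw)["think"]
        'Count syllables.'
        >>> parse_channels(raw)["content"]
        'An old silent pond...'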
| """ |
| think = "" |
| content = "" |
|
|
| |
| analysis_pattern = ( |
| r"<\|start\|>assistant<\|channel\|>analysis<\|message\|>(.*?)<\|end\|>" |
| ) |
| analysis_match = re.search(analysis_pattern, raw_output, re.DOTALL) |
| if analysis_match: |
| think = analysis_match.group(1).strip() |
|
|
| |
| final_pattern = ( |
| r"<\|start\|>assistant<\|channel\|>final<\|message\|>(.*?)(?:<\|end\|>|$)" |
| ) |
| final_match = re.search(final_pattern, raw_output, re.DOTALL) |
| if final_match: |
| content = final_match[1].strip() |
|
|
| |
| if not think and not content: |
| content = raw_output.strip() |
|
|
| return {"think": think, "content": content, "raw_output": raw_output} |
|


def create_dataset_card(
    input_dataset: str,
    model_id: str,
    prompt_column: str,
    reasoning_level: str,
    num_examples: int,
    generation_time: str,
    num_gpus: int,
    temperature: float,
    max_tokens: int,
) -> str:
    """Create a dataset card documenting the generation process."""
    return f"""---
tags:
- generated
- synthetic
- reasoning
- openai-gpt-oss
---

# Generated Responses with Reasoning (Transformers)

This dataset contains AI-generated responses with transparent chain-of-thought reasoning using OpenAI GPT OSS models via Transformers.

## Generation Details

- **Source Dataset**: [{input_dataset}](https://huggingface.co/datasets/{input_dataset})
- **Model**: [{model_id}](https://huggingface.co/{model_id})
- **Reasoning Level**: {reasoning_level}
- **Number of Examples**: {num_examples:,}
- **Generation Date**: {generation_time}
- **Temperature**: {temperature}
- **Max New Tokens**: {max_tokens}
- **Implementation**: Transformers (fallback)
- **GPUs Used**: {num_gpus}

## Dataset Structure

Each example contains:
- `prompt`: The input prompt from the source dataset
- `think`: The model's internal reasoning process
- `content`: The final response
- `raw_output`: Complete model output with channel markers
- `reasoning_level`: The reasoning effort level used
- `model`: Model identifier

## Generation Script

Generated using [uv-scripts/openai-oss](https://huggingface.co/datasets/uv-scripts/openai-oss).

To reproduce:
```bash
uv run gpt_oss_transformers.py \\
    --input-dataset {input_dataset} \\
    --output-dataset <your-dataset> \\
    --prompt-column {prompt_column} \\
    --model-id {model_id} \\
    --reasoning-level {reasoning_level}
```
"""


def main(
    input_dataset: str,
    output_dataset_hub_id: str,
    prompt_column: str = "prompt",
    model_id: str = "openai/gpt-oss-20b",
    reasoning_level: str = "high",
    max_samples: Optional[int] = None,
    temperature: float = 0.7,
    max_tokens: int = 512,
    batch_size: int = 1,
    seed: int = 42,
    hf_token: Optional[str] = None,
):
    """
    Main generation pipeline using Transformers.

    Args:
        input_dataset: Source dataset on Hugging Face Hub
        output_dataset_hub_id: Where to save results on Hugging Face Hub
        prompt_column: Column containing the prompts
        model_id: OpenAI GPT OSS model to use
        reasoning_level: Reasoning effort level (high/medium/low)
        max_samples: Maximum number of samples to process
        temperature: Sampling temperature
        max_tokens: Maximum tokens to generate
        batch_size: Batch size for generation
        seed: Random seed for reproducibility
        hf_token: Hugging Face authentication token
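
    Example (mirrors the first CLI example above):
        main(
            input_dataset="davanstrien/haiku_dpo",
            output_dataset_hub_id="username/haiku-reasoning",
            prompt_column="question",
        )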
| """ |
| generation_start_time = datetime.now().isoformat() |
| set_seed(seed) |
|
|
| |
| num_gpus = check_gpu_availability() |
|
|
| |
| HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token() |
|
|
| if not HF_TOKEN: |
| logger.error("No HuggingFace token found. Please provide token via:") |
| logger.error(" 1. --hf-token argument") |
| logger.error(" 2. HF_TOKEN environment variable") |
| logger.error(" 3. Run 'huggingface-cli login'") |
| sys.exit(1) |
|
|
| logger.info("HuggingFace token found, authenticating...") |
| login(token=HF_TOKEN) |
|

    # Load the tokenizer. Decoder-only models need left padding for batched
    # generation so that new tokens directly follow each prompt.
    logger.info(f"Loading tokenizer: {model_id}")
    tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")

    # Ensure a pad token exists (GPT-style models often define only EOS).
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # The 120B model is sharded with tensor parallelism; smaller models rely
    # on automatic device placement.
    model_kwargs = {"tp_plan": "auto"} if "120b" in model_id else {"device_map": "auto"}

    logger.info(f"Loading model: {model_id}")
    logger.info("This may take a few minutes for large models...")

    try:
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype=torch.bfloat16,
            **model_kwargs,
        )
        model.eval()
    except Exception as e:
        logger.error(f"Failed to load model: {e}")
        logger.error("Trying with default configuration...")
        # Fall back to the default loading path.
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype="auto",
            device_map="auto",
        )
        model.eval()

    # Sample when temperature > 0, otherwise decode greedily.
    generation_config = GenerationConfig(
        max_new_tokens=max_tokens,
        temperature=temperature,
        do_sample=temperature > 0,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
    )

    # Load the source dataset.
    logger.info(f"Loading dataset: {input_dataset}")
    dataset = load_dataset(input_dataset, split="train")

    # Validate the prompt column.
    if prompt_column not in dataset.column_names:
        logger.error(
            f"Column '{prompt_column}' not found. Available columns: {dataset.column_names}"
        )
        sys.exit(1)

    # Optionally limit the number of samples.
    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
    total_examples = len(dataset)
    logger.info(f"Processing {total_examples:,} examples")

    # Render each prompt through the chat template with the requested
    # reasoning effort.
    logger.info(f"Applying chat template with reasoning_level={reasoning_level}...")
    prompts = []
    original_prompts = []

    for example in tqdm(dataset, desc="Preparing prompts"):
        prompt_text = example[prompt_column]
        original_prompts.append(prompt_text)

        messages = [{"role": "user", "content": prompt_text}]

        try:
            prompt = tokenizer.apply_chat_template(
                messages,
                reasoning_effort=reasoning_level,
                add_generation_prompt=True,
                tokenize=False,
            )
        except TypeError:
            # Older chat templates don't accept reasoning_effort.
            logger.warning(
                "reasoning_effort parameter not supported, using standard template"
            )
            prompt = tokenizer.apply_chat_template(
                messages, add_generation_prompt=True, tokenize=False
            )
        prompts.append(prompt)

    # Generate responses batch by batch.
    logger.info(f"Starting generation for {len(prompts):,} prompts...")
    results = []

    for i in tqdm(range(0, len(prompts), batch_size), desc="Generating"):
        batch_prompts = prompts[i : i + batch_size]
        batch_original = original_prompts[i : i + batch_size]

        # Tokenize the batch and move it to the model's device.
        inputs = tokenizer(
            batch_prompts, return_tensors="pt", padding=True, truncation=True
        ).to(model.device)

        with torch.no_grad():
            outputs = model.generate(**inputs, generation_config=generation_config)

        for j, output in enumerate(outputs):
            # Decode only the newly generated tokens, keeping the channel
            # markers so parse_channels can split reasoning from the answer.
            output_ids = output[inputs.input_ids.shape[1] :]
            raw_output = tokenizer.decode(output_ids, skip_special_tokens=False)
            parsed = parse_channels(raw_output)

            result = {
                "prompt": batch_original[j],
                "think": parsed["think"],
                "content": parsed["content"],
                "raw_output": parsed["raw_output"],
                "reasoning_level": reasoning_level,
                "model": model_id,
            }
            results.append(result)

    # Build the output dataset.
    logger.info("Creating output dataset...")
    output_dataset = Dataset.from_list(results)

    logger.info("Creating dataset card...")
    card_content = create_dataset_card(
        input_dataset=input_dataset,
        model_id=model_id,
        prompt_column=prompt_column,
        reasoning_level=reasoning_level,
        num_examples=total_examples,
        generation_time=generation_start_time,
        num_gpus=num_gpus,
        temperature=temperature,
        max_tokens=max_tokens,
    )

    # Push the dataset first, then attach the card.
    logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
    output_dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    logger.info("✅ Generation complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}"
    )


if __name__ == "__main__":
    if len(sys.argv) > 1:
        parser = argparse.ArgumentParser(
            description="Generate responses with reasoning using OpenAI GPT OSS models (Transformers)",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog="""
Examples:
  # Generate haiku with reasoning
  uv run gpt_oss_transformers.py \\
      --input-dataset davanstrien/haiku_dpo \\
      --output-dataset username/haiku-reasoning \\
      --prompt-column question

  # Any prompt dataset
  uv run gpt_oss_transformers.py \\
      --input-dataset username/prompts \\
      --output-dataset username/responses-reasoning \\
      --reasoning-level high \\
      --max-samples 100

  # Use larger 120B model (requires 80GB+ GPU)
  uv run gpt_oss_transformers.py \\
      --input-dataset username/prompts \\
      --output-dataset username/responses-reasoning \\
      --model-id openai/gpt-oss-120b
""",
        )

        parser.add_argument(
            "--input-dataset",
            type=str,
            required=True,
            help="Input dataset on Hugging Face Hub",
        )
        parser.add_argument(
            "--output-dataset",
            type=str,
            required=True,
            help="Output dataset name on Hugging Face Hub",
        )
        parser.add_argument(
            "--prompt-column",
            type=str,
            default="prompt",
            help="Column containing prompts (default: prompt)",
        )
        parser.add_argument(
            "--model-id",
            type=str,
            default="openai/gpt-oss-20b",
            help="Model to use (default: openai/gpt-oss-20b)",
        )
        parser.add_argument(
            "--reasoning-level",
            type=str,
            choices=["high", "medium", "low"],
            default="high",
            help="Reasoning effort level (default: high)",
        )
        parser.add_argument(
            "--max-samples", type=int, help="Maximum number of samples to process"
        )
        parser.add_argument(
            "--temperature",
            type=float,
            default=0.7,
            help="Sampling temperature (default: 0.7)",
        )
        parser.add_argument(
            "--max-tokens",
            type=int,
            default=512,
            help="Maximum tokens to generate (default: 512)",
        )
        parser.add_argument(
            "--batch-size",
            type=int,
            default=1,
            help="Batch size for generation (default: 1)",
        )
        parser.add_argument(
            "--seed",
            type=int,
            default=42,
            help="Random seed (default: 42)",
        )
        parser.add_argument(
            "--hf-token",
            type=str,
            help="Hugging Face token (can also use HF_TOKEN env var)",
        )

        args = parser.parse_args()
        main(
            input_dataset=args.input_dataset,
            output_dataset_hub_id=args.output_dataset,
            prompt_column=args.prompt_column,
            model_id=args.model_id,
            reasoning_level=args.reasoning_level,
            max_samples=args.max_samples,
            temperature=args.temperature,
            max_tokens=args.max_tokens,
            batch_size=args.batch_size,
            seed=args.seed,
            hf_token=args.hf_token,
        )
    else:
        # No arguments given: show usage information.
        print("""
OpenAI GPT OSS Reasoning Generation Script (Transformers)
=========================================================

This script requires arguments. For usage information:
  uv run gpt_oss_transformers.py --help

Example HF Jobs command for 20B model:
  hf jobs uv run \\
      --flavor a10g-small \\
      https://huggingface.co/datasets/uv-scripts/openai-oss/raw/main/gpt_oss_transformers.py \\
      --input-dataset davanstrien/haiku_dpo \\
      --output-dataset username/haiku-reasoning \\
      --prompt-column question \\
      --reasoning-level high

Example HF Jobs command for 120B model:
  hf jobs uv run \\
      --flavor a100-large \\
      https://huggingface.co/datasets/uv-scripts/openai-oss/raw/main/gpt_oss_transformers.py \\
      --input-dataset username/prompts \\
      --output-dataset username/responses-reasoning \\
      --model-id openai/gpt-oss-120b \\
      --reasoning-level high
""")