| """Text generation wrapper.""" |
| import torch |
| from typing import List, Dict |
| from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig |
|
|
| _model_cache = {} |
| _tok_cache = {} |
|
|
|
|
def load_model(model_name: str, load_in_4bit: bool = True):
    """Load and cache a causal LM and its tokenizer, optionally 4-bit quantized."""
    cache_key = f"{model_name}:{load_in_4bit}"
    if cache_key in _model_cache:
        return _model_cache[cache_key], _tok_cache[cache_key]
    tok = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    # Many causal LMs ship without a pad token; fall back to EOS so padding
    # during generation works.
    if tok.pad_token is None:
        tok.pad_token = tok.eos_token
    if load_in_4bit:
        # NF4 with double quantization and bfloat16 compute: the usual
        # bitsandbytes config for memory-efficient inference.
        bnb = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.bfloat16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
        )
        model = AutoModelForCausalLM.from_pretrained(
            model_name, quantization_config=bnb, device_map="auto",
            trust_remote_code=True, torch_dtype=torch.bfloat16,
        )
    else:
        model = AutoModelForCausalLM.from_pretrained(
            model_name, device_map="auto",
            trust_remote_code=True, torch_dtype=torch.bfloat16,
        )
    model.eval()
    _model_cache[cache_key] = model
    _tok_cache[cache_key] = tok
    return model, tok


def generate_text(messages: List[Dict[str, str]], model_name: str, max_new_tokens: int = 80) -> str:
    """Greedily generate a reply to a chat-format message list and return it as text."""
    model, tokenizer = load_model(model_name)
    # Render the chat template and tokenize in one step; return_dict=True
    # yields both input_ids and attention_mask.
    inputs = tokenizer.apply_chat_template(
        messages, tokenize=True, return_tensors="pt",
        add_generation_prompt=True, return_dict=True,
    )
    # With device_map="auto" the weights may be sharded across devices; move
    # the inputs to wherever the first parameter lives.
    dev = next(model.parameters()).device
    inputs = {k: v.to(dev) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model.generate(
            **inputs, max_new_tokens=max_new_tokens,
            do_sample=False, pad_token_id=tokenizer.pad_token_id,
        )
    # Slice off the prompt tokens and decode only the new continuation.
    gen = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(gen, skip_special_tokens=True).strip()
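

# Minimal usage sketch, not part of the module's API: the checkpoint name
# below is an assumption for illustration; substitute whichever chat model
# you actually run.
if __name__ == "__main__":
    demo_messages = [
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "Say hello in one sentence."},
    ]
    # Hypothetical model id; any instruction-tuned causal LM should work.
    print(generate_text(demo_messages, "Qwen/Qwen2.5-0.5B-Instruct"))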