Usage
This model was fine-tuned using PEFT/LoRA, with all of the base model's linear layers updated by the adapter.
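The training configuration is not published on this card; the snippet below is only a minimal sketch of a LoRA setup consistent with the description above, assuming `peft`'s `LoraConfig` with `target_modules="all-linear"`. The rank, alpha, and dropout values are illustrative assumptions, not the values actually used.

```python
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model
import torch

# Illustrative only: r, lora_alpha, and lora_dropout are assumed values.
base = AutoModelForCausalLM.from_pretrained(
    "google/gemma-3-1b-it",
    torch_dtype=torch.bfloat16,
)
lora_config = LoraConfig(
    r=16,                          # assumed rank
    lora_alpha=32,                 # assumed scaling factor
    lora_dropout=0.05,             # assumed dropout
    target_modules="all-linear",   # adapt every linear layer, per the model name
    task_type="CAUSAL_LM",
)
peft_model = get_peft_model(base, lora_config)
peft_model.print_trainable_parameters()
```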
To run inference, load the base Gemma model, attach the adapter with `PeftModel`, and generate from a chat-formatted prompt:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import torch

# Load the base model in bfloat16 and let it be placed on available devices.
base_model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-3-1b-it",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Attach the LoRA adapter weights on top of the base model.
model = PeftModel.from_pretrained(
    base_model,
    "zoeeyys/gemma-3-1b-coachfinetuned-v1-all-linear",
)

# The tokenizer comes from the base model; left padding is used for generation.
tokenizer = AutoTokenizer.from_pretrained(
    "google/gemma-3-1b-it",
    padding_side="left",
)

# Build the prompt with Gemma's chat template.
messages = [
    {"role": "user", "content": "Hi! I want to plan my life."},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

# Generate a response and decode only the newly generated tokens.
outputs = model.generate(**inputs, max_new_tokens=128)
print(
    tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:],
        skip_special_tokens=True,
    )
)
```
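Optionally, the adapter can be folded into the base weights so downstream inference no longer needs `peft` at load time. This is a generic PEFT pattern rather than something stated on this card; it continues from the example above and uses PeftModel's standard `merge_and_unload()`. The output directory name is hypothetical.

```python
# Fold the LoRA weights into the base model and drop the adapter wrappers.
merged = model.merge_and_unload()
merged.save_pretrained("gemma-3-1b-coach-merged")      # hypothetical path
tokenizer.save_pretrained("gemma-3-1b-coach-merged")
```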