from vllm import LLM
from vllm.sampling_params import SamplingParams


# Example: single-turn chat inference with akahana/indo-psikologi-sft via vLLM.
model_name = "akahana/indo-psikologi-sft"

# temperature=0.0 -> greedy (deterministic) decoding, capped at 256 new tokens.
sampling_params = SamplingParams(max_tokens=256, temperature=0.0)

llm = LLM(
    model=model_name,
    trust_remote_code=True,
    max_model_len=2048,
    max_num_batched_tokens=2048,
    # NOTE(review): requires 2 GPUs; set to 1 on a single-GPU machine.
    tensor_parallel_size=2, # if you have multigpus
)

prompt = "### psikologi ### \n Hai aku lagi terpuruk hari ini"

messages = [
    {
        "role": "user",
        "content": prompt,
    }
]

# llm.chat applies the model's chat template and returns one RequestOutput per prompt.
outputs = llm.chat(messages, sampling_params=sampling_params)
output = outputs[0].outputs[0].text.strip()

# Append the assistant reply so `messages` stays a valid history for multi-turn use.
messages.append({"role": "assistant", "content": output})

# Bug fix: the original bare `output` expression only echoes in a REPL/notebook
# and is a no-op when this file runs as a script — print the response explicitly.
print(output)
Downloads last month: 6
Safetensors — Model size: 0.6B params · Tensor type: BF16
Inference Providers (NEW): This model isn't deployed by any Inference Provider. 🙋 Ask for provider support

Model tree for akahana/indo-psikologi-sft

Finetuned from base model: Qwen/Qwen3-0.6B (802 finetunes of this base) → this model