from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama
|
|
# Load the locally stored Phi-3 GGUF model
llm = Llama(
    model_path="phi-3-mini-4k-instruct.Q4_K.gguf",
    n_ctx=4096,
    n_threads=2,
)
|
|
# Request schema: the prompts plus optional generation parameters
class Validation(BaseModel):
    user_prompt: str
    system_prompt: str
    max_tokens: int = 1024
    temperature: float = 0.01
|
|
app = FastAPI()
|
|
@app.post("/generate_response")
async def generate_response(item: Validation):
    # Format the request with Phi-3's chat template; the system prompt
    # belongs in <|system|> tags, not a second <|user|> turn
    prompt = (
        f"<|system|>\n{item.system_prompt}\n<|end|>\n"
        f"<|user|>\n{item.user_prompt}\n<|end|>\n"
        f"<|assistant|>"
    )
|
|
    # Run inference; echo=False returns only the generated completion
    # rather than echoing the prompt back to the client
    output = llm(
        prompt,
        max_tokens=item.max_tokens,
        temperature=item.temperature,
        echo=False,
    )

    return output["choices"][0]["text"]
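# A minimal sketch of how the endpoint might be called once the server is
# running, e.g. with `uvicorn main:app`. The module name "main", the host,
# and the port are assumptions, not part of the original listing:
#
#   curl -X POST http://localhost:8000/generate_response \
#        -H "Content-Type: application/json" \
#        -d '{"user_prompt": "Why is the sky blue?", "system_prompt": "You are a helpful assistant."}'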
|
|