from fastapi import FastAPI, Request
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import uvicorn


app = FastAPI()

# Client for the Hugging Face Inference API, pointed at the Mixtral instruct model.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
|
class Item(BaseModel):
    """Request body for the /chat/completions endpoint."""

    prompt: str
    history: list  # list of (user_prompt, bot_response) pairs
    system_prompt: str
    temperature: float = 0.0
    max_new_tokens: int = 16384
    top_p: float = 0.15
    repetition_penalty: float = 1.0
|
|
def format_prompt(message, history):
    """Build a Mixtral-instruct prompt from the chat history and the new message."""
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
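

# For example, format_prompt("Hi", [("Hello", "Hey there!")]) produces the
# Mistral-instruct style string:
#   "<s>[INST] Hello [/INST] Hey there!</s> [INST] Hi [/INST]"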
|
|
def generate(item: Item):
    # The text-generation endpoint requires a strictly positive temperature,
    # so clamp very small values (including the default 0.0) to a small floor.
    temperature = float(item.temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(item.top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=item.max_new_tokens,
        top_p=top_p,
        repetition_penalty=item.repetition_penalty,
        do_sample=True,
        seed=42,
    )

    # Prepend the system prompt to the user message and apply the instruct template.
    formatted_prompt = format_prompt(f"{item.system_prompt}, {item.prompt}", item.history)
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )

    # Accumulate the streamed tokens into a single response string.
    output = ""
    for response in stream:
        output += response.token.text
    return output
|
|
@app.post("/chat/completions")
async def generate_text(item: Item):
    return {"response": generate(item)}
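

# Example request against this endpoint (assumes the server is running locally
# on port 8000; adjust host/port to your deployment):
#
#   curl -X POST http://localhost:8000/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"prompt": "What is FastAPI?", "history": [], "system_prompt": "You are a helpful assistant."}'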
|
|
@app.get("/ping")
async def ping(request: Request):
    return "pong"
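

# Optional entry point so the script can be launched directly; the host and
# port below are assumptions. The app can equally be served with
# `uvicorn <module_name>:app --host 0.0.0.0 --port 8000`.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)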
|
|
|
|