from abc import ABC, abstractmethod

from openai import OpenAI


class Responser(ABC):
    """Abstract interface for LLM responders."""

    @abstractmethod
    def respond(self, system_info: str, user_prompt: str) -> str:
        pass
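
# Illustrative sketch (hypothetical, not used elsewhere in this module): the
# smallest possible Responser implementation, showing the contract the concrete
# classes below fulfil: take a system prompt and a user prompt, return a reply
# string. The name EchoResponser is an assumption made for this example only.
class EchoResponser(Responser):
    """Offline stand-in that echoes the prompts back; handy in unit tests."""

    def respond(self, system_info: str, user_prompt: str) -> str:
        return f"[system: {system_info}] {user_prompt}"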

class TurboResponser(Responser):
    """OpenAI LLM responder (compatible with the openai>=1.0 client)."""

    def __init__(self, api_key: str = None, api_base: str = None, model: str = 'gpt-4o'):
        """
        Initialize the client and model.
        :param api_key: OpenAI API key
        :param api_base: optional custom API base URL
        :param model: model name to use
        """
        self.client = OpenAI(api_key=api_key, base_url=api_base, timeout=1000)
        self.model = model
    def respond(self, system_info: str, user_prompt: str, temperature: float = 0.2) -> str:
        """
        Answer a system prompt plus a user prompt.
        :param system_info: system prompt content
        :param user_prompt: user input content
        :param temperature: sampling temperature
        :return: the reply generated by the model
        """
        messages = [
            {"role": "system", "content": system_info},
            {"role": "user", "content": user_prompt}
        ]
        extra_body = {}
        if 'qwen3-235b-a22b' in self.model:
            # This model accepts a provider-specific flag to disable "thinking" output.
            extra_body["enable_thinking"] = False
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=temperature,
            max_tokens=16000,
            extra_body=extra_body or None,
        )
        return response.choices[0].message.content
    def respond_complete(self, prompt: str, temperature: float = 0.2) -> str:
        """Plain text completion via the legacy Completions endpoint."""
        # openai.Completion.create was removed in openai>=1.0; the client
        # instance exposes the same endpoint as client.completions.create.
        response = self.client.completions.create(
            prompt=prompt,
            model=self.model,
            temperature=temperature,
            max_tokens=100000
        )
        return response.choices[0].text.strip()

class OpenResponser(Responser):
    """OpenAI-compatible LLM responder (openai>=1.0 client)."""

    def __init__(self, api_key: str = None, api_base: str = 'https://api.siliconflow.cn/v1', model: str = 'gpt-4o'):
        """
        Initialize the client and model.
        :param api_key: API key
        :param api_base: optional custom API base URL
        :param model: model name to use
        """
        self.client = OpenAI(api_key=api_key, base_url=api_base)
        self.model = model
    def respond(self, system_info: str, user_prompt: str, temperature: float = 0) -> str:
        """
        Answer a system prompt plus a user prompt.
        :param system_info: system prompt content
        :param user_prompt: user input content
        :param temperature: sampling temperature
        :return: the reply generated by the model
        """
        messages = [
            {"role": "system", "content": system_info},
            {"role": "user", "content": user_prompt}
        ]
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=temperature
        )
        return response.choices[0].message.content
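
# Sketch of a convenience factory (hypothetical; the names "turbo" and "open"
# are assumptions made for this example, not part of any existing API). Calling
# code can then depend only on the Responser interface.
def make_responser(kind: str = "turbo", **kwargs) -> Responser:
    """Return a TurboResponser or OpenResponser for the given kind."""
    if kind == "turbo":
        return TurboResponser(**kwargs)
    if kind == "open":
        return OpenResponser(**kwargs)
    raise ValueError(f"unknown responser kind: {kind!r}")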

if __name__ == "__main__":
    responser = TurboResponser(model="qwen3-235b-a22b",
                               api_base="https://models-proxy.stepfun-inc.com/v1",
                               api_key="ak-3f8a2c9e1b7d4f6h5j2k8m3n9p4r6t7")
    answer = responser.respond(system_info="You are a helpful assistant!",
                               user_prompt="What model are you?")
    print(answer)
