from langchain.tools import BaseTool
from langchain_openai.chat_models import ChatOpenAI
from config import config
import requests
class CodeGenTool(BaseTool):
    """
    Tool for generating code from a text description or from a template.

    Command format:
      - "spec text" for plain code generation.
      - "template=<path_or_url>,spec=<description>" to generate code based on
        a template (loaded from a local file or fetched over HTTP).
    """
    name: str = "code_gen"
    description: str = (
        "Generate or complete code snippets in various programming languages. Use for questions like ‘write a function…’, ‘generate SQL…’, or general code generation tasks."
    )

    @staticmethod
    def _parse_command(command: str) -> tuple:
        """Split *command* into (template_content, spec).

        Returns (None, command) when no template prefix is present.
        Raises requests.HTTPError for a failed template download and
        OSError for an unreadable local template file.
        """
        if not (command.startswith("template=") and ",spec=" in command):
            return None, command
        tmpl, spec_part = command.split(",spec=", 1)
        _, path_or_url = tmpl.split("=", 1)
        path_or_url = path_or_url.strip()
        if path_or_url.startswith("http"):  # remote template via URL
            # timeout added so a hung server cannot block the tool forever
            resp = requests.get(path_or_url, timeout=30)
            resp.raise_for_status()
            template_content = resp.text
        else:  # local template file
            with open(path_or_url, "r", encoding="utf-8") as f:
                template_content = f.read()
        return template_content, spec_part.strip()

    def _run(self, command: str) -> str:
        """Generate Python code for *command* and return it as plain text.

        :param command: either a bare spec, or "template=...,spec=..." to
            seed generation with a template.
        :return: the model's generated code as a string.
        """
        template_content, spec = self._parse_command(command)
        # Build the prompt, embedding the template when one was supplied.
        if template_content:
            prompt = (
                f"# Template provided below:\n```python\n{template_content}\n```\n"
                f"Write Python code to: {spec}"
            )
        else:
            prompt = f"Write Python code to: {spec}"
        llm = ChatOpenAI(
            model_name=config.OPENAI_MODEL,
            openai_api_key=config.OPENAI_API_KEY,
            temperature=0.2
        )
        # BUG FIX: invoke() returns an AIMessage, not a str; extract the
        # text content so the declared `-> str` contract actually holds.
        result = llm.invoke(prompt)
        return result.content if hasattr(result, "content") else str(result)

    async def _arun(self, command: str) -> str:
        """Async execution is not implemented for this tool."""
        raise NotImplementedError("Async not supported.")