# tools/codegen_tool.py — code generation tool (commit 1c5d844)
from langchain.tools import BaseTool
from langchain_openai.chat_models import ChatOpenAI
from config import config
import requests
class CodeGenTool(BaseTool):
    """Generate code from a textual specification, optionally seeded by a template.

    Command formats:
      - "<spec text>" for plain code generation.
      - "template=<path_or_url>,spec=<description>" for generation based on a
        template loaded from a local file or fetched over HTTP(S).
    """

    name: str = "code_gen"
    description: str = (
        "Generate or complete code snippets in various programming languages. Use for questions like ‘write a function…’, ‘generate SQL…’, or general code generation tasks."
    )

    def _run(self, command: str) -> str:
        """Run the tool synchronously and return the generated code as plain text.

        Raises:
            requests.HTTPError: if a template URL responds with an error status.
            OSError: if a local template file cannot be read.
        """
        template_content = None
        spec = command

        # Parse the optional "template=<path_or_url>,spec=<text>" form.
        if command.startswith("template=") and ",spec=" in command:
            tmpl, spec_part = command.split(",spec=", 1)
            _, path_or_url = tmpl.split("=", 1)
            template_content = self._load_template(path_or_url.strip())
            spec = spec_part.strip()

        # Build the prompt, embedding the template when one was supplied.
        if template_content:
            prompt = (
                f"# Template provided below:\n```python\n{template_content}\n```\n"
                f"Write Python code to: {spec}"
            )
        else:
            prompt = f"Write Python code to: {spec}"

        llm = ChatOpenAI(
            model_name=config.OPENAI_MODEL,
            openai_api_key=config.OPENAI_API_KEY,
            temperature=0.2,
        )
        # invoke() returns an AIMessage, not a str; the declared return type (and
        # tool callers) expect text, so unwrap .content. Previously the message
        # object itself was returned, breaking the -> str contract.
        result = llm.invoke(prompt)
        return getattr(result, "content", str(result))

    @staticmethod
    def _load_template(path_or_url: str) -> str:
        """Load template text from an HTTP(S) URL or a local file path."""
        if path_or_url.startswith("http"):  # URL
            # Bounded timeout so a hung remote host cannot block the agent forever.
            resp = requests.get(path_or_url, timeout=30)
            resp.raise_for_status()
            return resp.text
        # Local file path.
        with open(path_or_url, "r", encoding="utf-8") as f:
            return f.read()

    async def _arun(self, command: str) -> str:
        raise NotImplementedError("Async not supported.")