Umer797's picture
Update llm_node.py
71f7f0b verified
raw
history blame
272 Bytes
from langchain_community.chat_models import ChatOpenAI
def llm_node(question, model="gpt-3.5-turbo", temperature=0):
    """Ask an OpenAI chat model to answer *question* and return the reply text.

    Args:
        question: The user question to answer. Interpolated into a fixed
            instruction prompt, so any string is accepted.
        model: OpenAI chat model name. Defaults to the original hard-coded
            "gpt-3.5-turbo"; exposed as a parameter for flexibility.
        temperature: Sampling temperature. Defaults to 0 (deterministic
            output), matching the original behavior.

    Returns:
        The model's reply content with surrounding whitespace stripped.

    Note:
        Requires OpenAI credentials in the environment (e.g. OPENAI_API_KEY)
        and performs a network call on every invocation.
    """
    # NOTE(review): ChatOpenAI from langchain_community.chat_models is
    # deprecated upstream in favor of langchain_openai.ChatOpenAI — consider
    # migrating when that dependency is available. TODO confirm package
    # availability before switching the import.
    llm = ChatOpenAI(model=model, temperature=temperature)
    prompt = f"Answer this carefully and exactly:\n{question}"
    response = llm.invoke(prompt)
    return response.content.strip()