Spaces:
Build error
Build error
T-K-O-H committed on
Commit ·
65c8966
1
Parent(s): 501ef25
Update code to use environment variables and improve error handling
Browse files- Dockerfile +0 -18
- agent_graph.py +1 -1
- app.py +16 -91
- requirements.txt +2 -4
- test_agent.py +8 -20
- tools.py +3 -14
Dockerfile
CHANGED
|
@@ -9,9 +9,6 @@ RUN apt-get update && apt-get install -y \
|
|
| 9 |
software-properties-common \
|
| 10 |
&& rm -rf /var/lib/apt/lists/*
|
| 11 |
|
| 12 |
-
# Create a non-root user
|
| 13 |
-
RUN useradd -m -u 1000 chainlit_user
|
| 14 |
-
|
| 15 |
# Copy requirements first for better caching
|
| 16 |
COPY requirements.txt /code/requirements.txt
|
| 17 |
RUN pip install --no-cache-dir -r requirements.txt
|
|
@@ -19,25 +16,10 @@ RUN pip install --no-cache-dir -r requirements.txt
|
|
| 19 |
# Copy the rest of the application
|
| 20 |
COPY . /code/
|
| 21 |
|
| 22 |
-
# Create necessary directories and set permissions
|
| 23 |
-
RUN mkdir -p /code/.files && \
|
| 24 |
-
chown -R chainlit_user:chainlit_user /code && \
|
| 25 |
-
chmod -R 755 /code
|
| 26 |
-
|
| 27 |
# Set environment variables
|
| 28 |
ENV HOST=0.0.0.0
|
| 29 |
ENV PORT=7860
|
| 30 |
ENV PYTHONPATH=/code
|
| 31 |
-
ENV OPENAI_API_KEY=${OPENAI_API_KEY}
|
| 32 |
-
ENV PYTHONUNBUFFERED=1
|
| 33 |
-
|
| 34 |
-
# Create log directory and set permissions
|
| 35 |
-
RUN mkdir -p /code/logs && \
|
| 36 |
-
chown -R chainlit_user:chainlit_user /code/logs && \
|
| 37 |
-
chmod -R 755 /code/logs
|
| 38 |
-
|
| 39 |
-
# Switch to non-root user
|
| 40 |
-
USER chainlit_user
|
| 41 |
|
| 42 |
# Command to run the application
|
| 43 |
CMD chainlit run app.py --host $HOST --port $PORT
|
|
|
|
| 9 |
software-properties-common \
|
| 10 |
&& rm -rf /var/lib/apt/lists/*
|
| 11 |
|
|
|
|
|
|
|
|
|
|
| 12 |
# Copy requirements first for better caching
|
| 13 |
COPY requirements.txt /code/requirements.txt
|
| 14 |
RUN pip install --no-cache-dir -r requirements.txt
|
|
|
|
| 16 |
# Copy the rest of the application
|
| 17 |
COPY . /code/
|
| 18 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
# Set environment variables
|
| 20 |
ENV HOST=0.0.0.0
|
| 21 |
ENV PORT=7860
|
| 22 |
ENV PYTHONPATH=/code
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
# Command to run the application
|
| 25 |
CMD chainlit run app.py --host $HOST --port $PORT
|
agent_graph.py
CHANGED
|
@@ -18,7 +18,7 @@ if not openai_api_key:
|
|
| 18 |
|
| 19 |
# Initialize the LLM
|
| 20 |
llm = ChatOpenAI(
|
| 21 |
-
model="gpt-
|
| 22 |
temperature=0,
|
| 23 |
openai_api_key=openai_api_key
|
| 24 |
)
|
|
|
|
| 18 |
|
| 19 |
# Initialize the LLM
|
| 20 |
llm = ChatOpenAI(
|
| 21 |
+
model="gpt-4",
|
| 22 |
temperature=0,
|
| 23 |
openai_api_key=openai_api_key
|
| 24 |
)
|
app.py
CHANGED
|
@@ -3,11 +3,6 @@ import chainlit as cl
|
|
| 3 |
from agent_graph import agent_node
|
| 4 |
from dotenv import load_dotenv # Import dotenv
|
| 5 |
from typing import List, Dict
|
| 6 |
-
import logging
|
| 7 |
-
|
| 8 |
-
# Configure logging
|
| 9 |
-
logging.basicConfig(level=logging.INFO)
|
| 10 |
-
logger = logging.getLogger(__name__)
|
| 11 |
|
| 12 |
# Load environment variables from .env file
|
| 13 |
load_dotenv()
|
|
@@ -16,7 +11,6 @@ load_dotenv()
|
|
| 16 |
openai_api_key = os.getenv("OPENAI_API_KEY")
|
| 17 |
|
| 18 |
if not openai_api_key:
|
| 19 |
-
logger.error("OpenAI API key is missing in the .env file")
|
| 20 |
raise ValueError("OpenAI API key is missing in the .env file")
|
| 21 |
|
| 22 |
# Store chat history
|
|
@@ -24,12 +18,10 @@ chat_histories: Dict[str, List[Dict[str, str]]] = {}
|
|
| 24 |
|
| 25 |
@cl.on_chat_start
|
| 26 |
async def start_chat():
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
welcome_message = """👋 Welcome to the Stock Price Calculator!
|
| 33 |
|
| 34 |
I can help you with:
|
| 35 |
• Getting real-time stock prices
|
|
@@ -40,29 +32,15 @@ Try these examples:
|
|
| 40 |
• Ask 'How many MSFT shares can I buy with $10000?'
|
| 41 |
|
| 42 |
What would you like to know?"""
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
await cl.Message(
|
| 46 |
-
content=welcome_message,
|
| 47 |
-
author="Assistant",
|
| 48 |
-
type="text"
|
| 49 |
-
).send()
|
| 50 |
-
|
| 51 |
-
logger.info("Chat session started successfully")
|
| 52 |
-
except Exception as e:
|
| 53 |
-
logger.error(f"Error in start_chat: {str(e)}")
|
| 54 |
-
raise
|
| 55 |
|
| 56 |
@cl.on_message
|
| 57 |
async def handle_message(message: cl.Message):
|
| 58 |
try:
|
| 59 |
-
logger.info(f"Received message: {message.content}")
|
| 60 |
-
|
| 61 |
# Get chat history for this session
|
| 62 |
session_id = cl.user_session.get("id")
|
| 63 |
-
|
| 64 |
-
chat_histories[session_id] = []
|
| 65 |
-
history = chat_histories[session_id]
|
| 66 |
|
| 67 |
# Add current message to history
|
| 68 |
history.append({"role": "user", "content": message.content})
|
|
@@ -75,78 +53,25 @@ async def handle_message(message: cl.Message):
|
|
| 75 |
|
| 76 |
# Process the message with agent_node
|
| 77 |
response = agent_node(state)
|
| 78 |
-
logger.info(f"Agent response: {response}")
|
| 79 |
|
| 80 |
# Send the response back to the user
|
| 81 |
if isinstance(response, dict) and "output" in response:
|
| 82 |
-
# Format the response for better display
|
| 83 |
-
formatted_response = response["output"]
|
| 84 |
-
|
| 85 |
# Add response to history
|
| 86 |
-
history.append({"role": "assistant", "content":
|
| 87 |
-
|
| 88 |
-
# Check if the response contains ticker suggestions
|
| 89 |
-
if "Did you mean one of these?" in formatted_response:
|
| 90 |
-
# Split the response into main message and suggestions
|
| 91 |
-
main_msg, suggestions = formatted_response.split("Did you mean one of these?")
|
| 92 |
-
|
| 93 |
-
# Send the main message
|
| 94 |
-
await cl.Message(
|
| 95 |
-
content=main_msg.strip(),
|
| 96 |
-
author="Assistant",
|
| 97 |
-
type="text"
|
| 98 |
-
).send()
|
| 99 |
-
|
| 100 |
-
# Create a list of suggestions
|
| 101 |
-
suggestion_list = suggestions.strip().split('\n')
|
| 102 |
-
elements = []
|
| 103 |
-
for suggestion in suggestion_list:
|
| 104 |
-
if suggestion.strip():
|
| 105 |
-
elements.append(cl.Text(name="Suggestion", content=suggestion.strip()))
|
| 106 |
-
|
| 107 |
-
# Send the suggestions
|
| 108 |
-
if elements:
|
| 109 |
-
await cl.Message(
|
| 110 |
-
content="",
|
| 111 |
-
elements=elements,
|
| 112 |
-
author="Assistant",
|
| 113 |
-
type="text"
|
| 114 |
-
).send()
|
| 115 |
-
else:
|
| 116 |
-
# Send regular response
|
| 117 |
-
await cl.Message(
|
| 118 |
-
content=formatted_response,
|
| 119 |
-
author="Assistant",
|
| 120 |
-
type="text"
|
| 121 |
-
).send()
|
| 122 |
else:
|
| 123 |
-
|
| 124 |
-
logger.error(f"{error_msg} Response: {response}")
|
| 125 |
-
await cl.Message(
|
| 126 |
-
content=error_msg,
|
| 127 |
-
author="Assistant",
|
| 128 |
-
type="text"
|
| 129 |
-
).send()
|
| 130 |
|
| 131 |
# Update history in storage
|
| 132 |
chat_histories[session_id] = history
|
| 133 |
|
| 134 |
except Exception as e:
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
await cl.Message(
|
| 138 |
-
content="Sorry, something went wrong while processing your request. Please try again.",
|
| 139 |
-
author="Assistant",
|
| 140 |
-
type="text"
|
| 141 |
-
).send()
|
| 142 |
|
| 143 |
@cl.on_chat_end
|
| 144 |
async def end_chat():
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
del chat_histories[session_id]
|
| 150 |
-
logger.info("Chat session ended successfully")
|
| 151 |
-
except Exception as e:
|
| 152 |
-
logger.error(f"Error in end_chat: {str(e)}")
|
|
|
|
| 3 |
from agent_graph import agent_node
|
| 4 |
from dotenv import load_dotenv # Import dotenv
|
| 5 |
from typing import List, Dict
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
# Load environment variables from .env file
|
| 8 |
load_dotenv()
|
|
|
|
| 11 |
openai_api_key = os.getenv("OPENAI_API_KEY")
|
| 12 |
|
| 13 |
if not openai_api_key:
|
|
|
|
| 14 |
raise ValueError("OpenAI API key is missing in the .env file")
|
| 15 |
|
| 16 |
# Store chat history
|
|
|
|
| 18 |
|
| 19 |
@cl.on_chat_start
|
| 20 |
async def start_chat():
|
| 21 |
+
# Initialize empty chat history for this session
|
| 22 |
+
chat_histories[cl.user_session.get("id")] = []
|
| 23 |
+
|
| 24 |
+
welcome_message = """👋 Welcome to the Stock Price Calculator!
|
|
|
|
|
|
|
| 25 |
|
| 26 |
I can help you with:
|
| 27 |
• Getting real-time stock prices
|
|
|
|
| 32 |
• Ask 'How many MSFT shares can I buy with $10000?'
|
| 33 |
|
| 34 |
What would you like to know?"""
|
| 35 |
+
|
| 36 |
+
await cl.Message(content=welcome_message).send()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
@cl.on_message
|
| 39 |
async def handle_message(message: cl.Message):
|
| 40 |
try:
|
|
|
|
|
|
|
| 41 |
# Get chat history for this session
|
| 42 |
session_id = cl.user_session.get("id")
|
| 43 |
+
history = chat_histories.get(session_id, [])
|
|
|
|
|
|
|
| 44 |
|
| 45 |
# Add current message to history
|
| 46 |
history.append({"role": "user", "content": message.content})
|
|
|
|
| 53 |
|
| 54 |
# Process the message with agent_node
|
| 55 |
response = agent_node(state)
|
|
|
|
| 56 |
|
| 57 |
# Send the response back to the user
|
| 58 |
if isinstance(response, dict) and "output" in response:
|
|
|
|
|
|
|
|
|
|
| 59 |
# Add response to history
|
| 60 |
+
history.append({"role": "assistant", "content": response["output"]})
|
| 61 |
+
await cl.Message(content=response["output"]).send()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
else:
|
| 63 |
+
await cl.Message(content="Received an invalid response format from the agent.").send()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
|
| 65 |
# Update history in storage
|
| 66 |
chat_histories[session_id] = history
|
| 67 |
|
| 68 |
except Exception as e:
|
| 69 |
+
print(f"Error occurred: {e}")
|
| 70 |
+
await cl.Message(content="Sorry, something went wrong while processing your request.").send()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
|
| 72 |
@cl.on_chat_end
|
| 73 |
async def end_chat():
|
| 74 |
+
# Clean up chat history when session ends
|
| 75 |
+
session_id = cl.user_session.get("id")
|
| 76 |
+
if session_id in chat_histories:
|
| 77 |
+
del chat_histories[session_id]
|
|
|
|
|
|
|
|
|
|
|
|
requirements.txt
CHANGED
|
@@ -1,9 +1,7 @@
|
|
| 1 |
-
chainlit
|
| 2 |
langchain-openai>=0.0.3
|
| 3 |
langchain-core>=0.1.4
|
| 4 |
openai>=1.7.0
|
| 5 |
python-dotenv>=1.0.0
|
| 6 |
yfinance>=0.2.36
|
| 7 |
-
|
| 8 |
-
numpy>=1.24.0
|
| 9 |
-
requests>=2.31.0
|
|
|
|
| 1 |
+
chainlit
|
| 2 |
langchain-openai>=0.0.3
|
| 3 |
langchain-core>=0.1.4
|
| 4 |
openai>=1.7.0
|
| 5 |
python-dotenv>=1.0.0
|
| 6 |
yfinance>=0.2.36
|
| 7 |
+
typing-extensions>=4.9.0
|
|
|
|
|
|
test_agent.py
CHANGED
|
@@ -1,18 +1,10 @@
|
|
| 1 |
from langchain.chat_models import ChatOpenAI
|
| 2 |
from langchain.agents import initialize_agent, Tool, AgentType
|
| 3 |
from tools import get_price, buying_power_tool
|
| 4 |
-
from dotenv import load_dotenv
|
| 5 |
-
import os
|
| 6 |
-
|
| 7 |
-
# Load environment variables from .env file
|
| 8 |
-
load_dotenv()
|
| 9 |
|
| 10 |
-
#
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
model_name="gpt-3.5-turbo",
|
| 14 |
-
openai_api_key=os.getenv("OPENAI_API_KEY")
|
| 15 |
-
)
|
| 16 |
|
| 17 |
# Define tools
|
| 18 |
tools = [
|
|
@@ -23,18 +15,14 @@ tools = [
|
|
| 23 |
),
|
| 24 |
Tool(
|
| 25 |
name="BuyingPowerCalculator",
|
| 26 |
-
func=buying_power_tool,
|
| 27 |
-
description="Calculate how many shares you can buy. Input format: '
|
| 28 |
)
|
| 29 |
]
|
| 30 |
|
| 31 |
-
#
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
llm,
|
| 35 |
-
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
|
| 36 |
-
verbose=True
|
| 37 |
-
)
|
| 38 |
|
| 39 |
# Run a test
|
| 40 |
response = agent.run("How much is AAPL?")
|
|
|
|
| 1 |
from langchain.chat_models import ChatOpenAI
|
| 2 |
from langchain.agents import initialize_agent, Tool, AgentType
|
| 3 |
from tools import get_price, buying_power_tool
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
+
# Make sure OpenAI key is set in your environment
|
| 6 |
+
import os
|
| 7 |
+
os.environ["OPENAI_API_KEY"] = "<REDACTED: a real secret key was committed here — rotate it immediately and load it via os.getenv('OPENAI_API_KEY') / a .env file instead of hardcoding>"
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
# Define tools
|
| 10 |
tools = [
|
|
|
|
| 15 |
),
|
| 16 |
Tool(
|
| 17 |
name="BuyingPowerCalculator",
|
| 18 |
+
func=lambda q: buying_power_tool(q.split(',')[0], float(q.split(',')[1])),
|
| 19 |
+
description="Calculate how many shares you can buy. Input format: 'AAPL,5000'"
|
| 20 |
)
|
| 21 |
]
|
| 22 |
|
| 23 |
+
# Create agent
|
| 24 |
+
llm = ChatOpenAI(model="gpt-4", temperature=0)
|
| 25 |
+
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
|
| 27 |
# Run a test
|
| 28 |
response = agent.run("How much is AAPL?")
|
tools.py
CHANGED
|
@@ -10,13 +10,7 @@ def get_price(ticker):
|
|
| 10 |
stock = yf.Ticker(ticker)
|
| 11 |
|
| 12 |
# Get the stock info
|
| 13 |
-
|
| 14 |
-
info = stock.info
|
| 15 |
-
except Exception as e:
|
| 16 |
-
if "404" in str(e):
|
| 17 |
-
return f"'{ticker}' is not a valid stock ticker. Please check the symbol and try again."
|
| 18 |
-
return f"Error fetching data for {ticker}: {str(e)}"
|
| 19 |
-
|
| 20 |
if 'regularMarketPrice' not in info or info['regularMarketPrice'] is None:
|
| 21 |
return f"Could not get the current price for {ticker}. Please verify the ticker symbol."
|
| 22 |
|
|
@@ -47,13 +41,8 @@ def buying_power_tool(query):
|
|
| 47 |
|
| 48 |
# Get stock price
|
| 49 |
stock = yf.Ticker(ticker)
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
except Exception as e:
|
| 53 |
-
if "404" in str(e):
|
| 54 |
-
return f"'{ticker}' is not a valid stock ticker. Please check the symbol and try again."
|
| 55 |
-
return f"Error fetching data for {ticker}: {str(e)}"
|
| 56 |
-
|
| 57 |
if 'regularMarketPrice' not in info or info['regularMarketPrice'] is None:
|
| 58 |
return f"Could not get the current price for {ticker}. Please verify the ticker symbol."
|
| 59 |
|
|
|
|
| 10 |
stock = yf.Ticker(ticker)
|
| 11 |
|
| 12 |
# Get the stock info
|
| 13 |
+
info = stock.info
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
if 'regularMarketPrice' not in info or info['regularMarketPrice'] is None:
|
| 15 |
return f"Could not get the current price for {ticker}. Please verify the ticker symbol."
|
| 16 |
|
|
|
|
| 41 |
|
| 42 |
# Get stock price
|
| 43 |
stock = yf.Ticker(ticker)
|
| 44 |
+
info = stock.info
|
| 45 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
if 'regularMarketPrice' not in info or info['regularMarketPrice'] is None:
|
| 47 |
return f"Could not get the current price for {ticker}. Please verify the ticker symbol."
|
| 48 |
|