More ideas...
- Dockerfile +21 -2
- build.sh +14 -0
- ollama_setup.sh +18 -0
- run.sh +2 -0
- stop.sh +4 -0
Dockerfile CHANGED
@@ -1,13 +1,32 @@
-FROM
+FROM ubuntu:22.04
+
+USER root
+
+RUN apt-get update
+RUN apt-get install -y python3-pip netcat curl
 
 WORKDIR /code
 
 COPY ./requirements.txt /code/requirements.txt
 
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+# Install ollama
+RUN curl -fsSL https://ollama.com/install.sh | sh
+
+RUN pip3 install --no-cache-dir --upgrade -r /code/requirements.txt
 
 COPY . .
 
 EXPOSE 7860
 
+
+# Copy the ollama_setup.sh script
+COPY ollama_setup.sh /usr/local/bin/ollama_setup.sh
+
+# Set permissions for the ollama_setup.sh script
+RUN chmod +x /usr/local/bin/ollama_setup.sh
+
+# Use the ollama_setup.sh
+RUN /usr/local/bin/ollama_setup.sh
+
 CMD ["shiny", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]
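One subtlety in this Dockerfile: RUN /usr/local/bin/ollama_setup.sh executes at build time, so the pulled model is baked into the image layer, but the ollama serve process it starts does not survive into the running container; the CMD launches only the Shiny app. A minimal sketch of a runtime entrypoint that restarts the server before handing off to the app (entrypoint.sh is hypothetical, not part of this commit, and would be wired in with ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] in place of the CMD):

#!/bin/bash
# entrypoint.sh (hypothetical): start Ollama at container runtime,
# wait for it to accept connections, then exec the Shiny app.
/usr/local/bin/ollama serve &

# Same readiness check as ollama_setup.sh below
while ! nc -z localhost 11434; do
    sleep 2
done

exec shiny run app.py --host 0.0.0.0 --port 7860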
build.sh ADDED
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+# Set the platform flag if we're on ARM
+arch=$(uname -m)
+if [[ "$arch" == "arm64" || "$arch" == "aarch64" ]]; then
+    platform_flag="--platform linux/amd64"
+else
+    platform_flag=""
+fi
+
+# Build the Docker image
+
+docker build --progress=plain $platform_flag "$@" -t "openworm_ai" .
+
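Because build.sh forwards its own arguments to docker build via "$@", extra build flags can be appended at the call site, for example:

# Pass docker build flags straight through, e.g. for a clean rebuild:
./build.sh --no-cache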
ollama_setup.sh ADDED
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Start Ollama server in the background
+/usr/local/bin/ollama serve &
+
+# Wait for the server to be ready
+while ! nc -z localhost 11434; do
+    echo "Waiting for Ollama server to start..."
+    sleep 2
+done
+
+# Pull the model
+echo "Pulling the model..."
+#ollama pull nomic-embed-text
+ollama pull llama3.2:1b
+
+# Keep the container running
+#wait
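The readiness loop relies on netcat, which is why the Dockerfile installs it. A sketch of an equivalent probe against Ollama's HTTP API (assuming the default port 11434; /api/tags is Ollama's model-listing endpoint) would drop that dependency:

# Alternative readiness check via the Ollama HTTP API (no netcat needed):
until curl -sf http://localhost:11434/api/tags > /dev/null; do
    echo "Waiting for Ollama server to start..."
    sleep 2
done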
run.sh ADDED
@@ -0,0 +1,2 @@
+
+docker run --name=openworm_ai_0 -p 8888:7860 openworm_ai
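run.sh maps host port 8888 to container port 7860, where the Shiny CMD listens, so once the container is up the app should be reachable at http://localhost:8888. As written the container runs in the foreground; a detached variant (an assumption, but one that pairs naturally with stop.sh managing a long-lived named container) would be:

# Detached variant: run in the background under the same name
docker run -d --name=openworm_ai_0 -p 8888:7860 openworm_ai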
stop.sh ADDED
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+docker stop openworm_ai_0
+docker rm openworm_ai_0
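If preferred, the stop/remove pair collapses into a single command:

# Equivalent one-liner: force-remove the container even while it is running
docker rm -f openworm_ai_0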