barisaydin committed on
Commit edf8eda
1 Parent(s): 4464da3

Update Dockerfile

Files changed (1)
  1. Dockerfile +16 -5
Dockerfile CHANGED
@@ -3,7 +3,7 @@ FROM ubuntu:latest
 
 # Update the package list, install curl, and clean up to reduce image size
 RUN apt-get update && \
-    apt-get install -y curl && \
+    apt-get install -y curl nginx && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*
 
@@ -11,7 +11,8 @@ RUN apt-get update && \
 RUN curl -fsSL https://ollama.com/install.sh | sh
 
 # Set the Ollama host environment variable
-ENV OLLAMA_HOST=0.0.0.0
+# ENV OLLAMA_HOST=0.0.0.0
+ENV OLLAMA_HOST=127.0.0.1
 
 # Create the Ollama directory and set permissions
 RUN mkdir -p /.ollama && chmod 777 /.ollama
@@ -22,9 +23,19 @@ RUN mkdir -p /usr/share/ollama/.ollama/models && chmod -R 777 /usr/share/ollama/
 # Set the Ollama models environment variable
 ENV OLLAMA_MODELS="/usr/share/ollama/.ollama/models"
 
+# Copy the Nginx configuration file
+COPY nginx.conf /etc/nginx/nginx.conf
+
+# Copy Lua script
+COPY validate_api_key.lua /usr/local/openresty/nginx/lua/validate_api_key.lua
+
 # Expose the Ollama server port
-EXPOSE 7860
+EXPOSE 80
 
 # Run the Ollama server and pull the model
-CMD ollama serve & sleep 5 && ollama pull llama3 && ollama pull llava-llama3 && wait
-# CMD ollama serve & sleep 5 && ollama pull llama3:70b && wait
+CMD ollama serve & \
+    sleep 5 && \
+    ollama pull llama3 && \
+    ollama pull llava-llama3 && \
+    wait && \
+    nginx -g 'daemon off;'
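
The nginx.conf and validate_api_key.lua files referenced by the new COPY instructions are not part of this commit, so their contents are not shown here. As a rough sketch only, a reverse-proxy configuration consistent with these changes (Ollama bound to 127.0.0.1 on its default port 11434, nginx listening on the exposed port 80, and the copied Lua script guarding requests) might look like the following; the Lua check assumes an nginx build with ngx_http_lua_module (for example OpenResty), which the stock Ubuntu nginx package installed above does not enable by default:

# Hypothetical nginx.conf sketch; the actual file is not included in this commit.
worker_processes 1;

events {
    worker_connections 1024;
}

http {
    server {
        # Listen on the port the Dockerfile exposes
        listen 80;

        location / {
            # Assumed API-key check via the copied Lua script
            # (requires ngx_http_lua_module / OpenResty, not stock nginx)
            access_by_lua_file /usr/local/openresty/nginx/lua/validate_api_key.lua;

            # Forward requests to the local Ollama server on its default port
            proxy_pass http://127.0.0.1:11434;
            proxy_set_header Host $host;
        }
    }
}

Binding Ollama to 127.0.0.1 keeps its API reachable only through this proxy, which is why the exposed port changes from 7860 to 80 and why nginx runs in the foreground (nginx -g 'daemon off;') as the container's long-running process.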