FROM ollama/ollama:latest

RUN apt-get update && apt-get install -y --no-install-recommends curl && rm -rf /var/lib/apt/lists/*

# Add a non-root user (UID 1000)
RUN useradd -m -u 1000 user

# Switch to non-root user
USER user

# Set environment variables; OLLAMA_HOST=0.0.0.0 makes the server listen on all interfaces
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH \
    OLLAMA_HOST=0.0.0.0

# Set the working directory
WORKDIR $HOME/app

# Copy the Modelfile
COPY --chown=user:user Modelfile $HOME/app/
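
# The Modelfile itself is not part of this Dockerfile; as a rough sketch
# (an assumption, not the actual file), it would point Ollama at the GGUF
# weights downloaded in the next step, e.g.:
#
#   FROM ./llama.gguf
#   PARAMETER temperature 0.7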

# Download the llama.gguf file
RUN curl -fsSL -o llama.gguf "https://huggingface.co/gingdev/llama7b-ictu-v2/resolve/main/llama7b_q4_k_m.gguf?download=true"

# Start a temporary Ollama server for this build step, wait until it responds,
# then register the model from the Modelfile; the created model persists in
# the image layer under $HOME/.ollama
RUN ollama serve & \
    timeout 30 sh -c 'until curl -s -o /dev/null http://127.0.0.1:11434/; do sleep 1; done' && \
    ollama create llama -f Modelfile

# Expose the port
EXPOSE 11434
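
# Usage sketch (image and tag names are assumptions): build the image, run it,
# then query the baked-in model through Ollama's HTTP API:
#
#   docker build -t ollama-llama .
#   docker run -p 11434:11434 ollama-llama
#   curl http://localhost:11434/api/generate \
#     -d '{"model": "llama", "prompt": "Hello", "stream": false}'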