# syntax=docker/dockerfile:1
# Image: Ollama server with the gingdev/llama7b-ictu-v2 GGUF model pre-built in,
# so containers start with the "llama" model already registered.
# NOTE(review): :latest is non-reproducible — pin a specific tag (or digest) once validated.
FROM ollama/ollama:latest

# curl is needed for the model download below and for the readiness/health probes.
# Install and clean the apt lists in the same layer so the cache isn't baked into the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
      ca-certificates \
      curl \
    && rm -rf /var/lib/apt/lists/*

# Dedicated non-root user; fixed UID 1000 gives predictable host-side file ownership.
RUN useradd -m -u 1000 user
USER user

# OLLAMA_HOST=0.0.0.0 makes the server listen on all interfaces inside the container
# (the default binds to localhost only, which would be unreachable via -p).
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH \
    OLLAMA_HOST=0.0.0.0

WORKDIR $HOME/app

# Modelfile describes the custom model built from the downloaded GGUF weights.
COPY --chown=user:user Modelfile $HOME/app/

# Fetch the quantized weights at build time so the image is fully self-contained.
# NOTE(review): no integrity check on the download — consider ADD --checksum=sha256:…
# or a curl + sha256sum -c step once the expected digest is known.
RUN curl -fsSL -o llama.gguf \
      "https://huggingface.co/gingdev/llama7b-ictu-v2/resolve/main/llama7b_q4_k_m.gguf?download=true"

# Bake the model into the image: start a throwaway server in the background, wait
# (max 30s) until it answers, then register the model from the Modelfile. The
# background server dies when this RUN's shell exits; the created model persists
# in the layer under ~/.ollama. Ollama answers "Ollama is running" at "/" — there
# is no /health endpoint, so probe the root path and require an HTTP 2xx with -f
# (the old "-s …/health" form exited 0 even on a 404).
RUN ollama serve & \
    timeout 30 sh -c 'until curl -sf http://127.0.0.1:11434/; do sleep 1; done' && \
    ollama create llama -f Modelfile

# Documentation only — publish with `docker run -p 11434:11434` (or equivalent).
EXPOSE 11434

# Cheap liveness probe so orchestrators can detect a wedged server; curl is
# installed above and the root endpoint is side-effect-free.
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
  CMD curl -sf http://127.0.0.1:11434/ || exit 1