# Base image
FROM ubuntu:latest
RUN apt-get update && apt-get install -y curl gnupg ca-certificates
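# Add the NVIDIA Container Toolkit apt repository (signing key + source list)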
RUN curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey \
    | gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
RUN curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
    | sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' \
    | tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
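# Install the NVIDIA Container Toolkit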
RUN apt-get update && apt-get install -y nvidia-container-toolkit
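# Install Ollama using its install script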
RUN curl -fsSL https://ollama.ai/install.sh | sh
# Create the directory and give appropriate permissions
RUN mkdir -p /.ollama && chmod 777 /.ollama
WORKDIR /.ollama
# Copy the entry point script
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# Set the entry point script as the default command
ENTRYPOINT ["/entrypoint.sh"]
CMD ["ollama", "serve"]
# Set the model as an environment variable (this can be overridden)
ENV model=llama2
# Expose the server port
EXPOSE 7860
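# Note: entrypoint.sh is copied from the build context but not shown in this file.
# The CMD above ("ollama serve") is passed to it as arguments. A minimal sketch of
# what such a script might contain (an assumption, not the actual script) follows;
# it would likely also set OLLAMA_HOST so the server listens on the exposed port
# 7860 instead of Ollama's default of 127.0.0.1:11434.
#
#   #!/bin/sh
#   export OLLAMA_HOST=0.0.0.0:7860   # hypothetical: match EXPOSE 7860
#   "$@" &                            # run the CMD (ollama serve) in the background
#   sleep 5                           # give the server a moment to start
#   ollama pull "${model}"            # pre-pull the model selected via ENV model=...
#   wait                              # keep the server process in the foreground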