|
|
|
# Base image: slim Debian with Python 3.11 preinstalled.
# NOTE(review): consider pinning by digest for fully reproducible builds.
FROM python:3.11-slim

# Toolchain and libraries needed to compile llama.cpp with OpenBLAS.
# update + install combined in ONE layer (a lone `apt-get update` layer goes
# stale); --no-install-recommends and the list cleanup keep the layer small.
# Packages sorted alphabetically for diffability.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        libopenblas-dev \
        ninja-build \
        pkg-config \
        unzip \
    && rm -rf /var/lib/apt/lists/*
|
# Fetch llama.cpp at a pinned release tag (b1382) and build it with OpenBLAS.
# All in ONE layer so the downloaded zip is deleted in the same layer that
# created it — removing it in a later RUN would not shrink the image.
# -f makes curl fail the build on HTTP errors instead of silently saving an
# error page as the archive; make -C replaces the `cd … &&` anti-pattern.
RUN curl -fL https://github.com/ggerganov/llama.cpp/archive/refs/tags/b1382.zip -o b1382.zip && \
    unzip -q b1382.zip && \
    rm b1382.zip && \
    mv llama.cpp-b1382 llama.cpp && \
    make -C llama.cpp LLAMA_OPENBLAS=1 -j2
|
|
|
|
|
# Download the quantized Mistral-7B-OpenOrca GGUF model into /model.
# Fix: the original wrote to the RELATIVE path "model/gguf-model.bin", which
# only landed in /model because the default WORKDIR happens to be "/"; use
# the absolute path so it matches the mkdir regardless of WORKDIR.
# -f makes curl fail the build on HTTP errors instead of baking an HTML
# error page into the image as the "model".
RUN mkdir -p /model && \
    curl -fL https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-GGUF/resolve/main/mistral-7b-openorca.Q4_K_M.gguf -o /model/gguf-model.bin
|
|
|
# Copy the entire build context into the image.
# NOTE(review): no WORKDIR is set, so this copies into "/" — add a
# .dockerignore (and ideally a WORKDIR such as /app) to avoid shipping
# .git, caches, or local secrets. Left as-is because start_server.sh
# (not visible here) may depend on paths being relative to "/".
COPY . .

# Make the launch script executable; CMD below runs it via /bin/sh.
# NOTE(review): COPY --chmod=755 would avoid this extra layer, but would
# apply to every copied file, so the explicit chmod is kept.
RUN chmod +x ./start_server.sh
|
|
|
|
|
# Runtime configuration read by start_server.sh; grouped in a single
# instruction since the two values are related.
ENV HOST=0.0.0.0 \
    PORT=7860

# Documentation only — EXPOSE does not publish the port by itself.
EXPOSE ${PORT}

# Launch the server through the bundled script (exec form, sh interpreter).
CMD ["/bin/sh", "./start_server.sh"]