# syntax=docker/dockerfile:1

# llama.cpp HTTP server with OpenBLAS acceleration.
# Serves a Mythalion-13B Q2_K GGUF model together with the LLaVA v1.5
# CLIP projector (mmproj) on port 7860.
FROM python:3.11.6-bullseye

# Build-time OS dependencies. Use apt-get (not apt) for scripted use,
# skip recommends, and clean the apt lists in the same layer so the
# cache never lands in the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        cmake \
        libopenblas-dev \
    && rm -rf /var/lib/apt/lists/*

# Pin the llama.cpp revision for a reproducible build. The LLAMA_BLAS
# cmake flags below only exist in older revisions, so an unpinned clone
# of the default branch (as the original did) will eventually break.
# NOTE(review): b1618 is a late-2023 tag contemporary with these flags
# and the mmproj-capable server — confirm the exact ref you want.
ARG LLAMA_CPP_REF=b1618
RUN git clone --depth 1 --branch "${LLAMA_CPP_REF}" \
        https://github.com/ggerganov/llama.cpp /llama.cpp

# WORKDIR creates missing directories, so no explicit mkdir is needed;
# configure and compile in one atomic layer.
WORKDIR /llama.cpp/build
RUN cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DLLAMA_NATIVE=ON \
    && cmake --build . --config Release

# Run from the directory holding the built server binary, since CMD
# references it as ./server.
WORKDIR /llama.cpp/build/bin

# Model weights. The Mythalion GGUF is deliberately saved under the
# filename the CMD expects (ggml-model-f16.gguf), as in the original.
RUN wget -q https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf \
    && wget -q https://huggingface.co/TheBloke/Mythalion-13B-GGUF/resolve/main/mythalion-13b.Q2_K.gguf \
        -O ggml-model-f16.gguf

# Don't serve as root; the server only needs read access to the models.
RUN useradd --system --uid 10001 llama
USER llama

# Documentation of the served port (matches --port below).
EXPOSE 7860

CMD ["./server", "-m", "ggml-model-f16.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "--port", "7860", "-c", "2048", "--batch-size", "1024", "--verbose"]