llava-server / Dockerfile
spuun's picture
Update Dockerfile
28ac1de
raw
history blame
647 Bytes
# syntax=docker/dockerfile:1
# Builds llama.cpp from source and serves the LLaVA v1.5 7B multimodal model
# on port 7860 (Hugging Face Spaces' default port).
FROM python:3.11.6-bullseye

# Build deps in one layer, lists cleaned in the same layer (DL3009/DL3015).
# libopenblas-dev is required by -DLLAMA_BLAS_VENDOR=OpenBLAS below; git and
# wget ship with the bullseye base but are listed so the recipe is explicit.
RUN apt-get update && apt-get install -y --no-install-recommends \
      cmake \
      git \
      libopenblas-dev \
      wget \
    && rm -rf /var/lib/apt/lists/*

# NOTE(review): unpinned clone of master — upstream later renamed the LLAMA_*
# cmake options to GGML_* and the `server` binary to `llama-server`; pin a
# tag/commit that matches the flags used below for a reproducible build.
# --depth 1 keeps the layer small (full history is not needed to build).
RUN git clone --depth 1 https://github.com/ggerganov/llama.cpp /llama.cpp

# Absolute WORKDIR (DL3000); WORKDIR creates the directory, so no `mkdir`.
# NOTE(review): -DLLAMA_NATIVE=ON compiles for the *build* host's CPU — the
# image may crash with SIGILL on older CPUs; confirm this is intended.
WORKDIR /llama.cpp/build
RUN cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DLLAMA_NATIVE=ON \
    && cmake --build . --config Release

WORKDIR /llama.cpp/build/bin

# Fetch the multimodal projector and the LLaVA 1.5 7B weights next to the
# server binary (CMD references them by relative path). -q avoids megabytes
# of progress bars in the build log.
# NOTE(review): downloads are not checksum-verified — consider
# `ADD --checksum=sha256:…` or a sha256sum check for supply-chain safety.
RUN wget -q https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf
RUN wget -q https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/ggml-model-f16.gguf

# Run as a non-root user; the wget'd files are world-readable (0644) and the
# server binary world-executable, so no chown (a recursive chown would copy
# the multi-GB model files into a new layer).
RUN useradd --system --uid 10001 --no-create-home llava
USER llava

# Documentation only (does not publish the port); matches --port in CMD.
EXPOSE 7860

CMD ["./server", "-m", "ggml-model-f16.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "--port", "7860", "-c", "2048", "--batch-size", "1024", "--verbose"]