# mixtral-api-local/Dockerfile
ARG UBUNTU_VERSION=22.04
ARG CUDA_VERSION=12.6.0
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
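# These defaults can be overridden at build time, e.g. (the 12.4.1 tag here
# is illustrative; check the published nvidia/cuda tags before relying on it):
#   docker build --build-arg CUDA_VERSION=12.4.1 -t mixtral-api-local .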
FROM ${BASE_CUDA_DEV_CONTAINER} AS build
ARG CUDA_DOCKER_ARCH="default"
RUN apt-get update --fix-missing && \
apt-get install -y --no-install-recommends git build-essential gcc cmake curl libcurl4-openssl-dev && \
rm -rf /var/lib/apt/lists/*
WORKDIR /build
RUN git clone https://github.com/ggerganov/llama.cpp.git
WORKDIR /build/llama.cpp
# Minimum version needed for the request queue to work: 821f0a271e7c9ee737945245dd7abfa22cc9b5b0
RUN git checkout b3465
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# ENV LLAMA_CUBLAS=1 (old flag name; replaced by GGML_CUDA in newer llama.cpp builds)
ENV GGML_CUDA=1
# Use the default CUDA archs if not specified
RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
fi && \
cmake -B build -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
cmake --build build --config Release --target llama-server -j$(nproc)
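# Example: pinning the build to a single compute capability shortens compile
# time and shrinks the image (86 below is illustrative; match it to your GPU):
#   docker build --build-arg CUDA_DOCKER_ARCH=86 -t mixtral-api-local .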
FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
RUN apt-get update --fix-missing && \
apt-get install -y --no-install-recommends wget libgomp1 libcurl4-openssl-dev && \
rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY --from=build /build/llama.cpp/build/ggml/src/libggml.so /app/libggml.so
COPY --from=build /build/llama.cpp/build/src/libllama.so /app/libllama.so
COPY --from=build /build/llama.cpp/build/bin/llama-server /app/server
COPY ./run.sh /app/run.sh
EXPOSE 7867
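# Example run, assuming the NVIDIA Container Toolkit is installed on the host
# (the port mapping matches the EXPOSE above):
#   docker run --gpus all -p 7867:7867 mixtral-api-local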
# Make the script executable
RUN chmod +x run.sh
# Run the startup script
CMD ./run.sh
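# run.sh comes from the build context and is not part of this file; a minimal
# sketch of what such a script might contain (the model path and flags below
# are assumptions, not the actual contents):
#   #!/bin/sh
#   exec /app/server --host 0.0.0.0 --port 7867 --model /models/model.gguf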