File size: 898 Bytes
2a513dd
 
 
 
 
 
 
 
 
 
bfd7589
 
2a513dd
 
 
 
 
 
 
5e94cc3
4d68b27
2a513dd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
# Grab a fresh copy of the Python image
FROM python:3.11-slim

# Install build and runtime dependencies.
# --no-install-recommends keeps the layer lean; the apt lists are removed
# in the SAME layer so they never persist in the image.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        libopenblas-dev \
        ninja-build \
        pkg-config \
        unzip \
    && rm -rf /var/lib/apt/lists/*

# Fetch the pinned llama.cpp release (b1382) and unpack it.
# -f makes curl fail on HTTP errors instead of saving an error page as the
# archive; fetch, unpack, and cleanup happen in one layer so the zip is
# never baked into the image.
RUN curl -fL https://github.com/ggerganov/llama.cpp/archive/refs/tags/b1382.zip -o b1382.zip && \
    unzip b1382.zip && \
    rm b1382.zip && \
    mv llama.cpp-b1382 llama.cpp

# Build llama.cpp with OpenBLAS acceleration (WORKDIR instead of `cd`),
# then return to / so later COPY/CMD paths are unchanged.
WORKDIR /llama.cpp
RUN make LLAMA_OPENBLAS=1 -j2
WORKDIR /

# Download the quantized model. Absolute output path makes the target
# explicit (previously relied on the workdir being /); -f fails the build
# on HTTP errors rather than saving a corrupt model file.
RUN mkdir /model && \
    curl -fL https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-GGUF/resolve/main/mistral-7b-openorca.Q4_K_M.gguf -o /model/gguf-model.bin

COPY . .

# Make the server start script executable
RUN chmod +x ./start_server.sh

# Bind address and port for the server (read by start_server.sh at runtime)
ENV HOST=0.0.0.0 \
    PORT=7860

# Expose a port for the server (documentation only; does not publish it)
EXPOSE ${PORT}

# Run the server start script
CMD ["/bin/sh", "./start_server.sh"]