File size: 1,021 Bytes
c84c1b2
d19e237
c84c1b2
d19e237
c84c1b2
 
382bd2f
c84c1b2
 
 
 
 
 
 
d19e237
f8bf968
4848ae7
c84c1b2
187bfaf
382bd2f
 
 
 
c84c1b2
 
dc5ffc0
c84c1b2
d19e237
c84c1b2
 
 
 
382bd2f
c84c1b2
0984342
c84c1b2
 
 
0984342
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
# Ubuntu release shared by the build and runtime stages.
ARG UBUNTU_VERSION=22.04
ARG BASE_CPU_CONTAINER=ubuntu:${UBUNTU_VERSION}

FROM ${BASE_CPU_CONTAINER} AS build

# Toolchain for building llama.cpp. --no-install-recommends keeps the
# stage small; ca-certificates is needed for the HTTPS model download
# below once recommends are suppressed. Clean the apt lists in the same
# layer so the package cache never lands in an image layer.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        cmake \
        git \
        make \
        wget \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build

# Shallow clone: only the tip of the default branch is needed to build.
# NOTE(review): unpinned — consider cloning a release tag so the build
# is reproducible instead of tracking upstream HEAD.
RUN git clone --depth 1 https://github.com/ggerganov/llama.cpp.git

WORKDIR /build/llama.cpp

# CPU-only build (LLAMA_CUBLAS must not be set to 1). Link statically so
# the server binary runs in the runtime stage without any shared
# libraries from this build stage.
ENV LDFLAGS="-static"

# Parallel make: the build stage is throwaway, so use all cores.
RUN make -j"$(nproc)" llama-server

# Fetch the quantized model at build time so the runtime stage can copy
# it without needing network access or wget.
WORKDIR /data
RUN wget https://huggingface.co/brunopio/Llama3-8B-1.58-100B-tokens-GGUF/resolve/main/Llama3-8B-1.58-100B-tokens-TQ2_0.gguf -nv -O model.gguf

FROM ${BASE_CPU_CONTAINER} AS runtime

# Unprivileged runtime user; port 7860 is above 1024 so no extra
# capabilities are required to bind it as non-root.
RUN groupadd --system app && useradd --system --gid app --home /app app

WORKDIR /app

# Statically linked server binary and the model produced by the build stage.
COPY --from=build --chown=app:app /build/llama.cpp/llama-server /app/
COPY --from=build --chown=app:app /data/model.gguf /data/model.gguf
# --chmod makes the script executable at copy time, replacing the extra
# `RUN chmod +x` layer (requires BuildKit, which is the modern default).
COPY --chown=app:app --chmod=755 ./run.sh /app/run.sh

USER app

# Documentation only — publish with `-p` at run time.
EXPOSE 7860

# Exec form so run.sh runs as PID 1 and receives SIGTERM on `docker stop`.
CMD ["/app/run.sh"]