Sergidev committed on
Commit
c6456fa
1 Parent(s): 41ab0bd

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +38 -7
Dockerfile CHANGED
@@ -1,4 +1,4 @@
1
- FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04
2
 
3
  # Install system dependencies
4
  RUN apt-get update && apt-get install -y \
@@ -6,21 +6,52 @@ RUN apt-get update && apt-get install -y \
6
  python3.10 \
7
  python3-pip \
8
  wget \
 
 
 
9
  && rm -rf /var/lib/apt/lists/*
10
 
11
  WORKDIR /app
12
 
13
- # Install PyTorch first
14
- RUN pip3 install --no-cache-dir torch==2.4.0
 
 
 
 
 
15
 
16
- # Install FastChat from source without editable mode
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  RUN git clone -b self-lengthen https://github.com/quanshr/FastChat.git && \
18
  cd FastChat && \
19
  pip3 install ".[model_worker,webui]"
20
 
21
- # Install other dependencies
22
- COPY requirements.txt .
23
- RUN pip3 install --no-cache-dir -r requirements.txt
24
 
25
  # Copy project files
26
  COPY . .
 
1
+ FROM nvidia/cuda:12.1.0-devel-ubuntu22.04
2
 
3
  # Install system dependencies
4
  RUN apt-get update && apt-get install -y \
 
6
  python3.10 \
7
  python3-pip \
8
  wget \
9
+ ninja-build \
10
+ gcc \
11
+ g++ \
12
  && rm -rf /var/lib/apt/lists/*
13
 
14
  WORKDIR /app
15
 
16
+ # Install basic Python packages first
17
+ RUN pip3 install --no-cache-dir \
18
+ packaging \
19
+ setuptools \
20
+ wheel \
21
+ numpy \
22
+ torch==2.4.0
23
 
24
+ # Install CUDA toolkit
25
+ ENV CUDA_HOME=/usr/local/cuda
26
+ ENV PATH=${CUDA_HOME}/bin:${PATH}
27
+ ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}
28
+
29
+ # Install dependencies in order
30
+ COPY requirements.txt .
31
+ RUN pip3 install --no-cache-dir \
32
+ transformers==4.43.2 \
33
+ accelerate \
34
+ peft \
35
+ datasets \
36
+ sentencepiece \
37
+ protobuf \
38
+ tiktoken \
39
+ scipy \
40
+ gradio \
41
+ cn2an>=0.5.22 \
42
+ langdetect>=1.0.9 \
43
+ openai \
44
+ tqdm \
45
+ && pip3 install --no-cache-dir flash-attn --no-build-isolation \
46
+ && pip3 install --no-cache-dir vllm==0.5.5 vllm-flash-attn
47
+
48
+ # Install FastChat
49
  RUN git clone -b self-lengthen https://github.com/quanshr/FastChat.git && \
50
  cd FastChat && \
51
  pip3 install ".[model_worker,webui]"
52
 
53
+ # Install LLaMA Factory
54
+ RUN pip3 install --no-cache-dir llamafactory
 
55
 
56
  # Copy project files
57
  COPY . .