update docker to compile latest bnb to properly support qlora
- .github/workflows/base.yml +3 -0
- docker/Dockerfile-base +12 -1
- requirements.txt +6 -0
.github/workflows/base.yml
CHANGED
@@ -16,9 +16,11 @@ jobs:
       include:
         - cuda: cu118
           cuda_version: 11.8.0
+          cuda_version_bnb: 118
           pytorch: 2.0.0
         - cuda: cu117
          cuda_version: 11.7.0
+          cuda_version_bnb: 117
           pytorch: 1.13.1
     steps:
       - name: Checkout
@@ -47,5 +49,6 @@
           cache-to: type=gha,mode=max
           build-args: |
             CUDA_VERSION=${{ matrix.cuda_version }}
+            CUDA_VERSION_BNB=${{ matrix.cuda_version_bnb }}
             CUDA=${{ matrix.cuda }}
             PYTORCH_VERSION=${{ matrix.pytorch }}
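For reference, the cu118 matrix entry corresponds to a local build along these lines (a sketch, not taken from the workflow: the raw docker invocation and image tag are assumptions; CI supplies the same values through its build action's build-args):

# Hypothetical local equivalent of the cu118 matrix entry; the tag name
# is illustrative, not the image name CI actually pushes.
docker build -f docker/Dockerfile-base \
  --build-arg CUDA_VERSION=11.8.0 \
  --build-arg CUDA_VERSION_BNB=118 \
  --build-arg CUDA=cu118 \
  --build-arg PYTORCH_VERSION=2.0.0 \
  -t axolotl-base:cu118 .

Note that CUDA_VERSION_BNB is the dotless short form (118, not 11.8.0): the bitsandbytes Makefile uses it as the suffix for the compiled library name (e.g. libbitsandbytes_cuda118.so).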
docker/Dockerfile-base
CHANGED
@@ -1,4 +1,5 @@
 ARG CUDA_VERSION="11.8.0"
+ARG CUDA_VERSION_BNB="118"
 ARG CUDNN_VERSION="8"
 ARG UBUNTU_VERSION="22.04"
 ARG MAX_JOBS=4
@@ -58,6 +59,15 @@ RUN git clone https://github.com/microsoft/DeepSpeed.git && \
     cd DeepSpeed && \
     MAX_CONCURRENCY=8 DS_BUILD_SPARSE_ATTN=0 DS_BUILD_OPS=1 python3 setup.py bdist_wheel

+FROM base-builder AS bnb-builder
+
+WORKDIR /workspace
+
+RUN git clone https://github.com/TimDettmers/bitsandbytes.git && \
+    cd bitsandbytes && \
+    CUDA_VERSION=$CUDA_VERSION_BNB make cuda11x && \
+    python setup.py bdist_wheel
+
 FROM base-builder

 # recompile apex
@@ -68,13 +78,14 @@ RUN cd apex && MAX_JOBS=1 python3 -m pip install --global-option="--cpp_ext" --g

 RUN mkdir /workspace/wheels
 COPY --from=deepspeed-builder /workspace/DeepSpeed/dist/deepspeed-*.whl wheels
+COPY --from=bnb-builder /workspace/bitsandbytes/dist/bitsandbytes-*.whl wheels
 COPY --from=flash-attn-builder /workspace/flash-attention/dist/flash_attn-*.whl wheels
 COPY --from=flash-attn-builder /workspace/flash-attention/csrc/fused_dense_lib/dist/fused_dense_lib-*.whl wheels
 COPY --from=flash-attn-builder /workspace/flash-attention/csrc/xentropy/dist/xentropy_cuda_lib-*.whl wheels
 COPY --from=flash-attn-builder /workspace/flash-attention/csrc/rotary/dist/rotary_emb-*.whl wheels
 COPY --from=flash-attn-builder /workspace/flash-attention/csrc/layer_norm/dist/dropout_layer_norm-*.whl wheels

-RUN pip3 install wheels/deepspeed-*.whl wheels/flash_attn-*.whl wheels/fused_dense_lib-*.whl wheels/xentropy_cuda_lib-*.whl wheels/rotary_emb-*.whl wheels/dropout_layer_norm-*.whl
+RUN pip3 install wheels/deepspeed-*.whl wheels/flash_attn-*.whl wheels/fused_dense_lib-*.whl wheels/xentropy_cuda_lib-*.whl wheels/rotary_emb-*.whl wheels/dropout_layer_norm-*.whl wheels/bitsandbytes-*.whl
 RUN git lfs install --skip-repo
 RUN pip3 install "peft @ git+https://github.com/huggingface/peft.git@main" \
     "accelerate @ git+https://github.com/huggingface/accelerate.git@main" \
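The new bnb-builder stage is the core of the change: the prebuilt bitsandbytes wheels on PyPI predate the 4-bit kernels QLoRA needs, so the image now compiles bitsandbytes from source against the image's CUDA toolkit and installs the resulting wheel alongside the others. The stage can be reproduced outside Docker roughly like this (a sketch for debugging the build; the python -m bitsandbytes diagnostic is assumed to be available in the bitsandbytes version being built):

# Mirrors the bnb-builder stage on a host with the CUDA 11.8 toolkit installed.
git clone https://github.com/TimDettmers/bitsandbytes.git
cd bitsandbytes
CUDA_VERSION=118 make cuda11x   # dotless version, same value CUDA_VERSION_BNB carries
python setup.py bdist_wheel
pip install dist/bitsandbytes-*.whl
# Assumed available in recent bitsandbytes releases: prints CUDA setup diagnostics.
python -m bitsandbytes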
requirements.txt
CHANGED
@@ -11,3 +11,9 @@ sentencepiece
 wandb
 einops
 xformers
+# qlora things
+bert-score==0.3.13
+evaluate==0.4.0
+rouge-score==0.1.2
+scipy
+scikit-learn==1.2.2
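The "# qlora things" block pins the evaluation stack that accompanies QLoRA fine-tuning: evaluate is the metric loader, rouge-score and bert-score are the backends for the corresponding metrics, and scipy/scikit-learn cover their numeric dependencies. A minimal sanity check that the chain resolves (a sketch; the one-liner is illustrative and not part of this diff):

pip install -r requirements.txt
# evaluate.load("rouge") imports the rouge-score package under the hood;
# identical prediction/reference strings should score 1.0 across rouge variants.
python3 -c "import evaluate; print(evaluate.load('rouge').compute(predictions=['a b c'], references=['a b c']))"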