FROM nvidia/cuda:12.0.0-cudnn8-runtime-ubuntu22.04 AS base
# Use build arguments to make GPU usage and the torch wheel variant configurable
ARG USE_CUDA=true
# PyTorch publishes wheel indexes per CUDA version (e.g. cu118, cu121); there is no cu120 index,
# so default to cu121, which is compatible with the CUDA 12.x base image
ARG USE_CUDA_VER=cu121
## Base environment ##
ENV ENV=prod \
    PORT=9099 \
    USE_CUDA_DOCKER=${USE_CUDA} \
    USE_CUDA_DOCKER_VER=${USE_CUDA_VER}
# Install system dependencies including Python 3.11
RUN apt-get update && \
    apt-get install -y \
        python3.11 python3.11-venv python3-pip gcc build-essential curl git pkg-config libicu-dev && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# Set Python 3.11 as default and install pip for Python 3.11
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 && \
    curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
    python3.11 get-pip.py && \
    update-alternatives --install /usr/bin/pip3 pip3 /usr/local/bin/pip3.11 1 && \
    rm get-pip.py
# Work directory
WORKDIR /app
# Copy and install the local bm25s module
COPY ./bm25s ./bm25s
RUN pip install ./bm25s
# Copy the requirements file
COPY ./requirements.txt .
# Install uv and PyTorch, selecting the CUDA or CPU wheel index based on the build arguments
RUN pip install uv && \
    if [ "$USE_CUDA" = "true" ]; then \
        pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/${USE_CUDA_DOCKER_VER} --no-cache-dir; \
    else \
        pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir; \
    fi
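# Optional sanity check (a sketch, not part of the original build): the commented-out RUN below
# would print the installed torch version and the CUDA version it was compiled against,
# confirming whether the CUDA or CPU wheel was selected. Uncomment only if importing torch
# during the image build is acceptable.
# RUN python3 -c "import torch; print(torch.__version__, torch.version.cuda)"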
# Install other Python dependencies
RUN pip install -r requirements.txt --no-cache-dir
# Copy the application code
COPY . .
# Expose the port and set the host/port the app listens on
EXPOSE 9099
ENV HOST="0.0.0.0"
ENV PORT="9099"
# Set entrypoint
ENTRYPOINT [ "bash", "start.sh" ]
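# Example usage (a sketch: the image tag "pipelines" and the GPU run flags are assumptions,
# and the build is assumed to run from the directory containing this file):
#   docker build -f dockerfile --build-arg USE_CUDA=true --build-arg USE_CUDA_VER=cu121 -t pipelines .
#   docker run --gpus all -p 9099:9099 pipelines
# For a CPU-only image:
#   docker build -f dockerfile --build-arg USE_CUDA=false -t pipelines:cpu .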