# Use an official PyTorch image with CUDA support as the base image
FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime

# Avoid interactive prompts during package installation
ENV DEBIAN_FRONTEND=noninteractive

# Install Git, OpenGL libraries, and libglib2.0
RUN apt-get update && apt-get install -y git libgl1-mesa-glx libglib2.0-0

# Install build dependencies, including CMake and a C++ compiler
RUN apt-get update && apt-get install -y unzip ffmpeg cmake g++ build-essential

# Set up a new user named "user" with user ID 1000
RUN useradd -m -u 1000 user

# Switch to the "user" user
USER user

ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH \
    PYTHONPATH=$HOME/app \
    PYTHONUNBUFFERED=1 \
    GRADIO_ALLOW_FLAGGING=never \
    GRADIO_NUM_PORTS=1 \
    GRADIO_SERVER_NAME=0.0.0.0 \
    GRADIO_THEME=huggingface \
    GRADIO_SHARE=False \
    SYSTEM=spaces

# Set the working directory to the user's home directory
WORKDIR $HOME/app

# Clone the application repository into the container
RUN git clone -b main https://github.com/fffiloni/video-retalking $HOME/app

# Install specific versions of PyTorch and TorchVision
RUN pip install torch==2.0.1 torchvision==0.15.2

# Install the application dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Switch to root to install aria2, then switch back to "user"
USER root
RUN apt-get update && apt-get install -y aria2
USER user

# Download checkpoint files using aria2
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/30_net_gen.pth -d $HOME/app/checkpoints -o 30_net_gen.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/BFM.zip -d $HOME/app/checkpoints -o BFM.zip
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/DNet.pt -d $HOME/app/checkpoints -o DNet.pt
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/ENet.pth -d $HOME/app/checkpoints -o ENet.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/GFPGANv1.3.pth -d $HOME/app/checkpoints -o GFPGANv1.3.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/GPEN-BFR-512.pth -d $HOME/app/checkpoints -o GPEN-BFR-512.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/LNet.pth -d $HOME/app/checkpoints -o LNet.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/ParseNet-latest.pth -d $HOME/app/checkpoints -o ParseNet-latest.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/RetinaFace-R50.pth -d $HOME/app/checkpoints -o RetinaFace-R50.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/expression.mat -d $HOME/app/checkpoints -o expression.mat
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/face3d_pretrain_epoch_20.pth -d $HOME/app/checkpoints -o face3d_pretrain_epoch_20.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/video-retalking/resolve/main/shape_predictor_68_face_landmarks.dat -d $HOME/app/checkpoints -o shape_predictor_68_face_landmarks.dat

# Extract the BFM archive into the checkpoints directory
RUN unzip -d $HOME/app/checkpoints/BFM $HOME/app/checkpoints/BFM.zip

# List the app directory contents (useful for build-time debugging)
RUN find $HOME/app

# Set the environment variables to specify the GPU device
ENV CUDA_DEVICE_ORDER=PCI_BUS_ID
ENV CUDA_VISIBLE_DEVICES=0

# Run the web UI
CMD ["python", "webUI.py"]
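
# A minimal sketch (not part of the original file) of how this image might be built and run
# locally. The image tag, GPU flag, and port mapping below are assumptions; Gradio typically
# listens on port 7860 by default:
#
#   docker build -t video-retalking .
#   docker run --gpus all -p 7860:7860 video-retalking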