# thecollabagepatch/magenta:latest - optimized
FROM nvidia/cuda:12.6.2-cudnn-runtime-ubuntu22.04

# CUDA libs present + on loader path
RUN apt-get update && apt-get install -y --no-install-recommends \
    cuda-libraries-12-4 && rm -rf /var/lib/apt/lists/*
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda-12.4/lib64:/usr/local/cuda-12.4/compat:/usr/local/cuda/targets/x86_64-linux/lib:${LD_LIBRARY_PATH}
RUN ln -sf /usr/local/cuda/targets/x86_64-linux/lib /usr/local/cuda/lib64 || true

# Ensure the NVIDIA repo key is present (non-interactive) and install cuDNN 9.8
RUN set -eux; \
    apt-get update && apt-get install -y --no-install-recommends gnupg ca-certificates curl; \
    install -d -m 0755 /usr/share/keyrings; \
    curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub \
      | gpg --batch --yes --dearmor -o /usr/share/keyrings/cuda-archive-keyring.gpg; \
    apt-get update; \
    apt-mark unhold libcudnn9-cuda-12 || true; \
    apt-get install -y --no-install-recommends \
      'libcudnn9-cuda-12=9.8.*' \
      'libcudnn9-dev-cuda-12=9.8.*' \
      --allow-downgrades --allow-change-held-packages; \
    apt-mark hold libcudnn9-cuda-12 || true; \
    ldconfig; \
    rm -rf /var/lib/apt/lists/*

# Optional preload workaround if still needed
ENV LD_PRELOAD=/usr/local/cuda/lib64/libcusparse.so.12:/usr/local/cuda/lib64/libcublas.so.12:/usr/local/cuda/lib64/libcublasLt.so.12:/usr/local/cuda/lib64/libcufft.so.11:/usr/local/cuda/lib64/libcusolver.so.11

# Better allocator (less fragmentation than BFC during XLA autotune)
ENV TF_GPU_ALLOCATOR=cuda_malloc_async

# Let cuBLAS use TF32 fast path on Ada (L40S) for big GEMMs
ENV TF_ENABLE_CUBLAS_TF32=1 NVIDIA_TF32_OVERRIDE=1

ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1 \
    TF_FORCE_GPU_ALLOW_GROWTH=true \
    XLA_PYTHON_CLIENT_PREALLOCATE=false
ENV JAX_PLATFORMS=""

# --- OS dependencies ---
RUN apt-get update && apt-get install -y --no-install-recommends \
    software-properties-common curl ca-certificates git \
    libsndfile1 ffmpeg \
    build-essential pkg-config \
    && add-apt-repository ppa:deadsnakes/ppa -y \
    && apt-get update && apt-get install -y --no-install-recommends \
    python3.11 python3.11-venv python3.11-distutils python3-pip \
    && rm -rf /var/lib/apt/lists/*

# Make python3 => 3.11 for convenience
RUN ln -sf /usr/bin/python3.11 /usr/bin/python && python -m pip install --upgrade pip

# --- Python dependencies (install order matters!) ---
# 1) Install JAX first
RUN python -m pip install "jax[cuda12]==0.7.1" "jaxlib==0.7.1"

# 2) Install seqio dependencies manually (excluding tfds-nightly and tensorflow-text)
RUN python -m pip install \
    absl-py clu sentencepiece

# 3) Install seqio without dependencies
RUN python -m pip install --no-deps "seqio==0.0.11"

# # 4) Create a patch file and apply it to seqio
# RUN echo 'Patching seqio/vocabularies.py...' && \
#     VOCAB_FILE="/usr/local/lib/python3.11/dist-packages/seqio/vocabularies.py" && \
#     cp "$VOCAB_FILE" "${VOCAB_FILE}.bak" && \
#     cat "$VOCAB_FILE" | sed 's/^import tensorflow_text as tf_text$/# import tensorflow_text as tf_text # Patched for Magenta RT/' > "${VOCAB_FILE}.tmp" && \
#     mv "${VOCAB_FILE}.tmp" "$VOCAB_FILE" && \
#     echo "Verifying patch..." && \
#     grep "# import tensorflow_text" "$VOCAB_FILE" && \
#     echo "Patch successful!" || (echo "Patch failed!" && exit 1)
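
# (Illustrative alternative, not used in this build: the same tensorflow_text removal could be
#  done in place with GNU sed once seqio is installed. The dist-packages path is the same one
#  assumed by the commented-out block above and the live python -c patch below.)
# RUN sed -i 's/^import tensorflow_text as tf_text$/# import tensorflow_text as tf_text/' \
#     /usr/local/lib/python3.11/dist-packages/seqio/vocabularies.py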
# Patch seqio to remove the tensorflow-text dependency
# (the live patch is applied below via python -c, after seqio and airio are installed)

# 5) Install correct protobuf version for stable TensorFlow
RUN python -m pip install "protobuf>=3.20.3,<6.0.0"

# 6) Install stable TensorFlow (not nightly)
RUN python -m pip install --no-deps \
    "tensorflow[and-cuda]>=2.18.0" \
    "tensorflow-hub==0.16.1"

# 7) Install remaining packages (consolidated, no duplicates)
RUN python -m pip install \
    tf2jax gin-config librosa resampy soundfile \
    fastapi uvicorn[standard] python-multipart pyloudnorm \
    google-cloud-storage "numpy==2.1.3" \
    huggingface_hub gradio soxr cached-property

# 8) Install airio
RUN python -m pip install 'git+https://github.com/google/airio.git@main'

# Patch seqio/vocabularies.py so it no longer imports tensorflow_text
RUN python -c "\
file_path = '/usr/local/lib/python3.11/dist-packages/seqio/vocabularies.py'; \
content = open(file_path, 'r').read(); \
content = content.replace('import tensorflow_text as tf_text', '# import tensorflow_text as tf_text'); \
open(file_path, 'w').write(content); \
print('Patched seqio/vocabularies.py')"

# 9) Install t5x and flaxformer without dependencies
RUN python -m pip install --no-deps \
    "t5x @ git+https://github.com/google-research/t5x.git@92c5b46" \
    "flaxformer @ git+https://github.com/google/flaxformer@399ea3a"
RUN python -m pip install aqtp fiddle

# 10) Install magenta-rt without dependencies
RUN python -m pip install --no-deps \
    'git+https://github.com/magenta/magenta-realtime#egg=magenta_rt[gpu]'

# 11) Remove conflicting TensorFlow packages
RUN python -m pip uninstall -y tensorflow tensorflow-cpu tensorflow-text || true

# Reinstall stable TensorFlow instead of nightly (following KEHANG's recommendation)
RUN python -m pip install --upgrade \
    "tensorflow[and-cuda]>=2.18.0"

# 12) Verify TensorFlow installation
RUN python -c "import tensorflow as tf; print('='*50); print('TensorFlow version:', tf.__version__); print('='*50)"

# Switch to non-root user for security
RUN useradd -m -u 1000 appuser
WORKDIR /home/appuser/app

# Copy application files
COPY --chown=appuser:appuser app.py /home/appuser/app/app.py
COPY --chown=appuser:appuser utils.py /home/appuser/app/utils.py
COPY --chown=appuser:appuser jam_worker.py /home/appuser/app/jam_worker.py
COPY --chown=appuser:appuser one_shot_generation.py /home/appuser/app/one_shot_generation.py
COPY --chown=appuser:appuser model_management.py /home/appuser/app/model_management.py
COPY --chown=appuser:appuser documentation.html /home/appuser/app/documentation.html
COPY --chown=appuser:appuser lil_demo_540p.mp4 /home/appuser/app/lil_demo_540p.mp4
COPY --chown=appuser:appuser magentaRT_rt_tester.html /home/appuser/app/magentaRT_rt_tester.html
COPY --chown=appuser:appuser magenta_prompts.js /home/appuser/app/magenta_prompts.js
COPY --chown=appuser:appuser docs/ /home/appuser/app/docs/

USER appuser
EXPOSE 7860

CMD ["bash", "-lc", "python -m uvicorn app:app --host 0.0.0.0 --port ${PORT:-7860}"]
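
# --- Usage (illustrative sketch, not part of the build) ---
# A minimal example of building and running this image; the tag matches the header comment
# above, and `--gpus all` assumes the NVIDIA Container Toolkit is installed on the host.
#   docker build -t thecollabagepatch/magenta:latest .
#   docker run --rm --gpus all -p 7860:7860 thecollabagepatch/magenta:latest
# The FastAPI app then listens on port 7860; the listen port can be overridden with
# `-e PORT=<port>` (adjust the -p mapping to match).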