# Start from the TGI base image
FROM ghcr.io/huggingface/text-generation-inference:1.3 as base

COPY ./requirements.txt /code/requirements.txt

# Install JupyterLab and plugins
RUN pip install jupyterlab jupyterlab-vim==0.15.1 jupyterlab-vimrc
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Install Visual Studio Code CLI
RUN curl -Lk 'https://code.visualstudio.com/sha/download?build=stable&os=cli-alpine-x64' \
    --output vscode_cli.tar.gz \
    && tar -xvf vscode_cli.tar.gz \
    && chmod +x ./code \
    && mv code /usr/local/bin/

# Create a non-root user with UID 1000
RUN useradd -m -u 1000 -s /bin/bash user

# Switch to the non-root user
USER user

# Set working directory
WORKDIR /home/user

# Add local python bin directory to PATH
ENV PATH="/home/user/.local/bin:${PATH}"

# Copy any necessary files (if needed)
# COPY --chown=user:user your-files /home/user/your-destination

# AWS SageMaker-compatible image
# Assuming this part remains the same from your original Dockerfile
FROM base as sagemaker

# Copy as the non-root user so the chmod below succeeds under USER user
COPY --chown=user sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]

# Final image
FROM base

# Switch to the non-root user
USER user

# Set working directory
WORKDIR /home/user

# Set home to the user's home directory
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH \
    PYTHONPATH=$HOME/app \
    PYTHONUNBUFFERED=1 \
    GRADIO_ALLOW_FLAGGING=never \
    GRADIO_NUM_PORTS=1 \
    GRADIO_SERVER_NAME=0.0.0.0 \
    GRADIO_THEME=huggingface \
    SYSTEM=spaces

## Add JupyterLab entrypoint
#ENTRYPOINT ["jupyter", "lab", "--ip=0.0.0.0", "--NotebookApp.token=''", "--port", "7860", "--no-browser"]

# Optional: Set CMD to launch TGI or any other command
#CMD ["text-generation-launcher", "--json-output"]

# Copy the current directory contents into the container at $HOME/app, setting the owner to the user
COPY --chown=user . $HOME/app

# Ensure run.sh is executable
RUN chmod +x $HOME/app/run.sh

# Set the CMD to run your script
# (exec-form CMD does not expand environment variables, so use the literal path)
CMD ["/home/user/app/run.sh"]
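
# For reference, a minimal hypothetical sketch of run.sh (not part of this Dockerfile;
# adjust to whatever the repository's actual script does). It simply launches JupyterLab
# on port 7860, mirroring the commented-out ENTRYPOINT above:
#
#   #!/bin/bash
#   jupyter lab --ip=0.0.0.0 --port=7860 --no-browser --NotebookApp.token=''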