# requirements.txt
black==23.7.0
chardet==5.1.0
clip @ git+https://github.com/openai/CLIP.git
einops>=0.6.1
fairscale>=0.4.13
fire>=0.5.0
fsspec>=2023.6.0
invisible-watermark>=0.2.0
matplotlib>=3.7.2
natsort>=8.4.0
ninja>=1.11.1
numpy==1.23.0
omegaconf>=2.3.0
open-clip-torch>=2.20.0
pandas>=2.0.3
pillow
pudb>=2022.1.3
pytorch-lightning==1.4.2
pyyaml>=5.4.1
scipy>=1.10.1
streamlit>=0.73.1
tensorboardx==2.6
timm>=0.9.2
tokenizers==0.12.1
torch>=2.0.1
torchaudio>=2.0.2
torchdata==0.6.1
torchmetrics==0.6.0
torchvision>=0.15.2
tqdm>=4.65.0
transformers==4.30.0
triton==2.0.0
urllib3<1.27,>=1.25.4
wandb>=0.15.6
webdataset>=0.2.33
wheel>=0.41.0
xformers>=0.0.22
streamlit-keyup==0.2.0
# Basic
scikit-image==0.19.3
opencv-python==4.7.0.72
imageio-ffmpeg==0.4.2
imageio==2.9.0
kornia==0.6.11
# Training
# byted-dataloader==0.3.7
# byted-mloops==0.2.21
accelerate==0.17.0
ema-pytorch==0.2.1
tensorboard==2.11.2
# Language Model
regex==2022.10.31
ftfy==6.1.1
# open_clip_torch==2.16.0
# Model
einops_exts==0.0.4
rotary_embedding_torch==0.2.1
entmax==1.1
torchdiffeq==0.2.3
diffusers==0.26.0
# apt-get install ffmpeg libsm6 libxext6 -y
# Face Keypoint
thriftpy2
decord
face-alignment
huggingface_hub==0.25.2
gdown==5.2.0