Spaces:
Runtime error
Runtime error
fix
Browse files
- requirements.txt +1 -1
- utils/image2text.py +2 -2
requirements.txt
CHANGED
@@ -1,7 +1,6 @@
|
|
1 |
transformers==4.27.4
|
2 |
sentencepiece==0.1.97
|
3 |
sacremoses==0.0.53
|
4 |
-
clip-interrogator==0.6.0
|
5 |
torch==2.0.0
|
6 |
gradio==3.24.1
|
7 |
pillow>=9.0.0
|
@@ -10,3 +9,4 @@ onnxruntime>=1.12.0
|
|
10 |
protobuf<=3.20.1,>=3.12.2
|
11 |
opencv-python==4.7.0.72
|
12 |
huggingface-hub==0.13.2
|
|
|
|
1 |
transformers==4.27.4
|
2 |
sentencepiece==0.1.97
|
3 |
sacremoses==0.0.53
|
|
|
4 |
torch==2.0.0
|
5 |
gradio==3.24.1
|
6 |
pillow>=9.0.0
|
|
|
9 |
protobuf<=3.20.1,>=3.12.2
|
10 |
opencv-python==4.7.0.72
|
11 |
huggingface-hub==0.13.2
|
12 |
+
clip-interrogator==0.6.0
|
utils/image2text.py
CHANGED
@@ -30,8 +30,8 @@ class Models(object):
|
|
30 |
LABEL_FILENAME = "selected_tags.csv"
|
31 |
|
32 |
# CLIP models
|
33 |
-
VIT_H_14_MODEL_REPO = "…"  [old value truncated in page extraction — see repository history for the original string]
|
34 |
-
VIT_L_14_MODEL_REPO = "…"  [old value truncated in page extraction — see repository history for the original string]
|
35 |
|
36 |
def __init__(self):
|
37 |
pass
|
|
|
30 |
LABEL_FILENAME = "selected_tags.csv"
|
31 |
|
32 |
# CLIP models
|
33 |
+
VIT_H_14_MODEL_REPO = "ViT-H-14/laion2b_s32b_b79k" # Stable Diffusion 2.X
|
34 |
+
VIT_L_14_MODEL_REPO = "ViT-L-14/openai" # Stable Diffusion 1.X
|
35 |
|
36 |
def __init__(self):
|
37 |
pass
|