Update app.py
app.py CHANGED
```diff
@@ -1,4 +1,4 @@
-from transformers import
+from transformers import CLIPImageProcessor, pipeline, CLIPTokenizer
 import torchvision.transforms as T
 import torch.nn.functional as F
 from PIL import Image, ImageFile
@@ -60,6 +60,9 @@ def infer(image, labels):
 
 with gr.Blocks() as demo:
     gr.Markdown("# EVACLIP vs CLIP 💥 ")
+    gr.Markdown("[EVACLIP](https://huggingface.co/BAAI/EVA-CLIP-8B) is CLIP scaled to the moon! 🔥")
+    gr.Markdown("It's a state-of-the-art zero-shot image classification model that also outperforms its predecessors on text-image retrieval and linear probing.")
+    gr.Markdown("In this demo, compare EVACLIP outputs to CLIP outputs ✨")
     with gr.Row():
         with gr.Column():
             image_input = gr.Image(type="pil")
```
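The second hunk's header shows these `gr.Markdown` rows land inside the `gr.Blocks` layout, above the `infer(image, labels)` callback. The diff does not include the body of `infer`, but the sketch below shows how the newly imported `pipeline` is typically wired up for a demo like this; the model ID, the pipeline task, and the comma-separated label parsing are assumptions, not part of this commit.

```python
# Hypothetical sketch of infer(image, labels) from the hunk header; not the
# app's actual implementation. Model ID, task name, and label parsing are
# assumptions.
from transformers import pipeline
from PIL import Image

# Zero-shot image classification with a stock CLIP checkpoint.
clip_pipe = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-large-patch14",
)

def infer(image: Image.Image, labels: str):
    # Gradio text inputs commonly pass labels as one comma-separated string.
    candidate_labels = [label.strip() for label in labels.split(",")]
    results = clip_pipe(image, candidate_labels=candidate_labels)
    # Map to {label: score} so the output can drive a gr.Label component.
    return {r["label"]: r["score"] for r in results}
```

In a layout like the one in the diff, `image_input` and a label textbox would feed this function through a button's `.click(...)` callback, with a `gr.Label` per model displaying EVACLIP and CLIP scores side by side.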