bharathj16 committed
Commit 1ec5b97 • 1 Parent(s): a655929
Update app.py
app.py
CHANGED
@@ -1,16 +1,45 @@
-from transformers import ViTImageProcessor, ViTForImageClassification
-from PIL import Image
import requests

-url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

-processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')
-model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224')

-inputs = processor(images=image, return_tensors="pt")
-outputs = model(**inputs)
-logits = outputs.logits
-# model predicts one of the 1000 ImageNet classes
-predicted_class_idx = logits.argmax(-1).item()
-print("Predicted class:", model.config.id2label[predicted_class_idx])

import requests

+from PIL import Image
+from transformers import AutoProcessor, AutoModelForVision2Seq
+
+
+model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224")
+processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
+
+prompt = "<grounding>An image of"
+
+url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.png"
image = Image.open(requests.get(url, stream=True).raw)

+# The original Kosmos-2 demo saves the image first and then reloads it. For some images, this gives a slightly different image input and changes the generation outputs.
+image.save("new_image.jpg")
+image = Image.open("new_image.jpg")
+
+inputs = processor(text=prompt, images=image, return_tensors="pt")
+
+generated_ids = model.generate(
+    pixel_values=inputs["pixel_values"],
+    input_ids=inputs["input_ids"],
+    attention_mask=inputs["attention_mask"],
+    image_embeds=None,
+    image_embeds_position_mask=inputs["image_embeds_position_mask"],
+    use_cache=True,
+    max_new_tokens=128,
+)
+generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+# Specify `cleanup_and_extract=False` to see the raw model generation.
+processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=False)
+
+print(processed_text)
+# `<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>.`
+
+# By default, the generated text is cleaned up and the entities are extracted.
+processed_text, entities = processor.post_process_generation(generated_text)
+
+print(processed_text)
+# `An image of a snowman warming himself by a fire.`

+print(entities)
+# `[('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a fire', (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)])]`
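The `<patch_index_XXXX>` pairs in the raw generation are Kosmos-2 location tokens. A small sketch of the mapping, under the assumption that these tokens index a 32 x 32 grid of image patches in row-major order and that each pair marks the top-left and bottom-right patch of a box, reproduces the normalized coordinates printed for `entities` above (the helper name is made up for illustration):

def patch_index_to_point(index, grid_size=32):
    # Assumed mapping: row-major patch index -> centre of that patch,
    # expressed as (x, y) normalized to the [0, 1] range.
    row, col = divmod(index, grid_size)
    return (col + 0.5) / grid_size, (row + 0.5) / grid_size

# <patch_index_0044><patch_index_0863> for "a snowman":
x1, y1 = patch_index_to_point(44)   # (0.390625, 0.046875)
x2, y2 = patch_index_to_point(863)  # (0.984375, 0.828125)
print((x1, y1, x2, y2))  # matches the first box printed for 'a snowman' above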
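Each entry of `entities` is a (phrase, text span, boxes) tuple, with boxes normalized to [0, 1]. A minimal sketch, assuming the `image` and `entities` objects produced by the script above, of scaling the boxes back to pixels and drawing them with PIL (`draw_entities` is an illustrative helper, not part of the commit):

from PIL import ImageDraw

def draw_entities(image, entities):
    # Work on a copy so the original image stays untouched.
    annotated = image.copy()
    draw = ImageDraw.Draw(annotated)
    width, height = annotated.size
    for phrase, _text_span, boxes in entities:
        for x1, y1, x2, y2 in boxes:
            # Boxes are normalized (x1, y1, x2, y2); scale them to pixel coordinates.
            box = (x1 * width, y1 * height, x2 * width, y2 * height)
            draw.rectangle(box, outline="red", width=3)
            draw.text((box[0], box[1]), phrase, fill="red")
    return annotated

draw_entities(image, entities).save("snowman_annotated.jpg")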
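As committed, app.py runs the example once and only prints to the Space log. A minimal sketch of how the same pipeline could be wrapped in a Gradio interface for the Space; this is an assumption about the app's eventual shape rather than part of this commit, and the `describe` function name is illustrative:

import gradio as gr
from PIL import Image
from transformers import AutoProcessor, AutoModelForVision2Seq

model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224")
processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")

def describe(image: Image.Image) -> str:
    # Same generation recipe as the committed script, applied to an uploaded image.
    inputs = processor(text="<grounding>An image of", images=image, return_tensors="pt")
    generated_ids = model.generate(
        pixel_values=inputs["pixel_values"],
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        image_embeds=None,
        image_embeds_position_mask=inputs["image_embeds_position_mask"],
        use_cache=True,
        max_new_tokens=128,
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    text, _entities = processor.post_process_generation(generated_text)
    return text

gr.Interface(fn=describe, inputs=gr.Image(type="pil"), outputs="text").launch()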