from PIL import Image
import requests
import matplotlib.pyplot as plt
import torch
import gradio as gr
from transformers import (
    ViTConfig,
    ViTForImageClassification,
    ViTFeatureExtractor,
    ImageClassificationPipeline,
    PerceiverForImageClassificationConvProcessing,
    PerceiverFeatureExtractor,
    VisionEncoderDecoderModel,
    AutoTokenizer,
)


# Option 1: build a ViT with randomly initialized weights (to train from scratch)

config = ViTConfig(num_hidden_layers=12, hidden_size=768)
model = ViTForImageClassification(config)

#print(config)

feature_extractor = ViTFeatureExtractor()
# or, to load one that corresponds to a checkpoint on the hub:
#feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
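# A further commented-out sketch (not used below): load the classifier and its feature extractor
# from the same Hub checkpoint, e.g. "google/vit-base-patch16-224"; any ViT image-classification
# checkpoint works the same way.
#model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")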

# Option 2: load a pretrained Perceiver IO checkpoint; the pipeline built from it is what classify_image() uses below
feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")

image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)
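
# Optional quick check (commented out): the pipeline also accepts an image URL or a local file
# path, not only a PIL image, so it can be exercised without the Gradio UI.
#print(image_pipe('http://images.cocodataset.org/val2017/000000039769.jpg'))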

'''
# Commented out: initialize a ViT-BERT encoder-decoder from a pretrained ViT encoder and a
# pretrained BERT decoder. Note that the cross-attention layers are randomly initialized.
model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google/vit-base-patch16-224-in21k", "bert-base-uncased"
)
# save the model after fine-tuning
model.save_pretrained("./vit-bert")
# load the fine-tuned model back
model = VisionEncoderDecoderModel.from_pretrained("./vit-bert")
'''

repo_name = "ydshieh/vit-gpt2-coco-en"
#test_image = "cats.jpg"
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
test_image = Image.open(requests.get(url, stream=True).raw)
test_image.save("cats.png")
feature_extractor2 = ViTFeatureExtractor.from_pretrained(repo_name)
tokenizer = AutoTokenizer.from_pretrained(repo_name)
model2 = VisionEncoderDecoderModel.from_pretrained(repo_name)
pixel_values = feature_extractor2(test_image, return_tensors="pt").pixel_values
print("Pixel Values")
print(pixel_values)
# autoregressively generate a caption (beam search decoding)
generation_output = model2.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True)
# decode the generated token ids into text
preds = tokenizer.batch_decode(generation_output.sequences, skip_special_tokens=True)
preds = [pred.strip() for pred in preds]
print("Predictions")
print(preds)
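

# Helper sketch (not wired into the Gradio demo below): wraps the captioning steps above so any
# PIL image can be captioned with the same vit-gpt2 checkpoint loaded as model2/feature_extractor2.
def caption_image(image):
  caption_pixel_values = feature_extractor2(image, return_tensors="pt").pixel_values
  output_ids = model2.generate(caption_pixel_values, max_length=16, num_beams=4)
  return tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()

# e.g. print(caption_image(test_image))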



def classify_image(image):
  results = image_pipe(image)
  
  print("RESULTS")
  print(results)
  # convert to format Gradio expects
  output = {}
  for prediction in results:
    predicted_label = prediction['label']
    score = prediction['score']
    output[predicted_label] = score
  print("OUTPUT")
  print(output)
  return output
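
# Optional sanity check (commented out to avoid an extra model forward pass at startup):
#print(classify_image(test_image))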


image = gr.Image(type="pil")
label = gr.Label(num_top_classes=5)
# example inputs; "cats.png" is saved by the captioning code above (add e.g. ["dog.jpg"] only if such a file exists in the repo)
examples = [["cats.png"]]
title = "Generate a Story from an Image"
description = "Demo for classifying images with Perceiver IO. To use it, simply upload an image and click 'submit' to let the model predict the 5 most probable ImageNet classes. Results will show up in a few seconds."
article = "<p style='text-align: center'></p>"

gr.Interface(fn=classify_image, inputs=image, outputs=label, title=title, description=description, article=article, examples=examples).queue().launch(debug=True)