from transformers import ViTConfig, ViTForImageClassification
from transformers import ViTFeatureExtractor
from PIL import Image
import requests
import matplotlib.pyplot as plt
import gradio as gr
from transformers import ImageClassificationPipeline, PerceiverForImageClassificationConvProcessing, PerceiverFeatureExtractor
from transformers import VisionEncoderDecoderModel
from transformers import AutoTokenizer
import torch

# option 1: load a ViT with randomly initialized weights (train from scratch)
config = ViTConfig(num_hidden_layers=12, hidden_size=768)
model = ViTForImageClassification(config)
#print(config)

feature_extractor = ViTFeatureExtractor()
# or, to load one that corresponds to a checkpoint on the hub:
#feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")

# the following pipeline gets called by classify_image()
feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)

'''
# initialize a vit-bert from a pretrained ViT and a pretrained BERT model.
# Note that the cross-attention layers will be randomly initialized
model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google/vit-base-patch16-224-in21k", "bert-base-uncased"
)
# saving model after fine-tuning
model.save_pretrained("./vit-bert")
# load fine-tuned model
model = VisionEncoderDecoderModel.from_pretrained("./vit-bert")
'''

# image-captioning model (ViT encoder + GPT-2 decoder) fine-tuned on COCO
repo_name = "ydshieh/vit-gpt2-coco-en"

# download a test image and save it locally so it can also serve as a Gradio example
#test_image = "cats.jpg"
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
test_image = Image.open(requests.get(url, stream=True).raw)
test_image.save("cats.jpg")

feature_extractor2 = ViTFeatureExtractor.from_pretrained(repo_name)
tokenizer = AutoTokenizer.from_pretrained(repo_name)
model2 = VisionEncoderDecoderModel.from_pretrained(repo_name)

pixel_values = feature_extractor2(test_image, return_tensors="pt").pixel_values
print("Pixel Values")
print(pixel_values)

# autoregressively generate a caption (here: beam search with 4 beams)
generated_ids = model2.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True)

# decode the generated token ids into text
preds = tokenizer.batch_decode(generated_ids.sequences, skip_special_tokens=True)
preds = [pred.strip() for pred in preds]
print("Predictions")
print(preds)


def classify_image(image):
    results = image_pipe(image)
    print("RESULTS")
    print(results)
    # convert to the {label: score} format Gradio's Label output expects
    output = {}
    for prediction in results:
        predicted_label = prediction['label']
        score = prediction['score']
        output[predicted_label] = score
    print("OUTPUT")
    print(output)
    return output


image = gr.inputs.Image(type="pil")
label = gr.outputs.Label(num_top_classes=5)
# example files must exist locally; cats.jpg is saved above, dog.jpg has to be provided separately
examples = [["cats.jpg"], ["dog.jpg"]]
title = "Generate a Story from an Image"
description = "Demo for classifying images with Perceiver IO. To use it, simply upload an image and click 'submit' to let the model predict the 5 most probable ImageNet classes. Results will show up in a few seconds."
article = ""

gr.Interface(fn=classify_image, inputs=image, outputs=label, title=title, description=description,
             examples=examples, enable_queue=True).launch(debug=True)
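# The interface above uses the legacy gr.inputs / gr.outputs component API, which newer
# Gradio releases have removed. A minimal sketch of the same demo against the current
# component API is kept below for reference (assumes Gradio 3.x or later; not exercised here):
'''
demo = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(type="pil"),
    outputs=gr.Label(num_top_classes=5),
    title=title,
    description=description,
    examples=examples,
)
demo.queue().launch(debug=True)
'''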