# --- Example 1: local object detection with DETR (Hugging Face Transformers) ---
from transformers import DetrImageProcessor, DetrForObjectDetection
import torch
from PIL import Image
import requests

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# you can specify the revision tag if you don't want the timm dependency
processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50", revision="no_timm")

inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)

# convert outputs (bounding boxes and class logits) to COCO API format
# and only keep detections with score > 0.9
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    box = [round(i, 2) for i in box.tolist()]
    print(
        f"Detected {model.config.id2label[label.item()]} with confidence "
        f"{round(score.item(), 3)} at location {box}"
    )


# --- Example 2: Gradio app that asks Gemini for a car's plate, model and color ---
# (independent of Example 1; run it as its own script)
import os
from dotenv import load_dotenv
import google.generativeai as genai
from pathlib import Path
import gradio as gr

# Load environment variables from the .env file
load_dotenv()

# Get the API key from the environment
API_KEY = os.getenv("GOOGLE_API_KEY")

# Configure the client with the API key
genai.configure(api_key=API_KEY)

# Generation and safety settings for the model
generation_config = {
    "temperature": 0.7,
    "top_p": 0.9,
    "top_k": 40,
    "max_output_tokens": 4000,
}

safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
]

model = genai.GenerativeModel(
    model_name="gemini-1.5-flash-latest",
    generation_config=generation_config,
    safety_settings=safety_settings,
)


def input_image_setup(file_loc):
    # Read the uploaded image from disk and wrap it as a Gemini image part
    # (the MIME type is assumed to be JPEG here)
    if not (img := Path(file_loc)).exists():
        raise FileNotFoundError(f"Could not find image: {img}")
    image_parts = [
        {
            "mime_type": "image/jpeg",
            "data": Path(file_loc).read_bytes(),
        }
    ]
    return image_parts


def generate_gemini_response(input_prompt, image_loc):
    # Send the text prompt together with the image to Gemini
    image_prompt = input_image_setup(image_loc)
    prompt_parts = [input_prompt, image_prompt[0]]
    response = model.generate_content(prompt_parts)
    return response.text


input_prompt = """
give the plate, the model and the color of the car
"""


def upload_file(files):
    if not files:
        return None, "Image not uploaded"
    file_paths = [file.name for file in files]
    response = generate_gemini_response(input_prompt, file_paths[0])
    return file_paths[0], response


with gr.Blocks() as demo:
    header = gr.Label("RADARPICK: Vous avez été radarisé!")
    image_output = gr.Image()
    upload_button = gr.UploadButton(
        "Click to upload an image", file_types=["image"], file_count="multiple"
    )
    generate_button = gr.Button("Generer")
    file_output = gr.Textbox(label="Generated Caption/Post Content")

    def process_generate(files):
        if not files:
            return None, "Image not uploaded"
        return upload_file(files)

    # Preview the first uploaded image as soon as it is uploaded
    upload_button.upload(
        fn=lambda files: files[0].name if files else None,
        inputs=[upload_button],
        outputs=image_output,
    )
    # Run the Gemini prompt against the first uploaded image
    generate_button.click(
        fn=process_generate,
        inputs=[upload_button],
        outputs=[image_output, file_output],
    )

demo.launch(debug=True)
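

# --- Added sketch (not part of the original scripts): visualizing the DETR detections ---
# A minimal, hypothetical follow-up to Example 1. It assumes Pillow is installed and
# that the `image`, `results`, and `model` variables from that script are still in
# scope; it then draws each predicted box and label onto a copy of the image. The
# output file name "detr_detections.jpg" is just an illustrative choice.
from PIL import ImageDraw

annotated = image.copy()
draw = ImageDraw.Draw(annotated)
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    # post-processed DETR boxes are absolute (x_min, y_min, x_max, y_max) coordinates
    x0, y0, x1, y1 = box.tolist()
    draw.rectangle([x0, y0, x1, y1], outline="red", width=3)
    draw.text((x0, y0), f"{model.config.id2label[label.item()]}: {score.item():.2f}", fill="red")
annotated.save("detr_detections.jpg")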