import os
import google.generativeai as genai
import gradio as gr
from PIL import Image
import moviepy.editor as mp
from google.generativeai.types import HarmBlockThreshold, HarmCategory
# Configure the Google API key (set as a Space secret) and the model
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
genai.configure(api_key=GOOGLE_API_KEY)

# Safety and instruction settings
safety_settings = {
    HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
}

system_instruction = (
    "You are an advocate against gender-based violence. "
    "Analyze the content for signs of gender discrimination and provide actionable advice."
)

# The system instruction is attached to the model itself (GenerationConfig does not
# accept it); safety settings are passed to each generate_content() call below.
model = genai.GenerativeModel(
    "gemini-1.5-pro-latest",
    system_instruction=system_instruction,
)
# Function to analyze text
def analyze_text(text):
    prompt = f"Analyze this text for any instances of gender-based discrimination and provide tips: {text}"
    generation_config = genai.types.GenerationConfig(
        temperature=0.5,
        max_output_tokens=300,
        top_k=40,
        top_p=0.9,
    )
    response = model.generate_content(
        [prompt],
        generation_config=generation_config,
        safety_settings=safety_settings,
    )
    try:
        return response.text
    except ValueError:
        # response.text raises when every candidate was blocked by the safety settings
        return "No response generated."
# Function to analyze images
def analyze_image(image):
    image = image.convert("RGB")  # Gradio delivers a PIL image (type="pil" below); normalize to RGB
    prompt = "Analyze this image for any instances of gender-based discrimination and provide tips."
    generation_config = genai.types.GenerationConfig(
        temperature=0.5,
        max_output_tokens=300,
        top_k=40,
        top_p=0.9,
    )
    response = model.generate_content(
        [prompt, image],
        generation_config=generation_config,
        safety_settings=safety_settings,
    )
    try:
        return response.text
    except ValueError:
        return "No response generated."
# Function to analyze videos
def analyze_video(video):
    prompt = "Analyze this video for any instances of gender-based discrimination and provide tips."
    # gr.Video passes a filepath in current Gradio releases; older versions pass a tempfile-like object
    video_path = video if isinstance(video, str) else video.name
    clip = mp.VideoFileClip(video_path)
    frame = clip.get_frame(1)  # Get a frame at t=1 second
    image = Image.fromarray(frame).convert("RGB")
    generation_config = genai.types.GenerationConfig(
        temperature=0.5,
        max_output_tokens=300,
        top_k=40,
        top_p=0.9,
    )
    response = model.generate_content(
        [prompt, image],
        generation_config=generation_config,
        safety_settings=safety_settings,
    )
    try:
        return response.text
    except ValueError:
        return "No response generated."
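
# Optional sketch (an assumption, not part of the original Space): a single frame at
# t=1s is a thin sample of a whole video. A helper like this could pass several
# evenly spaced frames to generate_content() instead, e.g.
# model.generate_content([prompt, *sample_frames(video_path)], ...).
# The frame count and spacing are arbitrary choices.
def sample_frames(video_path, num_frames=4):
    clip = mp.VideoFileClip(video_path)
    # Pick evenly spaced timestamps, avoiding the very start and end of the clip
    times = [clip.duration * i / (num_frames + 1) for i in range(1, num_frames + 1)]
    return [Image.fromarray(clip.get_frame(t)).convert("RGB") for t in times]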
# Gradio interface setup
with gr.Blocks() as app:
    with gr.Tab("Text Analysis"):
        text_input = gr.Textbox(label="Enter Text", placeholder="Type here...", lines=4)
        text_output = gr.Textbox(label="Analysis Output", lines=6)
        analyze_text_btn = gr.Button("Analyze Text")
        analyze_text_btn.click(
            fn=analyze_text,
            inputs=text_input,
            outputs=text_output
        )
with gr.Tab("Image Analysis"): | |
image_input = gr.Image(label="Upload Image") | |
image_output = gr.Textbox(label="Analysis Output", lines=6) | |
analyze_image_btn = gr.Button("Analyze Image") | |
analyze_image_btn.click( | |
fn=analyze_image, | |
inputs=image_input, | |
outputs=image_output | |
) | |
with gr.Tab("Video Analysis"): | |
video_input = gr.Video(label="Upload Video") | |
video_output = gr.Textbox(label="Analysis Output", lines=6) | |
analyze_video_btn = gr.Button("Analyze Video") | |
analyze_video_btn.click( | |
fn=analyze_video, | |
inputs=video_input, | |
outputs=video_output | |
) | |
app.launch() | |
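
# Assumed requirements.txt for this Space (package list inferred from the imports
# above; pins are a suggestion, not from the original):
#   google-generativeai
#   gradio
#   moviepy<2.0   # the moviepy.editor module was removed in moviepy 2.x
#   Pillow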