Rahatara committed · verified
Commit dd51152 · 1 Parent(s): b25d23d

Update app.py

Files changed (1)
  1. app.py +82 -25
app.py CHANGED
@@ -3,48 +3,105 @@ import google.generativeai as genai
 import gradio as gr
 from PIL import Image
 import moviepy.editor as mp
+from google.generativeai.types import HarmBlockThreshold, HarmCategory

 # Configure Google API Key and model
 GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
 genai.configure(api_key=GOOGLE_API_KEY)
 model = genai.GenerativeModel("gemini-1.5-pro-latest")

-# Analysis function
-def analyze_content(content):
-    if isinstance(content, str):  # Text content
-        prompt = f"Analyze this text for any instances of gender-based discrimination and provide tips: {content}"
-    elif isinstance(content, Image.Image):  # Image content
-        content = content.convert("RGB")  # Convert image to RGB
-        prompt = "Analyze this image for any instances of gender-based discrimination and provide tips."
-        content = [prompt, content]  # The model expects list inputs for images
-    else:  # Video content
-        prompt = "Analyze this video for any instances of gender-based discrimination and provide tips."
-        clip = mp.VideoFileClip(content.name)
-        frame = clip.get_frame(1)  # Get a frame at t=1 second
-        image = Image.fromarray(frame)
-        image = image.convert("RGB")
-        content = [prompt, image]  # Use a single frame for analysis
+# Safety and instruction settings
+safety_settings = {
+    HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+    HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+    HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+    HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+}
+system_instructions = [
+    "You are an advocate against gender-based violence.",
+    "Analyze the content for signs of gender discrimination and provide actionable advice."
+]

+# Function to analyze text
+def analyze_text(text):
+    prompt = f"Analyze this text for any instances of gender-based discrimination and provide tips: {text}"
     generation_config = genai.types.GenerationConfig(
         temperature=0.5,
         max_output_tokens=300,
         stop_sequences=["\n"],
         top_k=40,
-        top_p=0.9
+        top_p=0.9,
+        system_instructions=system_instructions,
+        safety_settings=safety_settings
     )
-    response = model.generate_content(content, generation_config=generation_config)
+    response = model.generate_content([prompt], generation_config=generation_config)
+    return response.text if response else "No response generated."
+
+# Function to analyze images
+def analyze_image(image):
+    image = image.convert("RGB")  # Convert image to RGB
+    prompt = "Analyze this image for any instances of gender-based discrimination and provide tips."
+    generation_config = genai.types.GenerationConfig(
+        temperature=0.5,
+        max_output_tokens=300,
+        stop_sequences=["\n"],
+        top_k=40,
+        top_p=0.9,
+        system_instructions=system_instructions,
+        safety_settings=safety_settings
+    )
+    response = model.generate_content([prompt, image], generation_config=generation_config)
+    return response.text if response else "No response generated."
+
+# Function to analyze videos
+def analyze_video(video):
+    prompt = "Analyze this video for any instances of gender-based discrimination and provide tips."
+    clip = mp.VideoFileClip(video.name)
+    frame = clip.get_frame(1)  # Get a frame at t=1 second
+    image = Image.fromarray(frame)
+    image = image.convert("RGB")
+    generation_config = genai.types.GenerationConfig(
+        temperature=0.5,
+        max_output_tokens=300,
+        stop_sequences=["\n"],
+        top_k=40,
+        top_p=0.9,
+        system_instructions=system_instructions,
+        safety_settings=safety_settings
+    )
+    response = model.generate_content([prompt, image], generation_config=generation_config)
     return response.text if response else "No response generated."

 # Gradio interface setup
 with gr.Blocks() as app:
-    with gr.Tab("Upload Content"):
-        input_content = gr.DataInput(label="Upload text, image, or video")
-        output_analysis = gr.Textbox(label="Discrimination Analysis Output")
-        analyze_button = gr.Button("Analyze Discrimination")
-        analyze_button.click(
-            fn=analyze_content,
-            inputs=input_content,
-            outputs=output_analysis
+    with gr.Tab("Text Analysis"):
+        text_input = gr.Textbox(label="Enter Text", placeholder="Type here...", lines=4)
+        text_output = gr.Textbox(label="Analysis Output", lines=6)
+        analyze_text_btn = gr.Button("Analyze Text")
+        analyze_text_btn.click(
+            fn=analyze_text,
+            inputs=text_input,
+            outputs=text_output
+        )
+
+    with gr.Tab("Image Analysis"):
+        image_input = gr.Image(label="Upload Image")
+        image_output = gr.Textbox(label="Analysis Output", lines=6)
+        analyze_image_btn = gr.Button("Analyze Image")
+        analyze_image_btn.click(
+            fn=analyze_image,
+            inputs=image_input,
+            outputs=image_output
+        )
+
+    with gr.Tab("Video Analysis"):
+        video_input = gr.Video(label="Upload Video")
+        video_output = gr.Textbox(label="Analysis Output", lines=6)
+        analyze_video_btn = gr.Button("Analyze Video")
+        analyze_video_btn.click(
+            fn=analyze_video,
+            inputs=video_input,
+            outputs=video_output
         )

 app.launch()
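
A note on the new generation settings: in the google.generativeai SDK, GenerationConfig only carries sampling parameters (temperature, top_p, top_k, max_output_tokens, stop_sequences, candidate_count) and rejects unknown keyword arguments, so passing system_instructions and safety_settings to it as the commit does will fail at runtime. Safety settings are accepted by generate_content (or by GenerativeModel), and a system instruction is given as system_instruction when the model is constructed. A minimal sketch of the text path under those assumptions; the function name analyze_text_sketch is illustrative and not part of the commit:

import os
import google.generativeai as genai
from google.generativeai.types import HarmBlockThreshold, HarmCategory

genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))

safety_settings = {
    HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
}

# The system instruction is set once on the model rather than per request.
model = genai.GenerativeModel(
    "gemini-1.5-pro-latest",
    system_instruction=(
        "You are an advocate against gender-based violence. "
        "Analyze the content for signs of gender discrimination and provide actionable advice."
    ),
)

def analyze_text_sketch(text):  # illustrative name, not in the commit
    # GenerationConfig holds only sampling parameters here.
    generation_config = genai.types.GenerationConfig(
        temperature=0.5,
        max_output_tokens=300,
        top_k=40,
        top_p=0.9,
    )
    response = model.generate_content(
        [f"Analyze this text for any instances of gender-based discrimination and provide tips: {text}"],
        generation_config=generation_config,
        safety_settings=safety_settings,  # passed on the call, not inside GenerationConfig
    )
    return response.text if response else "No response generated."

The sketch also drops stop_sequences=["\n"], since stopping at the first newline would truncate multi-paragraph advice to a single line.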
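
A second hedged sketch, for the video tab: depending on the Gradio version, gr.Video may hand the handler a plain filepath string rather than a temporary-file object with a .name attribute, in which case video.name in analyze_video would fail. A small helper that tolerates both (the name video_to_frame is illustrative):

import moviepy.editor as mp
from PIL import Image

def video_to_frame(video, t=1.0):
    # Accept either a filepath string (newer Gradio versions) or a
    # file-like object exposing .name (older versions); this is an
    # assumption, not something the commit itself handles.
    path = video if isinstance(video, str) else video.name
    clip = mp.VideoFileClip(path)
    frame = clip.get_frame(t)  # sample a single frame at t seconds
    clip.close()               # release the reader before returning
    return Image.fromarray(frame).convert("RGB")

The resulting PIL image can then be sent to model.generate_content alongside the prompt, as the committed analyze_video does.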