Update app.py
app.py
CHANGED
@@ -7,13 +7,11 @@ import gradio as gr
 from PIL import Image
 
 print("google-generativeai:", genai.__version__)
-
 GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
 
 # UI Titles and Subtitles
 TITLE = "<h1 align='center'>π Gender Bias Detection App π</h1>"
 SUBTITLE = "<h2 align='center'>Detect and analyze gender-based discrimination in communication.</h2>"
-
 IMAGE_WIDTH = 512
 
 def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
@@ -27,7 +25,6 @@ def user(text_prompt: str, chatbot: List[Tuple[str, str]]):
     return "", chatbot + [[text_prompt, None]]
 
 def bot(
-    google_key: str,
     image_prompt: Optional[Image.Image],
     temperature: float,
     max_output_tokens: int,
@@ -36,12 +33,11 @@ def bot(
     top_p: float,
     chatbot: List[Tuple[str, str]]
 ):
-
-    if not google_key:
+    if not GOOGLE_API_KEY:
         raise ValueError("GOOGLE_API_KEY is not set. Please set it up.")
-
+
     text_prompt = chatbot[-1][0]
-    genai.configure(api_key=
+    genai.configure(api_key=GOOGLE_API_KEY) # Use the global API key
     generation_config = genai.types.GenerationConfig(
         temperature=temperature,
         max_output_tokens=max_output_tokens,
@@ -50,14 +46,14 @@
         top_p=top_p,
         instructions="Analyze this text for gender-based discrimination, including implicit biases and stereotypes. Provide specific examples and explain why each example demonstrates bias. Also, suggest tips for how to address or mitigate these biases within the text."
     )
-
-    model_name = "gemini-1.5-pro-latest"
+    model_name = "gemini-1.5-pro-latest"
     model = genai.GenerativeModel(model_name)
+
+    # Correctly handle inputs based on image_prompt
    inputs = [text_prompt] if image_prompt is None else [text_prompt, preprocess_image(image_prompt)]
 
     response = model.generate_content(inputs, stream=True, generation_config=generation_config)
     response.resolve()
-
     chatbot[-1][1] = ""
     for chunk in response:
         for i in range(0, len(chunk.text), 10):
@@ -74,11 +70,16 @@ with gr.Blocks() as demo:
     image_input = gr.Image(type="pil", label="Upload Image")
     submit_button = gr.Button("Analyze")
     chatbot_output = gr.Chatbot(label="Analysis Output")
-
+
     submit_button.click(
+        fn=user, # Call user function first
+        inputs=[text_input, chatbot_output],
+        outputs=[chatbot_output],
+        queue=False # Prevent user input from being queued
+    ).then(
         fn=bot,
-        inputs=[
+        inputs=[image_input, 0.4, 1024, "END", 32, 1, chatbot_output],
         outputs=[chatbot_output]
     )
 
-demo.launch()
+demo.launch()
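For reference, the wiring in the last hunk relies on Gradio's event chaining: .click(...) runs user to append the message, and .then(...) runs bot on the updated history. In Gradio, each entry in an inputs= list is expected to be a component, so fixed settings such as 0.4 or "END" are usually baked into the callback rather than listed as event inputs, and a two-value return like user's needs two outputs. Below is a minimal, self-contained sketch of that pattern; it is illustrative only and not part of this commit, and the Gemini call is stubbed out.

import gradio as gr

def user(text_prompt, chatbot):
    # Append the new message to the history and clear the textbox.
    return "", chatbot + [[text_prompt, None]]

def analyze(image_prompt, chatbot):
    # Stand-in for bot(): the Gemini call from app.py would go here, with the
    # generation settings (temperature=0.4, max_output_tokens=1024, ...) fixed
    # inside this wrapper instead of being passed as event inputs.
    chatbot[-1][1] = f"(analysis of {chatbot[-1][0]!r} would appear here)"
    return chatbot

with gr.Blocks() as demo:
    text_input = gr.Textbox(label="Text to analyze")
    image_input = gr.Image(type="pil", label="Upload Image")
    submit_button = gr.Button("Analyze")
    chatbot_output = gr.Chatbot(label="Analysis Output")

    submit_button.click(
        fn=user,
        inputs=[text_input, chatbot_output],
        outputs=[text_input, chatbot_output],  # user() returns two values
        queue=False,
    ).then(
        fn=analyze,
        inputs=[image_input, chatbot_output],
        outputs=[chatbot_output],
    )

demo.launch()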
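One further note on the Gemini configuration in the middle hunks: the fields genai.types.GenerationConfig documents are sampling controls (temperature, top_p, top_k, candidate_count, max_output_tokens, stop_sequences), so an instructions= keyword there may not be accepted; standing guidance like the bias-analysis prompt more commonly goes into the model's system_instruction argument (available in recent google-generativeai releases for Gemini 1.5 models) or into the prompt itself. A sketch of that variant, assuming GOOGLE_API_KEY is set in the environment:

import os

import google.generativeai as genai

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

# Standing guidance for every request, taken verbatim from the prompt in app.py.
ANALYSIS_INSTRUCTIONS = (
    "Analyze this text for gender-based discrimination, including implicit "
    "biases and stereotypes. Provide specific examples and explain why each "
    "example demonstrates bias. Also, suggest tips for how to address or "
    "mitigate these biases within the text."
)

model = genai.GenerativeModel(
    "gemini-1.5-pro-latest",
    system_instruction=ANALYSIS_INSTRUCTIONS,
)

generation_config = genai.types.GenerationConfig(
    temperature=0.4,
    max_output_tokens=1024,
    top_k=32,
    top_p=1.0,
)

# Streaming call mirroring bot(): resolve() consumes the stream so .text is complete.
response = model.generate_content(
    ["Example text to analyze for biased language."],
    stream=True,
    generation_config=generation_config,
)
response.resolve()
print(response.text)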