Spaces:
Running
Running
Upload app.py
Browse files
app.py
ADDED
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Necessary Imports
|
2 |
+
import os
|
3 |
+
import time
|
4 |
+
import base64
|
5 |
+
import PIL.Image
|
6 |
+
import gradio as gr
|
7 |
+
import google.generativeai as genai
|
8 |
+
|
9 |
+
from dotenv import load_dotenv
|
10 |
+
|
11 |
+
# Load environment variables from a local .env file (supplies GOOGLE_API_KEY).
load_dotenv()

# Configure the Gemini SDK with the API key.
# NOTE(review): os.environ.get returns None when GOOGLE_API_KEY is unset;
# configure() would then fail at request time — confirm the key is always provided.
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))

# Generation parameters shared by both models (sampling and output length caps).
generation_config = {
    "temperature": 0.4,
    "top_p": 1,
    "top_k": 32,
    "max_output_tokens": 1400,
}

# Safety settings: block medium-and-above content for each harm category.
# Built with a comprehension so all categories share the same threshold.
safety_settings = [
    {"category": f"HARM_CATEGORY_{category}", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
    for category in [
        "HARASSMENT",
        "HATE_SPEECH",
        "SEXUALLY_EXPLICIT",
        "DANGEROUS_CONTENT",
    ]
]

# Create the Gemini models: one text-only, one vision-capable.
txt_model = genai.GenerativeModel(
    model_name="gemini-pro",
    generation_config=generation_config,
    safety_settings=safety_settings,
)
vis_model = genai.GenerativeModel(
    model_name="gemini-pro-vision",
    generation_config=generation_config,
    safety_settings=safety_settings,
)

# System prompt prepended to every user message (see query construction below).
system_prompt = """
Model: "As a trusted medical chatbot, your role is crucial in providing accurate information and guidance to users seeking medical assistance.
You will be presented with symptoms, medical history, or queries related to various health concerns, and your task is to offer insightful analysis, recommendations, and information to aid users in understanding their health conditions and making informed decisions.

**Analysis Guidelines:**

1. **Symptom Evaluation:** Carefully assess the symptoms described by the user to understand their medical condition accurately.
2. **Medical History Review:** Consider any relevant medical history provided by the user to contextualize their current health concerns and potential risk factors.
3. **Diagnosis Discussion:** Based on the presented symptoms and medical history, discuss possible diagnoses or conditions that align with the user's situation.
4. **Treatment Options:** Provide information on recommended treatments, therapies, or lifestyle changes for managing the identified medical condition.
5. **Preventive Measures:** Offer preventive strategies or advice to help users minimize the risk of future health issues or complications.
6. **Important Note:** While your guidance is valuable, it's essential to emphasize the importance of consulting with qualified healthcare professionals for accurate diagnosis and personalized medical care.

**Refusal Policy:**
If the user provides information or queries not related to medical concerns, kindly inform them that this chatbot is designed to address only medical inquiries. Politely encourage them to seek assistance from appropriate sources for non-medical matters.

Your role as a medical chatbot is to empower users with knowledge and guidance to support their health and well-being. Proceed to assist users with their medical inquiries, ensuring clarity, empathy, and accuracy in your responses."

"""
|
68 |
+
|
69 |
+
|
70 |
+
# Image to Base 64 Converter Function
def image_to_base64(image_path):
    """
    Read an image file from disk and return its base64 representation.

    Args:
        image_path (str): Path to the image file.

    Returns:
        str: Base64-encoded contents of the file, as a UTF-8 text string.
    """
    # Read the raw bytes, then encode and decode in one expression.
    with open(image_path, "rb") as file_handle:
        raw_bytes = file_handle.read()
    return base64.b64encode(raw_bytes).decode("utf-8")
|
87 |
+
|
88 |
+
|
89 |
+
# Function that takes User Inputs and displays it on ChatUI
def query_message(history, txt, img):
    """
    Append the user's query (text, optionally with an image) to the chat history.

    Parameters:
        history (list): The chat history as (user, bot) message tuples.
        txt (str): The text message.
        img (str): The image file path, or a falsy value when no image was given.

    Returns:
        list: The updated chat history (the input list, mutated in place).
    """
    # Text-only query: append and return immediately.
    if not img:
        history += [(txt, None)]
        return history

    # Convert the image to base64. Fix: the original bound this to a local
    # named `base64`, shadowing the imported `base64` module.
    encoded_image = image_to_base64(img)

    # Embed the image as an inline markdown data URL so the Chat UI renders it.
    # NOTE(review): the MIME type is hard-coded to image/jpeg regardless of the
    # actual file format — confirm uploads are always JPEG or derive the type.
    data_url = f"data:image/jpeg;base64,{encoded_image}"
    history += [(f"{txt} ![]({data_url})", None)]
    return history
|
113 |
+
|
114 |
+
|
115 |
+
# Function that takes User Inputs, generates Response and displays on Chat UI
def llm_response(history, text, img):
    """
    Generate a Gemini response for the user's input and append it to the history.

    Parameters:
        history (list): Previous chat history as (user, bot) message tuples.
        text (str): The user's text input.
        img (str): Path to an image file, or a falsy value for text-only input.

    Returns:
        list: The updated chat history with the model's reply appended.
    """
    # The system prompt is prepended to every request.
    prompt = f"{system_prompt}User: {text}"

    if img:
        # Image present: route through the vision model with the opened image.
        pil_image = PIL.Image.open(img)
        response = vis_model.generate_content([prompt, pil_image])
    else:
        # Text-only: route through the plain text model.
        response = txt_model.generate_content(prompt)

    # Append the model's reply as a bot-side message and return the history.
    history += [(None, response.text)]
    return history
|
140 |
+
|
141 |
+
|
142 |
+
# Interface Code using Gradio
# NOTE(review): original indentation was lost in the scrape; layout reconstructed
# with image and chatbot side by side in a Row — confirm against the live Space.
with gr.Blocks() as app:
    with gr.Row():
        # Image upload widget; type="filepath" passes a path string to handlers.
        image_box = gr.Image(type="filepath")

        # Chat UI (scale=2 gives it twice the width of the image column).
        chatbot = gr.Chatbot(scale=2, height=750)
    text_box = gr.Textbox(
        placeholder="Enter text and press enter, or upload an image",
        container=False,
    )

    # Submit button: first echo the user's message into the chat, then
    # generate and display the model's reply.
    # Fix: dropped the unused `clicked` binding the original assigned.
    btn = gr.Button("Submit")
    btn.click(query_message, [chatbot, text_box, image_box], chatbot).then(
        llm_response, [chatbot, text_box, image_box], chatbot
    )

# Launch the Interface (queue enables sequential request handling)
app.queue()
app.launch(debug=False)
|