ariankhalfani committed · f7d6dc4
Parent(s): 04b1ad0
Create app.py
app.py ADDED
@@ -0,0 +1,225 @@
import gradio as gr
import os
import numpy as np
from cataract import combined_prediction, save_cataract_prediction_to_db, predict_object_detection
from glaucoma import combined_prediction_glaucoma, submit_to_db, predict_image
from database import get_db_data, format_db_data, get_context_db_data
from chatbot import chatbot, update_patient_history, generate_voice_response
from PIL import Image

# Define the custom theme
theme = gr.themes.Soft(
    primary_hue="neutral",
    secondary_hue="neutral",
    neutral_hue="gray",
    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif']
).set(
    body_background_fill="#ffffff",
    block_background_fill="#0a2b42",
    block_border_width="1px",
    block_title_background_fill="#0a2b42",
    input_background_fill="#ffffff",
    button_secondary_background_fill="#0a2b42",
    border_color_primary="#800080",
    background_fill_secondary="#ffffff",
    color_accent_soft="transparent"
)

# Define custom CSS
css = """
body {
    color: #0a2b42; /* Dark blue font */
}
.light body {
    color: #0a2b42; /* Dark blue font */
}
input, textarea {
    background-color: #ffffff !important; /* White background for text boxes */
    color: #0a2b42 !important; /* Dark blue font for text boxes */
}
"""

logo_url = "https://huggingface.co/spaces/Nexus-Community/Nexus-App/resolve/main/Wellness-Nexus.png"
db_path_cataract = "cataract_results.db"
db_path_glaucoma = "glaucoma_results.db"
db_path_context = "context_results.db"  # assumed filename: db_path_context is used below but was never defined, which raises a NameError at runtime

def display_db_data():
    """Fetch and format the data from the database for display."""
    glaucoma_data, cataract_data = get_db_data(db_path_glaucoma, db_path_cataract)
    context_data = get_context_db_data(db_path_context)
    formatted_data = format_db_data(glaucoma_data, cataract_data, context_data)
    return formatted_data

def check_db_status():
    """Check the status of the databases and return a status message."""
    cataract_status = "Loaded" if os.path.exists(db_path_cataract) else "Not Loaded"
    glaucoma_status = "Loaded" if os.path.exists(db_path_glaucoma) else "Not Loaded"
    context_status = "Loaded" if os.path.exists(db_path_context) else "Not Loaded"
    return f"Cataract Database: {cataract_status}\nGlaucoma Database: {glaucoma_status}\nContext Database: {context_status}"

def toggle_input_visibility(input_type):
    if input_type == "Voice":
        return gr.update(visible=True), gr.update(visible=False)
    else:
        return gr.update(visible=False), gr.update(visible=True)

def process_image(image):
    # Run the analyzer model
    blended_image, red_quantity, green_quantity, blue_quantity, raw_response, stage, save_message, debug_info = combined_prediction(image)

    # Run the object detection model
    predicted_image_od, raw_response_od = predict_object_detection(image)

    return blended_image, red_quantity, green_quantity, blue_quantity, raw_response, stage, save_message, debug_info, predicted_image_od, raw_response_od

with gr.Blocks(theme=theme, css=css) as demo:  # css passed in so the custom styles defined above are actually applied
    gr.HTML(f"<img src='{logo_url}' alt='Logo' width='150'/>")
    gr.Markdown("## Wellness-Nexus V.1.0")
    gr.Markdown("This app helps people screen for cataract and glaucoma, the #1 and #2 causes of blindness in the world, respectively.")

    with gr.Tab("Cataract Screener and Analyzer"):
        with gr.Row():
            image_input = gr.Image(type="numpy", label="Upload an Image")
            submit_btn = gr.Button("Submit")

        with gr.Row():
            segmented_image_cataract = gr.Image(type="numpy", label="Segmented Image")
            predicted_image_od = gr.Image(type="numpy", label="Predicted Image")

        with gr.Column():
            red_quantity_cataract = gr.Slider(label="Red Quantity", minimum=0, maximum=255, interactive=False)
            green_quantity_cataract = gr.Slider(label="Green Quantity", minimum=0, maximum=255, interactive=False)
            blue_quantity_cataract = gr.Slider(label="Blue Quantity", minimum=0, maximum=255, interactive=False)

        with gr.Row():
            cataract_stage = gr.Textbox(label="Cataract Stage", interactive=False)
            raw_response_cataract = gr.Textbox(label="Raw Response", interactive=False)
            submit_value_btn_cataract = gr.Button("Submit Values to Database")
            db_response_cataract = gr.Textbox(label="Database Response")
            debug_cataract = gr.Textbox(label="Debug Message", interactive=False)
            # Added so the outputs below match the ten values returned by process_image
            raw_response_od_cataract = gr.Textbox(label="Object Detection Raw Response", interactive=False)

        submit_btn.click(
            process_image,
            inputs=image_input,
            outputs=[
                segmented_image_cataract, red_quantity_cataract, green_quantity_cataract, blue_quantity_cataract, raw_response_cataract, cataract_stage, db_response_cataract, debug_cataract, predicted_image_od, raw_response_od_cataract
            ]
        )

        submit_value_btn_cataract.click(
            lambda img, red, green, blue, stage: save_cataract_prediction_to_db(Image.fromarray(img), red, green, blue, stage),
            inputs=[segmented_image_cataract, red_quantity_cataract, green_quantity_cataract, blue_quantity_cataract, cataract_stage],
            outputs=[db_response_cataract, debug_cataract]
        )

    with gr.Tab("Glaucoma Analyzer and Screener"):
        with gr.Row():
            # Note: image_input and predicted_image_od are re-bound to new components here;
            # the cataract tab's events were already wired to its own components above.
            image_input = gr.Image(type="numpy", label="Upload an Image")
            mask_threshold_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Mask Threshold")

        with gr.Row():
            submit_btn_segmentation = gr.Button("Submit Segmentation")
            submit_btn_od = gr.Button("Submit Object Detection")

        with gr.Row():
            segmented_image = gr.Image(type="numpy", label="Segmented Image")
            predicted_image_od = gr.Image(type="numpy", label="Predicted Image")

        with gr.Row():
            raw_response_od = gr.Textbox(label="Raw Result")

        with gr.Column():
            cup_area = gr.Textbox(label="Cup Area")
            disk_area = gr.Textbox(label="Disk Area")
            rim_area = gr.Textbox(label="Rim Area")
            rim_to_disc_ratio = gr.Textbox(label="Rim to Disc Ratio")
            ddls_stage = gr.Textbox(label="DDLS Stage")

        with gr.Column():
            submit_value_btn = gr.Button("Submit Values to Database")
            db_response = gr.Textbox(label="Database Response")
            debug_glaucoma = gr.Textbox(label="Debug Message", interactive=False)

        def process_segmentation_image(img, mask_thresh):
            # Run the segmentation model
            return combined_prediction_glaucoma(img, mask_thresh)

        def process_od_image(img):
            # Run the object detection model
            image_with_boxes, raw_predictions = predict_image(img)
            return image_with_boxes, raw_predictions

        submit_btn_segmentation.click(
            fn=process_segmentation_image,
            inputs=[image_input, mask_threshold_slider],
            outputs=[
                segmented_image, cup_area, disk_area, rim_area, rim_to_disc_ratio, ddls_stage
            ]
        )

        submit_btn_od.click(
            fn=process_od_image,
            inputs=[image_input],
            outputs=[
                predicted_image_od, raw_response_od
            ]
        )

        submit_value_btn.click(
            lambda img, cup, disk, rim, ratio, stage: submit_to_db(img, cup, disk, rim, ratio, stage),
            inputs=[image_input, cup_area, disk_area, rim_area, rim_to_disc_ratio, ddls_stage],
            outputs=[db_response, debug_glaucoma]
        )

    with gr.Tab("Chatbot"):
        with gr.Row():
            input_type_dropdown = gr.Dropdown(label="Input Type", choices=["Voice", "Text"], value="Voice")
            tts_model_dropdown = gr.Dropdown(label="TTS Model", choices=["Ryan (ESPnet)", "Nithu (Custom)"], value="Nithu (Custom)")
            submit_btn_chatbot = gr.Button("Submit")

        with gr.Row():
            audio_input = gr.Audio(type="filepath", label="Record your voice", visible=True)
            text_input = gr.Textbox(label="Type your question", visible=False)

        with gr.Row():
            answer_textbox = gr.Textbox(label="Answer")
            answer_audio = gr.Audio(label="Answer as Speech", type="filepath")
            generate_voice_btn = gr.Button("Generate Voice Response")

        with gr.Row():
            log_messages_textbox = gr.Textbox(label="Log Messages", lines=10)
            db_status_textbox = gr.Textbox(label="Database Status", interactive=False)

        input_type_dropdown.change(
            fn=toggle_input_visibility,
            inputs=[input_type_dropdown],
            outputs=[audio_input, text_input]
        )

        submit_btn_chatbot.click(
            fn=chatbot,
            inputs=[audio_input, input_type_dropdown, text_input],
            outputs=[answer_textbox, db_status_textbox]
        )

        generate_voice_btn.click(
            fn=generate_voice_response,
            inputs=[tts_model_dropdown, answer_textbox],
            outputs=[answer_audio, db_status_textbox]
        )

        fetch_db_btn = gr.Button("Fetch Database")
        fetch_db_btn.click(
            fn=update_patient_history,
            inputs=[],
            outputs=[db_status_textbox]
        )

    with gr.Tab("Database Upload and View"):
        gr.Markdown("### Store and Retrieve Context Information")

        db_display = gr.HTML()
        load_db_btn = gr.Button("Load Database Content")
        load_db_btn.click(display_db_data, outputs=db_display)

demo.launch()