luigi12345 committed on
Commit 131b493 • 1 Parent(s): f752fee

app.py CHANGED
@@ -4,12 +4,10 @@ import numpy as np
 import torch.nn.functional as F
 from torch import nn
 from transformers import AutoImageProcessor, Swinv2ForImageClassification, SegformerForSemanticSegmentation
-import matplotlib.pyplot as plt
 import streamlit as st
 from PIL import Image
 import io
 import zipfile
-import os

 # --- GlaucomaModel Class ---
 class GlaucomaModel(object):
@@ -48,8 +46,16 @@ class GlaucomaModel(object):
         )
         seg_probs = F.softmax(upsampled_logits, dim=1)
         pred_disc_cup = upsampled_logits.argmax(dim=1)[0]
-
-
+
+        # Calculate segmentation confidence based on probability distribution
+        # For each pixel classified as cup/disc, check how confident the model is
+        cup_mask = pred_disc_cup == 2
+        disc_mask = pred_disc_cup == 1
+
+        # Get confidence only for pixels predicted as cup/disc
+        cup_confidence = seg_probs[0, 2, cup_mask].mean().item() * 100 if cup_mask.any() else 0
+        disc_confidence = seg_probs[0, 1, disc_mask].mean().item() * 100 if disc_mask.any() else 0
+
         return pred_disc_cup.numpy().astype(np.uint8), cup_confidence, disc_confidence

     def process(self, image):
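To make the added confidence lines easier to follow, here is a minimal, self-contained sketch (not part of the commit) of the same masked-mean idea on a dummy logits tensor. The class indices (1 = disc, 2 = cup) come from the hunk above; the tensor shape is an illustrative assumption.

```python
# Illustrative sketch only: a toy logits tensor stands in for the segmentation head's output.
import torch
import torch.nn.functional as F

upsampled_logits = torch.randn(1, 3, 64, 64)       # (batch, classes, H, W); shape assumed
seg_probs = F.softmax(upsampled_logits, dim=1)     # per-pixel class probabilities
pred_disc_cup = upsampled_logits.argmax(dim=1)[0]  # (H, W) label map

cup_mask = pred_disc_cup == 2                      # pixels the model calls "cup"
disc_mask = pred_disc_cup == 1                     # pixels the model calls "disc"

# Mean probability of the winning class over its own pixels, reported as a percentage.
cup_confidence = seg_probs[0, 2, cup_mask].mean().item() * 100 if cup_mask.any() else 0
disc_confidence = seg_probs[0, 1, disc_mask].mean().item() * 100 if disc_mask.any() else 0
print(f"cup: {cup_confidence:.1f}%  disc: {disc_confidence:.1f}%")
```

The `cup_mask.any()` guard matters: taking `.mean()` over an empty selection returns NaN, so images with no predicted cup or disc pixels fall back to 0% instead.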
@@ -90,64 +96,133 @@ def add_mask(image, mask, classes, colors, alpha=0.5):
     output = cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0)
     return output, overlay

+def get_confidence_level(confidence):
+    if confidence >= 90:
+        return "Very High"
+    elif confidence >= 75:
+        return "High"
+    elif confidence >= 60:
+        return "Moderate"
+    elif confidence >= 45:
+        return "Low"
+    else:
+        return "Very Low"
+
 # --- Streamlit Interface ---
 def main():
-    st.set_page_config(layout="wide")
-    st.title("Batch Glaucoma Screening from Retinal Fundus Images")

-    #
-    st.
-
-
+    # Header with better styling
+    st.markdown("""
+        <h1 style='text-align: center;'>Glaucoma Screening from Retinal Fundus Images</h1>
+        <p style='text-align: center; color: gray;'>Upload retinal images for automated glaucoma detection and optic disc/cup segmentation</p>
+    """, unsafe_allow_html=True)
+
+    # Sidebar with better organization
+    with st.sidebar:
+        st.markdown("### Upload Settings")
+        uploaded_files = st.file_uploader("Upload Retinal Images",
+                                          type=['png', 'jpeg', 'jpg'],
+                                          accept_multiple_files=True,
+                                          help="Support multiple images in PNG, JPEG formats")
+
+        st.markdown("### Analysis Settings")
+        st.info("Set confidence threshold to filter results")
+        confidence_threshold = st.slider(
+            "Classification Confidence Threshold (%)",
+            0, 100, 70,
+            help="Images with confidence above this threshold will be marked as reliable predictions")

-    confident_images = []
-    download_confident_images = []
-
     if uploaded_files:
         for uploaded_file in uploaded_files:
             image = Image.open(uploaded_file).convert('RGB')
             image_np = np.array(image).astype(np.uint8)

-            with st.spinner(f'Processing {uploaded_file.name}...'):
+            with st.spinner(f'Processing {uploaded_file.name}...'):
                 model = GlaucomaModel(device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))
                 disease_idx, disc_cup_image, vcdr, cls_conf, cup_conf, disc_conf, cropped_image = model.process(image_np)

-            #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            # Create expandable section for each image
+            with st.expander(f"Analysis Results: {uploaded_file.name}", expanded=True):
+                # Image display section
+                cols = st.columns(3)
+                cols[0].image(image_np, caption="Original Image", use_column_width=True)
+                cols[1].image(disc_cup_image, caption="Segmentation Overlay", use_column_width=True)
+                cols[2].image(cropped_image, caption="Region of Interest", use_column_width=True)
+
+                # Metrics section with clear separation
+                st.markdown("---")
+                metric_cols = st.columns(3)
+
+                # Classification Results
+                with metric_cols[0]:
+                    st.markdown("### Classification")
+                    diagnosis = model.cls_id2label[disease_idx]
+                    is_confident = cls_conf >= confidence_threshold
+
+                    # Color-coded diagnosis
+                    if diagnosis == "Glaucoma":
+                        st.markdown(f"<div style='padding: 10px; background-color: #ffebee; border-radius: 5px;'>"
+                                    f"<h4 style='color: #c62828;'>Diagnosis: {diagnosis}</h4></div>",
+                                    unsafe_allow_html=True)
+                    else:
+                        st.markdown(f"<div style='padding: 10px; background-color: #e8f5e9; border-radius: 5px;'>"
+                                    f"<h4 style='color: #2e7d32;'>Diagnosis: {diagnosis}</h4></div>",
+                                    unsafe_allow_html=True)
+
+                    st.metric("Classification Confidence", f"{cls_conf:.1f}%")
+                    if not is_confident:
+                        st.warning("⚠️ Below confidence threshold")
+
+                # Segmentation Results
+                with metric_cols[1]:
+                    st.markdown("### Segmentation Quality")
+                    st.metric("Optic Cup Confidence", f"{cup_conf:.1f}%")
+                    st.metric("Optic Disc Confidence", f"{disc_conf:.1f}%")
+
+                    # Confidence level explanation
+                    cup_level = get_confidence_level(cup_conf)
+                    disc_level = get_confidence_level(disc_conf)
+                    st.info(f"Cup Detection: {cup_level}\nDisc Detection: {disc_level}")
+
+                # Clinical Metrics
+                with metric_cols[2]:
+                    st.markdown("### Clinical Metrics")
+                    st.metric("Cup-to-Disc Ratio (CDR)", f"{vcdr:.3f}")
+
+                    # CDR interpretation
+                    if vcdr > 0.7:
+                        st.warning("⚠️ Elevated CDR (>0.7)")
+                    elif vcdr > 0.5:
+                        st.info("ℹ️ Borderline CDR (0.5-0.7)")
+                    else:
+                        st.success("✅ Normal CDR (<0.5)")

-        # Download
+        # Download section
         if download_confident_images:
+            st.sidebar.markdown("---")
+            st.sidebar.markdown("### Download Results")
             with zipfile.ZipFile("confident_cropped_images.zip", "w") as zf:
                 for cropped_image, name in download_confident_images:
                     img_buffer = io.BytesIO()
                     Image.fromarray(cropped_image).save(img_buffer, format="PNG")
                     zf.writestr(f"{name}_cropped.png", img_buffer.getvalue())
-
-
-
-
-
+
+            st.sidebar.download_button(
+                label="Download Analysis Results",
+                data=open("confident_cropped_images.zip", "rb"),
+                file_name="glaucoma_analysis_results.zip",
+                mime="application/zip",
+                help="Download cropped images and analysis results"
            )
     else:
-
+        # Welcome message when no files are uploaded
+        st.markdown("""
+            <div style='text-align: center; padding: 50px;'>
+                <h3>Welcome to the Glaucoma Screening Tool</h3>
+                <p>Upload retinal fundus images using the sidebar to begin analysis</p>
+            </div>
+        """, unsafe_allow_html=True)

 if __name__ == '__main__':
-    main()
+    main()
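The new `get_confidence_level` helper simply bands a percentage into a qualitative label. A small usage sketch (the sample values are made up):

```python
# Copied from the hunk above, with a few sample calls to show the banding.
def get_confidence_level(confidence):
    if confidence >= 90:
        return "Very High"
    elif confidence >= 75:
        return "High"
    elif confidence >= 60:
        return "Moderate"
    elif confidence >= 45:
        return "Low"
    else:
        return "Very Low"

for value in (95.2, 81.0, 67.4, 50.0, 12.3):
    print(f"{value:5.1f}% -> {get_confidence_level(value)}")
# 95.2% -> Very High, 81.0% -> High, 67.4% -> Moderate, 50.0% -> Low, 12.3% -> Very Low
```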
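One detail the visible hunk leaves implicit: the old `confident_images = []` and `download_confident_images = []` initializations are removed, yet the download block still checks `download_confident_images`, and the diff does not show where the list is now populated. If it is still meant to be filled per image, a sketch of how that might look is below; none of these lines appear in the commit, and the made-up `results` data only stands in for `model.process(...)` outputs.

```python
# Hypothetical sketch (not in the commit): collect images that pass the threshold
# so the existing download block has something to zip. Names mirror the diff.
import numpy as np

confidence_threshold = 70.0
download_confident_images = []                       # (cropped_image, name) pairs, as the zip loop expects

# Made-up per-image results standing in for model.process(...) outputs.
results = [("img_001", 83.5, np.zeros((64, 64, 3), dtype=np.uint8)),
           ("img_002", 41.2, np.zeros((64, 64, 3), dtype=np.uint8))]

for name, cls_conf, cropped_image in results:
    if cls_conf >= confidence_threshold:             # keep only confident classifications
        download_confident_images.append((cropped_image, name))

print([n for _, n in download_confident_images])     # ['img_001']
```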
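A small design note on the download block: `st.download_button` also accepts raw bytes for `data`, which avoids passing it an open file object that is never closed. A hedged alternative with the same labels as the diff, just reading the archive into memory first:

```python
# Sketch of the same download step, passing bytes instead of an open file handle.
import streamlit as st

with open("confident_cropped_images.zip", "rb") as fh:
    zip_bytes = fh.read()

st.sidebar.download_button(
    label="Download Analysis Results",
    data=zip_bytes,
    file_name="glaucoma_analysis_results.zip",
    mime="application/zip",
    help="Download cropped images and analysis results",
)
```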