Spaces:
Running
Running
luxmorocco
committed on
Commit
•
d05d494
1
Parent(s):
b3991da
Update app.py
Browse files
app.py
CHANGED
@@ -130,6 +130,30 @@ class VBDDataset(Dataset):
|
|
130 |
return self.image_ids.shape[0]
|
131 |
|
132 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
133 |
def generate_diagnostic_report(predictions, labels, threshold=0.5):
|
134 |
# Initialize an empty report string
|
135 |
report = "Diagnostic Report:\n\n"
|
@@ -251,9 +275,22 @@ uploaded_file = st.sidebar.file_uploader("Upload Chest X-ray image", type=["png"
|
|
251 |
|
252 |
# Load the model (use your model loading function)
|
253 |
# Ensure the model path is correct and accessible
|
254 |
-
|
255 |
-
model
|
256 |
-
model.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
257 |
|
258 |
def process_image(image_path):
|
259 |
# Load and transform the image
|
|
|
130 |
return self.image_ids.shape[0]
|
131 |
|
132 |
|
133 |
+
def get_train_transform():
    """Build the albumentations pipeline applied to training samples.

    Returns:
        A.Compose that flips images with probability 0.5 and converts them to
        tensors, while keeping pascal_voc bounding boxes and their labels in
        sync with the spatial transforms.
    """
    augmentations = [
        A.Flip(0.5),
        ToTensorV2(p=1.0),
    ]
    bbox_config = {'format': 'pascal_voc', 'label_fields': ['labels']}
    return A.Compose(augmentations, bbox_params=bbox_config)
|
138 |
+
|
139 |
+
def get_valid_transform():
    """Build the validation pipeline: tensor conversion only, no augmentation."""
    return A.Compose([ToTensorV2(p=1.0)])
|
143 |
+
|
144 |
+
|
145 |
+
def collate_fn(batch):
    """Transpose a batch of (image, target) pairs into (images, targets).

    Detection targets vary in size per image, so the default stacking
    collate cannot be used; this keeps each sample's tensors separate.
    """
    columns = zip(*batch)
    return tuple(columns)
|
147 |
+
|
148 |
+
|
149 |
+
|
150 |
+
def format_prediction_string(labels, boxes, scores):
    """Serialize detections as a single space-separated string.

    Each detection contributes "label score x1 y1 x2 y2", with the score
    rendered to four decimal places; all fragments are joined by spaces.

    Args:
        labels: iterable of class labels.
        boxes: iterable of 4-element boxes (pascal_voc order).
        scores: iterable of confidence scores, parallel to labels/boxes.
    """
    template = "{0} {1:.4f} {2} {3} {4} {5}"
    fragments = [
        template.format(label, score, box[0], box[1], box[2], box[3])
        for label, score, box in zip(labels, scores, boxes)
    ]
    return " ".join(fragments)
|
156 |
+
|
157 |
def generate_diagnostic_report(predictions, labels, threshold=0.5):
|
158 |
# Initialize an empty report string
|
159 |
report = "Diagnostic Report:\n\n"
|
|
|
275 |
|
276 |
# Load the model (use your model loading function)
|
277 |
# Ensure the model path is correct and accessible
|
278 |
+
|
279 |
+
#model = create_model(num_classes=15)
|
280 |
+
#model.load_state_dict(torch.load('Doctoria CXR Thoraric Full Model.pth', map_location=torch.device('cpu')))
|
281 |
+
#model.eval()
|
282 |
+
|
283 |
+
def load_model(model_path):
    """Restore a trained VinDetector from a saved state_dict on CPU.

    Args:
        model_path: path to the checkpoint file produced by torch.save.

    Returns:
        The VinDetector instance with weights loaded, in eval mode.
    """
    # Create an instance of the VinDetector model.
    detector = VinDetector(num_classes=14)  # Adjust num_classes as needed.

    # map_location forces CPU so the checkpoint loads without a GPU.
    state = torch.load(model_path, map_location=torch.device('cpu'))
    detector.load_state_dict(state)

    detector.eval()  # evaluation mode for inference
    return detector
|
292 |
+
|
293 |
+
# Load the detector once at import time from the bundled checkpoint.
# NOTE(review): runs on every Streamlit script rerun — confirm whether
# caching (e.g. st.cache_resource) is wanted, and that the .pth file ships
# alongside app.py.
model = load_model('Doctoria CXR Thoraric Full Model.pth')
|
294 |
|
295 |
def process_image(image_path):
|
296 |
# Load and transform the image
|