xiazhi1 committed • Commit f2d83f6 • Parent: a3351b1
add image output2
app.py CHANGED

@@ -8,14 +8,14 @@ from cell_segmentation.inference.inference_cellvit_experiment_monuseg import Inf
 
 
 ## local | remote
-RUN_MODE = "
+RUN_MODE = "local"
 if RUN_MODE != "local":
-    os.system("wget https://huggingface.co/xiazhi/LKCell
+    os.system("wget https://huggingface.co/xiazhi/LKCell/resolve/main/model_best.pth")
     ## examples
-    os.system("wget https://huggingface.co/xiazhi/LKCell
-    os.system("wget https://huggingface.co/xiazhi/LKCell
-    os.system("wget https://huggingface.co/xiazhi/LKCell
-    os.system("wget https://huggingface.co/xiazhi/LKCell
+    os.system("wget https://huggingface.co/xiazhi/LKCell/resolve/main/1.png")
+    os.system("wget https://huggingface.co/xiazhi/LKCell/resolve/main/2.png")
+    os.system("wget https://huggingface.co/xiazhi/LKCell/resolve/main/3.png")
+    os.system("wget https://huggingface.co/xiazhi/LKCell/resolve/main/4.png")
 
 ## step 1: set up model
 

@@ -62,10 +62,12 @@ def click_process(image_input , type_dataset):
         resize_shape = (512,512)
         image_input = cv2.resize(image_input, resize_shape)
         monuseg_inf.run_single_image_inference(monuseg_inf.model, image_input)
-
-    image_output = cv2.imread("
-    image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2RGB)
-    return image_output
+
+    image_output = cv2.imread("raw_pred.png")
+    image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2RGB)
+    image_output2 = cv2.imread("pred_img.png")
+    image_output2 = cv2.cvtColor(image_output2, cv2.COLOR_BGR2RGB)
+    return image_output,image_output2
 
 
 demo = gr.Blocks(title="LkCell")

@@ -81,13 +83,14 @@ with demo:
             Type_dataset = gr.Radio(choices=["pannuke", "monuseg"], label=" input image's dataset type",value="pannuke")
 
         with gr.Column():
-
-
+            image_output = gr.Image(type="numpy", label="image prediction",height=480,width=480)
+            image_output2 = gr.Image(type="numpy", label="all predictions",height=480)
+
     with gr.Row():
         Button_run = gr.Button("🚀 Submit (发送) ")
-        clear_button = gr.ClearButton(components=[Image_input,Type_dataset,image_output],value="🧹 Clear (清除)")
+        clear_button = gr.ClearButton(components=[Image_input,Type_dataset,image_output,image_output2],value="🧹 Clear (清除)")
 
-    Button_run.click(fn=click_process, inputs=[Image_input, Type_dataset ], outputs=[image_output])
+    Button_run.click(fn=click_process, inputs=[Image_input, Type_dataset ], outputs=[image_output,image_output2])
 
     ## guiline
     gr.Markdown(value="""

@@ -103,7 +106,7 @@ with demo:
             ['3.png', "monuseg"],
             ['4.png', "monuseg"],
         ],
-        inputs=[Image_input, Type_dataset], outputs=[image_output], label="Examples")
+        inputs=[Image_input, Type_dataset], outputs=[image_output,image_output2], label="Examples")
     gr.HTML(value="""
     <p style="text-align:center; color:orange"> <a href='https://github.com/ziwei-cui/LKCellv1' target='_blank'>Github Repo</a></p>
     """)
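For reference, below is a minimal, self-contained sketch of the one-input / two-output Gradio wiring that app.py uses after this commit. `dummy_process` and the labels are illustrative placeholders, not part of the Space: the real `click_process` runs LKCell inference and reads back raw_pred.png / pred_img.png.

```python
# Minimal sketch of the one-input / two-output wiring introduced above.
# dummy_process is a placeholder for the actual LKCell inference call.
import gradio as gr

def dummy_process(image, dataset_type):
    # The real click_process returns the contents of raw_pred.png and pred_img.png;
    # here we simply echo the input twice to show the two-output contract.
    return image, image

with gr.Blocks(title="two-output sketch") as demo:
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="numpy", label="input image")
            dataset_type = gr.Radio(choices=["pannuke", "monuseg"], value="pannuke",
                                    label="input image's dataset type")
        with gr.Column():
            image_output = gr.Image(type="numpy", label="image prediction")
            image_output2 = gr.Image(type="numpy", label="all predictions")
    with gr.Row():
        run_button = gr.Button("Submit")
        clear_button = gr.ClearButton(
            components=[image_input, dataset_type, image_output, image_output2])
    # A handler returning two values must list two components in `outputs`,
    # exactly as Button_run.click does in the diff above.
    run_button.click(fn=dummy_process, inputs=[image_input, dataset_type],
                     outputs=[image_output, image_output2])

if __name__ == "__main__":
    demo.launch()
```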
cell_segmentation/inference/inference_cellvit_experiment_monuseg.py CHANGED

@@ -874,7 +874,7 @@ class MoNuSegInference:
             for poly, c in zip(pred_contours_polygon, pred_contour_colors_polygon)
         ]
         placeholder[: h, 3 * w : 4 * w, :3] = np.asarray(pred_cell_image) / 255
-
+        pred_cell_image.save("raw_pred.png")
         # plotting
         fig, axs = plt.subplots(figsize=(3, 2), dpi=1200)
         axs.imshow(placeholder)

@@ -901,7 +901,7 @@ class MoNuSegInference:
         for y_seg in grid_y:
             axs.axhline(y_seg, color="black")
 
-        fig.suptitle(f"
+        fig.suptitle(f"All Predictions for input image", fontsize=6)
         fig.tight_layout()
         fig.savefig("pred_img.png")
         plt.close()
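The commit relies on a file-based hand-off: the inference code saves the raw prediction as raw_pred.png (and the summary figure as pred_img.png via savefig), and `click_process` in app.py reads the files back with OpenCV, which loads images in BGR order, hence the cvtColor calls. A small sketch of that round trip, with a dummy array standing in for `pred_cell_image`:

```python
# Sketch of the save/read hand-off between the inference code and app.py.
# The dummy array stands in for pred_cell_image (a PIL image in MoNuSegInference).
import cv2
import numpy as np
from PIL import Image

pred_cell_image = Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))
pred_cell_image.save("raw_pred.png")       # what the diff above adds at line 877

image_output = cv2.imread("raw_pred.png")  # OpenCV returns BGR
image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2RGB)  # RGB for gr.Image(type="numpy")
```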
cell_segmentation/inference/inference_cellvit_experiment_pannuke.py CHANGED

@@ -1088,7 +1088,7 @@ class InferenceCellViT:
         for y_seg in grid_y:
             axs.axhline(y_seg, color="black")
 
-        fig.suptitle(f"Predictions for input image")
+        fig.suptitle(f"All Predictions for input image")
         fig.tight_layout()
         fig.savefig("pred_img.png")
         plt.close()