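"""Gradio demo: HEp-2 cell staining-pattern recognition.

Downloads a pretrained classifier from the Genius-Society/HEp2 repo on
ModelScope and predicts one of six staining patterns for an uploaded
cell image.
"""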
import os
import torch
import gradio as gr
from PIL import Image
from torchvision import transforms
from modelscope import snapshot_download

# Download the pretrained model snapshot; files are cached locally.
MODEL_DIR = snapshot_download("Genius-Society/HEp2", cache_dir="./__pycache__")

# The six HEp-2 staining patterns the model can recognize.
CLASSES = [
    "Centromere",
    "Golgi",
    "Homogeneous",
    "NuMem",
    "Nucleolar",
    "Speckled",
]
def embedding(img_path: str) -> torch.Tensor:
    """Load an image and convert it into a normalized model input tensor."""
    compose = transforms.Compose(
        [
            transforms.Resize(224),
            transforms.CenterCrop(224),
            # NOTE: RandomAffine applies a random perturbation even at
            # inference time; it is kept here to match the original pipeline.
            transforms.RandomAffine(5),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]
    )
    img = Image.open(img_path).convert("RGB")
    return compose(img)

def infer(target: str):
    """Classify an uploaded cell image; return its file name and class label."""
    if not target:
        return None, "Please upload a cell picture!"
    # Load the serialized model onto the CPU and switch it to inference mode.
    model = torch.load(f"{MODEL_DIR}/save.pt", map_location=torch.device("cpu"))
    model.eval()
    torch.cuda.empty_cache()
    img_tensor: torch.Tensor = embedding(target)
    with torch.no_grad():
        output: torch.Tensor = model(img_tensor.unsqueeze(0))
    predict = torch.argmax(output, dim=1).item()
    return os.path.basename(target), CLASSES[predict]

if __name__ == "__main__":
    # One bundled example image per class, shipped with the model snapshot.
    example_imgs = [f"{MODEL_DIR}/examples/{cls}.png" for cls in CLASSES]

    with gr.Blocks() as demo:
        gr.Interface(
            fn=infer,
            inputs=gr.Image(type="filepath", label="Upload a cell picture"),
            outputs=[
                gr.Textbox(label="Picture name", show_copy_button=True),
                gr.Textbox(label="Recognition result", show_copy_button=True),
            ],
            # This text is guidance rather than a heading, so `description`
            # fits better than `title`.
            description="It is recommended to upload HEp2 cell images in PNG format.",
            examples=example_imgs,
            flagging_mode="never",
            cache_examples=False,
        )

    demo.launch()
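
# To run the demo locally (assuming this script is saved as app.py):
#     pip install torch torchvision gradio modelscope pillow
#     python app.py
# Gradio prints a local URL (http://127.0.0.1:7860 by default) once the app starts.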