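# Gradio sketch-recognition demo ("Pictionary"-style): a small CNN guesses what
# the user is drawing on the sketchpad in real time.
# Expects two files next to this script: class_names.txt (one label per line)
# and pytorch_model.bin (the trained weights loaded below).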
from pathlib import Path

import gradio as gr
import numpy as np
import torch
from PIL import Image
from torch import nn

LABELS = Path('class_names.txt').read_text().splitlines()
# A small CNN: three conv + pool blocks reduce a 1x28x28 sketch to
# 128 x 3 x 3 = 1152 features, followed by a two-layer classifier head.
model = nn.Sequential(
    nn.Conv2d(1, 32, 3, padding='same'),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(32, 64, 3, padding='same'),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(64, 128, 3, padding='same'),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Flatten(),
    nn.Linear(1152, 256),
    nn.ReLU(),
    nn.Linear(256, len(LABELS)),
)
# Load the pretrained weights; strict=False tolerates minor key mismatches
# between the checkpoint and the Sequential module names.
state_dict = torch.load('pytorch_model.bin', map_location='cpu')
model.load_state_dict(state_dict, strict=False)
model.eval()
def predict(im_dict):
    # The sketchpad passes an editor dict; the drawing lives in the alpha
    # channel of the RGBA "composite" image.
    im_raw = im_dict['composite'][:, :, 3]

    # Downscale to a 28x28 grayscale bitmap (the input size assumed by the
    # model above); nearest-neighbour resampling keeps the strokes crisp.
    img = Image.fromarray(im_raw)
    img_small = img.resize((28, 28), resample=Image.NEAREST)
    im = np.asarray(img_small, dtype=np.uint8)

    # Normalise to [0, 1] and add batch and channel dims: (1, 1, 28, 28).
    x = torch.tensor(im, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.

    with torch.no_grad():
        out = model(x)

    # Return the top-5 classes as a {label: probability} dict for the Label output.
    probabilities = torch.nn.functional.softmax(out[0], dim=0)
    values, indices = torch.topk(probabilities, 5)
    return {LABELS[i]: v.item() for i, v in zip(indices, values)}
interface = gr.Interface(
    predict,
    inputs="sketchpad",
    outputs="label",
    title="Sketch Recognition",
    description="Who wants to play Pictionary? Draw a common object like a shovel or a laptop, and the algorithm will guess in real time!",
    article="<p style='text-align: center'>Sketch Recognition | Demo Model</p>",
    live=True,
)

interface.launch(debug=True)