import torch
import gradio as gr
from torchvision import transforms as T
from PIL import Image
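# NOTE: this script targets the Gradio 3.x Blocks API (gr.Box, gr.Dataset,
# gr.Image.update); several of these names were removed or renamed in
# Gradio 4, so pin gradio<4 when reproducing this Space.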
classes=["Other","Anger","Contempt","Happiness","Surprise"] | |
# Load the TorchScript model (a ResNet-18 fine-tuned from ImageNet weights)
# and turn off autograd on the model's parameters: the demo only runs inference.
def load_model():
    model = torch.jit.load('model_2_60acc.pt', map_location=torch.device('cpu')).eval()
    for param in model.parameters():
        param.requires_grad = False
    return model

model = load_model()
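# `torch.jit.load` works here because the checkpoint was saved as TorchScript
# (`torch.jit.save`); a plain state_dict would instead require instantiating
# the model class and calling `load_state_dict`. The "60acc" suffix in the
# filename presumably records ~60% validation accuracy (an assumption drawn
# from the name only).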
# preprocess data
pretrained_mean = [0.485, 0.456, 0.406]
pretrained_std = [0.229, 0.224, 0.225]
optical_flow_t = T.Compose([
    T.Resize((224, 224)),
    T.ToTensor(),
    T.Normalize(pretrained_mean, pretrained_std),
])
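# The Normalize statistics above are the standard ImageNet channel mean/std;
# inputs must be normalized the same way the backbone was normalized during
# pretraining, or the learned features will not transfer.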
ogpic = ['ogtest0.png', 'ogtest1.png', 'ogtest2.png', 'ogtest3.png', 'ogtest4.png', 'ogtest5.png']
ofpic = ['oftest0.jpg', 'oftest1.jpg', 'oftest2.jpg', 'oftest3.jpg', 'oftest4.jpg', 'oftest5.jpg']
diffpic = ['difftest0.png', 'difftest1.png', 'difftest2.png', 'difftest3.png', 'difftest4.png', 'difftest5.png']
exp = [[p] for p in ogpic]
vid = ["vidtest0.mp4", "vidtest1.mp4", "vidtest2.mp4", "vidtest3.mp4", "vidtest4.mp4", "vidtest5.mp4"]
actual = ["Contempt", "Other", "Happiness", "Anger", "Other", "Contempt"]
def main():
    with gr.Blocks() as demo:
        # Per-session index of the currently selected example.
        selected = gr.State(value=0)

        def set_example_image(idx):
            # gr.Dataset with type="index" passes the clicked row's index.
            return idx, gr.Image.update(value=exp[idx][0])

        def predss(idx):
            # Preprocess the optical-flow image (force 3 channels) and add a batch dimension.
            processed_img = optical_flow_t(Image.open(ofpic[idx]).convert('RGB'))
            batch = torch.unsqueeze(processed_img, dim=0)
            # get predictions
            with torch.no_grad():
                output = model(batch)  # feed network
            probs = torch.nn.functional.softmax(output[0], dim=0)
            top_prob, top_idx = torch.topk(probs, len(classes))
            preds = {classes[i.item()]: p.item() for i, p in zip(top_idx, top_prob)}
            return ogpic[idx], ofpic[idx], diffpic[idx], vid[idx], preds, actual[idx]
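        # gr.Label renders a {label: probability} dict as a ranked confidence
        # list, which is why predss returns the softmax scores in that shape.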
        gr.Markdown('''## Micro-expression recognition''')
        with gr.Box():
            input_image = gr.Image(type="pil", label="Input Image")
            example_images = gr.Dataset(components=[input_image],
                                        samples=exp,
                                        type="index")
        with gr.Row():
            btn = gr.Button("Process")
        gr.Markdown('''### Original Image''')
        with gr.Box():
            with gr.Row():
                img_before = gr.Image(label="Original Image")
                img_after1 = gr.Image(label="Difference frame")
            with gr.Row():
                img_after = gr.Image(label="Optical flow")
                label_predict = gr.Label(label="Model Prediction")
        with gr.Box():
            with gr.Row():
                video = gr.Video(label="Original Video")
            with gr.Row():
                label_actual = gr.Label(label="Actual Emotion")
        # events
        btn.click(fn=predss,
                  inputs=[selected],
                  outputs=[img_before, img_after, img_after1, video, label_predict, label_actual])
        example_images.click(fn=set_example_image,
                             inputs=[example_images],
                             outputs=[selected, input_image])
    demo.launch()


if __name__ == '__main__':
    main()