Brightttttt
committed
Commit · 00f5df0
1 Parent(s): 36124a8
Create app.py
app.py
ADDED
@@ -0,0 +1,103 @@
import torch
import gradio as gr
from torchvision import transforms as T
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import imageio
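
# class labels corresponding to the model's output indices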
classes = ["Other", "Anger", "Contempt", "Happiness", "Surprise"]

# load the TorchScript checkpoint (a ResNet-18 fine-tuned from ImageNet
# weights) on the CPU and turn off autograd on the model's parameters
def load_model(idx):  # idx is currently unused
    model = torch.jit.load('/content/model_2_60acc.pt', map_location=torch.device('cpu')).eval()
    for param in model.parameters():
        param.requires_grad = False
    return model

model = load_model(0)
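
# Note: a checkpoint loadable with torch.jit.load is typically produced by
# tracing or scripting the trained model, e.g.
#   torch.jit.trace(trained_model, example_input).save('model_2_60acc.pt')
# (illustrative assumption; the export step is not part of this commit)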

# preprocess data: standard ImageNet normalization statistics
pretrained_std = torch.Tensor([0.229, 0.224, 0.225])
pretrained_mean = torch.Tensor([0.485, 0.456, 0.406])

optical_flow_t = T.Compose([
    T.Resize((224, 224)),
    T.ToTensor(),
    T.Normalize(pretrained_mean, pretrained_std),
])
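
# optical_flow_t maps an RGB PIL image to a normalized (3, 224, 224) float
# tensor, the input shape the exported model expects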

# demo assets: for each example index, the original frame, the optical-flow
# image, the frame-difference image, the source video, and the true label
ogpic = ['/content/ogtest0.png', '/content/ogtest1.png', '/content/ogtest2.png', '/content/ogtest3.png', '/content/ogtest4.png', '/content/ogtest5.png']
ofpic = ['/content/oftest0.jpg', '/content/oftest1.jpg', '/content/oftest2.jpg', '/content/oftest3.jpg', '/content/oftest4.jpg', '/content/oftest5.jpg']
diffpic = ['/content/difftest0.png', '/content/difftest1.png', '/content/difftest2.png', '/content/difftest3.png', '/content/difftest4.png', '/content/difftest5.png']
exp = [[p] for p in ogpic]  # gr.Dataset rows: one original frame per example
vid = ['/content/vidtest0.mp4', '/content/vidtest1.mp4', '/content/vidtest2.mp4', '/content/vidtest3.mp4', '/content/vidtest4.mp4', '/content/vidtest5.mp4']
actual = ["Contempt", "Other", "Happiness", "Anger", "Other", "Contempt"]

def main():

    with gr.Blocks() as demo:
        aa = gr.Variable(value=0)  # index of the currently selected example

        def set_example_image(img):
            aa.value = img  # img is the clicked dataset row index (type="index")
            return gr.Image.update(value=exp[img][0])
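
        # note: gr.Variable was renamed gr.State in later Gradio releases;
        # this code targets the older API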

        def predss(img):  # img (the displayed image) is unused; aa.value selects the example
            processed_img = optical_flow_t(Image.open(ofpic[aa.value]))
            tb = torch.unsqueeze(processed_img, dim=0)
            loaded_test = DataLoader(tb, batch_size=1, shuffle=False)
            # get predictions
            for i, inputs in enumerate(loaded_test):
                with torch.no_grad():
                    output = model(inputs.to(torch.device('cpu')))  # feed the network
                    probs = torch.nn.functional.softmax(output[0], dim=0)
                    top5_prob, top5_idx = torch.topk(probs, 5)  # top-5 of 5 classes, i.e. all of them, ranked
                    preds = {classes[idx]: prob.item() for idx, prob in zip(top5_idx, top5_prob)}
            return ogpic[aa.value], ofpic[aa.value], diffpic[aa.value], vid[aa.value], preds, actual[aa.value]
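
        # predss returns a {label: confidence} dict, which gr.Label renders
        # as a ranked list of class confidences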

        gr.Markdown('''## Micro-expression recognition''')

        with gr.Box():
            input_image = gr.Image(type="pil", label="Input Image")
            example_images = gr.Dataset(components=[input_image],
                                        samples=exp,
                                        type="index")

            with gr.Row():
                btn = gr.Button("Process")
        gr.Markdown('''### Original Image''')
        with gr.Box():
            with gr.Row():
                img_before = gr.Image(label="Original Image")
                img_after1 = gr.Image(label="Difference frame")
            with gr.Row():
                img_after = gr.Image(label="Optical flow")
                label_predict = gr.Label(label="Prediction")
        with gr.Box():
            with gr.Row():
                video = gr.Video(label="Original Video")
            with gr.Row():
                label_actual = gr.Label(label="Actual Emotion")

        # events
        btn.click(fn=predss,
                  inputs=[input_image],
                  outputs=[img_before, img_after, img_after1, video, label_predict, label_actual])

        example_images.click(fn=set_example_image,
                             inputs=example_images,
                             outputs=example_images.components)
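
        # flow: clicking an example stores its index in aa and previews the
        # image; "Process" then runs predss, which reads that stored index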

        demo.launch()


if __name__ == '__main__':
    main()
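
For reference, a minimal headless sketch of the same inference path (no Gradio UI), assuming the model checkpoint and an optical-flow frame from this commit are on disk:

import torch
from PIL import Image
from torchvision import transforms as T

classes = ["Other", "Anger", "Contempt", "Happiness", "Surprise"]

# same preprocessing as app.py
tfm = T.Compose([
    T.Resize((224, 224)),
    T.ToTensor(),
    T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

model = torch.jit.load('/content/model_2_60acc.pt', map_location='cpu').eval()
x = tfm(Image.open('/content/oftest0.jpg')).unsqueeze(0)  # shape (1, 3, 224, 224)
with torch.no_grad():
    probs = torch.nn.functional.softmax(model(x)[0], dim=0)
print({c: p.item() for c, p in zip(classes, probs)})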