lakshya-raj committed
Commit 583f98e · 1 Parent(s): baaac86

v3.0.1-Test Image Classifier with PyTorch

Files changed (2)
  1. app.py +33 -12
  2. requirements.txt +5 -1
app.py CHANGED
@@ -1,20 +1,41 @@
 import gradio as gr
 import numpy as np
+import torch
+import requests
+from PIL import Image
+from torchvision import transforms

-def sepia(input_img):
-    sepia_filter = np.array([
-        [0.393, 0.769, 0.189],
-        [0.349, 0.686, 0.168],
-        [0.272, 0.534, 0.131]
-    ])
-    sepia_img = input_img.dot(sepia_filter.T)
-    sepia_img /= sepia_img.max()
-    return sepia_img
+model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()

-def greet(name):
-    return "Hello " + name + "!!"
+# Download human-readable labels for ImageNet.
+response = requests.get("https://git.io/JJkYN")
+labels = response.text.split("\n")

-demo = gr.Interface(fn=sepia, inputs="image", outputs="image")
+# def sepia(input_img):
+#     sepia_filter = np.array([
+#         [0.393, 0.769, 0.189],
+#         [0.349, 0.686, 0.168],
+#         [0.272, 0.534, 0.131]
+#     ])
+#     sepia_img = input_img.dot(sepia_filter.T)
+#     sepia_img /= sepia_img.max()
+#     return sepia_img
+
+# def greet(name):
+#     return "Hello " + name + "!!"
+
+def predict(inp):
+    inp = transforms.ToTensor()(inp).unsqueeze(0)
+    with torch.no_grad():
+        prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
+    confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
+    return confidences
+
+# demo = gr.Interface(fn=sepia, inputs="image", outputs="image")
+demo = gr.Interface(fn=predict,
+                    inputs=gr.Image(type="pil"),
+                    outputs=gr.Label(num_top_classes=3),
+                    examples=["lion.jpg", "cheetah.jpg"])
 demo.launch()

 # iface = gr.Interface(fn=greet, inputs="text", outputs="text")
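
For reference, a minimal standalone sketch of the inference path this commit adds (not part of the commit itself): it assumes network access for torch.hub and the labels URL, and that an image such as the lion.jpg named in the examples list is available locally.

# Sketch: exercise the new ResNet-18 inference path outside the Gradio UI.
# Assumes network access and a local lion.jpg (the example file named in the diff).
import requests
import torch
from PIL import Image
from torchvision import transforms

model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
labels = requests.get("https://git.io/JJkYN").text.split("\n")

inp = transforms.ToTensor()(Image.open("lion.jpg").convert("RGB")).unsqueeze(0)
with torch.no_grad():
    probs = torch.nn.functional.softmax(model(inp)[0], dim=0)
top3 = sorted(zip(labels, probs.tolist()), key=lambda kv: kv[1], reverse=True)[:3]
print(top3)  # the lion-related ImageNet classes should rank near the top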
requirements.txt CHANGED
@@ -1,2 +1,6 @@
 gradio
-numpy
+numpy
+torch
+requests
+Pillow
+torchvision
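
As a quick sanity check, the updated dependency set can be smoke-tested after installation (a sketch, not part of the commit); note that the Pillow distribution is imported under the PIL name used in app.py.

# Sketch: confirm the new requirements resolve after `pip install -r requirements.txt`.
import gradio, numpy, requests, torch, torchvision
from PIL import Image  # provided by the Pillow package
print(gradio.__version__, torch.__version__, torchvision.__version__)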