luis56125 committed on
Commit
2e3563e
1 Parent(s): 4196c2f

Update app.py

Files changed (1)
  1. app.py +31 -33
app.py CHANGED
@@ -1,48 +1,46 @@
-from fastai.vision.all import *
 import gradio as gr
-import torchvision.transforms as transforms
+from fastai.vision.all import *
+
 from pathlib import Path
 import PIL
+import torchvision.transforms as transforms
+
+
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model = torch.jit.load("unet.pth")
 model = model.cpu()
 model.eval()
 
-
 def transform_image(image):
+    my_transforms = transforms.Compose([transforms.ToTensor(),
+                                        transforms.Normalize(
+                                            [0.485, 0.456, 0.406],
+                                            [0.229, 0.224, 0.225])])
+    image_aux = image
+
+    image = transforms.Resize((480,640))(Image.fromarray(image))
+    tensor = my_transforms(image_aux).unsqueeze(0).to(device)
+
 
-    #mask = PILMask.create(Path(str(image).replace("Images","Labels").replace("color","gt").replace(".jpg",".png")))
-    #image = PIL.Image.open(image)
-    my_transforms = transforms.Compose([transforms.ToTensor(),
-                                        transforms.Normalize(
-                                            [0.485, 0.456, 0.406],
-                                            [0.229, 0.224, 0.225])])
-    image_aux = image
-    #my_transforms(image_aux).unsqueeze(0).to(device)
-    image = transforms.Resize((480,640))(Image.fromarray(image))
-    tensor = my_transforms(image_aux).unsqueeze(0).to(device)
-    #tensor = transform_image(image=image)
-
-
-
-    model.to(device)
-    with torch.no_grad():
+    model.to(device)
+    with torch.no_grad():
         outputs = model(tensor)
-
-    outputs = torch.argmax(outputs,1)
-
-    mask = np.array(outputs.cpu())
-    mask[mask==0]=255
-    mask[mask==1]=150
-    mask[mask==2]=76
-    mask[mask==3]=25
-    mask[mask==4]=0
-
-    mask=np.reshape(mask,(480,640))
-    return Image.fromarray(mask.astype('uint8'))
+
+    outputs = torch.argmax(outputs,1)
+
+
+    mask = np.array(outputs.cpu())
+    mask[mask==0]=255
+    mask[mask==1]=150
+    mask[mask==2]=76
+    mask[mask==3]=25
+    mask[mask==4]=0
+
+    mask=np.reshape(mask,(480,640))
+    return Image.fromarray(mask.astype('uint8'))
+
 
 
 # Creamos la interfaz y la lanzamos.
-gr.Interface(fn=transform_image, inputs=gr.inputs.Image(shape=(640, 480)), outputs=gr.outputs.Image(),examples=['color_188.jpg','color_189.jpg']).launch(share=False)
-
+gr.Interface(fn=transform_image, inputs=gr.inputs.Image(shape=(640, 480)), outputs=gr.outputs.Image(),examples=['color_154.jpg','color_189.jpg']).launch(share=False)
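
For reference, a consolidated sketch of app.py as it stands after this commit. In the committed file the names torch, np and Image come from the fastai star-import; here they are imported explicitly so the snippet is self-contained, and comments are added. The shape comments and the note about which array actually gets normalized are a reading of the diff, not part of the commit, so treat this as a sketch rather than the exact file contents.

# Consolidated view of app.py after commit 2e3563e (explicit imports, added comments).
import gradio as gr
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# TorchScript checkpoint stored alongside app.py in the Space.
model = torch.jit.load("unet.pth")
model = model.cpu()
model.eval()


def transform_image(image):
    # ImageNet-style normalization, as in the committed code.
    my_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
    image_aux = image

    # The committed code resizes a copy but feeds the original array (image_aux)
    # to the transforms; the Gradio input component already delivers 640x480.
    image = transforms.Resize((480, 640))(Image.fromarray(image))
    tensor = my_transforms(image_aux).unsqueeze(0).to(device)

    model.to(device)
    with torch.no_grad():
        outputs = model(tensor)          # assumed shape (1, num_classes, 480, 640)

    outputs = torch.argmax(outputs, 1)   # per-pixel class index

    # Map class indices 0-4 to distinct grey levels so the mask displays as an image.
    mask = np.array(outputs.cpu())
    mask[mask == 0] = 255
    mask[mask == 1] = 150
    mask[mask == 2] = 76
    mask[mask == 3] = 25
    mask[mask == 4] = 0

    mask = np.reshape(mask, (480, 640))
    return Image.fromarray(mask.astype('uint8'))


# Create the interface and launch it (older gr.inputs / gr.outputs API).
gr.Interface(fn=transform_image,
             inputs=gr.inputs.Image(shape=(640, 480)),
             outputs=gr.outputs.Image(),
             examples=['color_154.jpg', 'color_189.jpg']).launch(share=False)

Running this locally needs unet.pth and the two example images (color_154.jpg, color_189.jpg) next to app.py, plus a Gradio release that still exposes the gr.inputs / gr.outputs classes used here.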