AI-Manith committed on
Commit
6bd4420
1 Parent(s): 0767bad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -6
app.py CHANGED
@@ -1,9 +1,8 @@
1
  import streamlit as st
2
  from PIL import Image
3
  import torch
4
- from torchvision import transforms, utils
5
  from facenet_pytorch import MTCNN
6
- from torchvision.transforms.functional import to_pil_image
7
 
8
  # Function to load the ViT model and MTCNN
9
  def load_model_and_mtcnn(model_path):
@@ -21,7 +20,7 @@ def preprocess_image(image, mtcnn, device):
21
  cropped_faces = mtcnn(image)
22
  if cropped_faces is not None and len(cropped_faces) > 0:
23
  # Convert the first detected face tensor back to PIL Image for further processing
24
- processed_image = to_pil_image(cropped_faces[0].cpu())
25
  except Exception as e:
26
  st.write(f"Exception in face detection: {e}")
27
  processed_image = image
@@ -40,8 +39,8 @@ def predict(image_tensor, model, device):
40
  model.eval()
41
  with torch.no_grad():
42
  outputs = model(image_tensor)
43
- # Adjust for your model's output if it does not have a 'logits' attribute
44
- probabilities = torch.nn.functional.softmax(outputs.logits, dim=1)
45
  predicted_class = torch.argmax(probabilities, dim=1)
46
  return predicted_class, probabilities
47
 
@@ -61,4 +60,4 @@ if uploaded_file is not None:
61
 
62
  st.write(f"Predicted class: {predicted_class.item()}")
63
  # Display the final processed image
64
- st.image(final_image, caption='Processed Image', use_column_width=True)
 
1
  import streamlit as st
2
  from PIL import Image
3
  import torch
4
+ from torchvision import transforms
5
  from facenet_pytorch import MTCNN
 
6
 
7
  # Function to load the ViT model and MTCNN
8
  def load_model_and_mtcnn(model_path):
 
20
  cropped_faces = mtcnn(image)
21
  if cropped_faces is not None and len(cropped_faces) > 0:
22
  # Convert the first detected face tensor back to PIL Image for further processing
23
+ processed_image = cropped_faces[0].cpu()
24
  except Exception as e:
25
  st.write(f"Exception in face detection: {e}")
26
  processed_image = image
 
39
  model.eval()
40
  with torch.no_grad():
41
  outputs = model(image_tensor)
42
+ # Adjust for your model's output structure
43
+ probabilities = torch.nn.functional.softmax(outputs, dim=1)
44
  predicted_class = torch.argmax(probabilities, dim=1)
45
  return predicted_class, probabilities
46
 
 
60
 
61
  st.write(f"Predicted class: {predicted_class.item()}")
62
  # Display the final processed image
63
+ st.image(final_image, caption='Processed Image', use_column_width=True)