AI-Manith committed
Commit fb641b5
1 Parent(s): 376d76a

Update app.py

Files changed (1): app.py (+4, -9)
app.py CHANGED
@@ -16,13 +16,12 @@ def load_model_and_mtcnn(model_path):
 # Function to preprocess the image and return both the tensor and the final PIL image for display
 def preprocess_image(image, mtcnn, device):
     processed_image = image # Initialize with the original image
-    cropped_image = None
     try:
         # Directly call mtcnn with the image to get cropped faces
         cropped_faces = mtcnn(image)
         if cropped_faces is not None and len(cropped_faces) > 0:
             # Convert the first detected face tensor back to PIL Image for further processing
-            cropped_image = to_pil_image(cropped_faces[0].cpu())
+            processed_image = to_pil_image(cropped_faces[0].cpu(), mode='RGB')
     except Exception as e:
         st.write(f"Exception in face detection: {e}")
         processed_image = image
@@ -32,13 +31,9 @@ def preprocess_image(image, mtcnn, device):
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
     ])
-    # Apply the transformation to the cropped image if available
-    if cropped_image is not None:
-        processed_image = transform(cropped_image).to(device)
-        # Add a batch dimension
-        processed_image = processed_image.unsqueeze(0)
-
-    return processed_image, cropped_image
+    image_tensor = transform(processed_image).to(device)
+    image_tensor = image_tensor.unsqueeze(0) # Add a batch dimension
+    return image_tensor, processed_image
 
 # Function for inference
 def predict(image_tensor, model, device):
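
For context, here is a minimal sketch of how the revised preprocess_image could be wired into the rest of the Streamlit app. Only the function names and signatures visible in the diff (load_model_and_mtcnn, preprocess_image, predict) and the use of streamlit/torch come from the source; the return order of load_model_and_mtcnn, the "model.pth" path, the uploader UI, and the device selection are assumptions for illustration, not the app's actual code.

# Hypothetical caller for the functions shown in the diff (not part of the commit).
import streamlit as st
import torch
from PIL import Image

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Assumed return order; the diff only shows the function's signature.
model, mtcnn = load_model_and_mtcnn("model.pth")  # "model.pth" is a placeholder path

uploaded = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
if uploaded is not None:
    image = Image.open(uploaded).convert("RGB")
    # preprocess_image now always returns a batched tensor plus the PIL image
    # that was actually normalized: the MTCNN crop when a face was found,
    # otherwise the original upload.
    image_tensor, display_image = preprocess_image(image, mtcnn, device)
    st.image(display_image, caption="Image passed to the model")
    st.write(predict(image_tensor, model, device))

Because the transform is now applied unconditionally to processed_image, the caller gets a usable batched tensor whether or not MTCNN found a face, instead of sometimes receiving the untransformed PIL image.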