Carzit committed on
Commit
677c2f0
1 Parent(s): 81626e2

Upload 4 files

Browse files
Files changed (5) hide show
  1. .gitattributes +1 -0
  2. app.py +25 -17
  3. crop.py +1 -1
  4. examples/Eda.png +3 -0
  5. examples/Fairies.png +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ examples/Eda.png filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -21,7 +21,7 @@ class FaceCrop:
21
  def __init__(self):
22
  self.device = select_device()
23
  self.half = self.device.type != 'cpu'
24
- self.results = {}
25
 
26
  def load_dataset(self, source):
27
  self.source = source
@@ -75,20 +75,16 @@ class FaceCrop:
75
  det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
76
 
77
  # Write results
78
- ind = 0
79
  for *xyxy, conf, cls in det:
80
- if conf > 0.6: # Write to file
81
-
82
  x, y, w, h = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
83
- self.results[ind] = crop(self.source, (x, y), mode=self.mode, size=self.target_size, box=(w, h), face_ratio=self.face_ratio, shreshold=self.threshold)
84
-
85
- ind += 1
86
 
87
- def run(img, mode, width, height):
88
  face_crop_pipeline.load_dataset(img)
89
- face_crop_pipeline.set_crop_config(mode=mode, target_size=(width,height))
90
  face_crop_pipeline.process()
91
- return face_crop_pipeline.results[0]
92
 
93
  if __name__ == '__main__':
94
  model_path = huggingface_hub.hf_hub_download("Carzit/yolo5x_anime", "yolov5x_anime.pt")
@@ -97,17 +93,29 @@ if __name__ == '__main__':
97
 
98
  app = gr.Blocks()
99
  with app:
100
- gr.Markdown("# Anime Face Crop")
101
  with gr.Row():
102
- input_img = gr.Image(label="input image", image_mode="RGB", type='filepath')
103
- output_img = gr.Image(label="result", image_mode="RGB")
 
104
  with gr.Row():
105
- crop_mode = gr.Dropdown([0, 1, 2, 3], label="Crop Mode", info="0:Auto; 1:No Scale; 2:Full Screen; 3:Fixed Face Ratio")
106
- tgt_width = gr.Slider(10, 2048, value=512, label="Width")
107
- tgt_height = gr.Slider(10, 2048, value=512, label="Height")
108
 
 
 
 
109
  run_btn = gr.Button(variant="primary")
110
- run_btn.click(run, [input_img, crop_mode, tgt_width, tgt_height], [output_img])
 
 
 
 
 
 
 
 
111
  app.launch()
112
 
113
 
 
21
  def __init__(self):
22
  self.device = select_device()
23
  self.half = self.device.type != 'cpu'
24
+ self.results = []
25
 
26
  def load_dataset(self, source):
27
  self.source = source
 
75
  det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
76
 
77
  # Write results
 
78
  for *xyxy, conf, cls in det:
79
+ if conf > 0.6: # Write to file
 
80
  x, y, w, h = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
81
+ self.results.append(crop(self.source, (x, y), mode=self.mode, size=self.target_size, box=(w, h), face_ratio=self.face_ratio, shreshold=self.threshold))
 
 
82
 
83
+ def run(img, mode, width, height, face_ratio, threshold):
84
  face_crop_pipeline.load_dataset(img)
85
+ face_crop_pipeline.set_crop_config(mode=mode, target_size=(width,height), face_ratio=face_ratio, threshold=threshold)
86
  face_crop_pipeline.process()
87
+ return face_crop_pipeline.results
88
 
89
  if __name__ == '__main__':
90
  model_path = huggingface_hub.hf_hub_download("Carzit/yolo5x_anime", "yolov5x_anime.pt")
 
93
 
94
  app = gr.Blocks()
95
  with app:
96
+ gr.Markdown("# Face Crop Anime")
97
  with gr.Row():
98
+ input_img = gr.Image(label="Input Image", image_mode="RGB", type='filepath')
99
+ output_img = gr.Gallery(label="Cropped Image")
100
+
101
  with gr.Row():
102
+ crop_mode = gr.Dropdown(['Auto', 'No Scale', 'Full Screen', 'Fixed Face Propotion'], label="Crop Mode", value='Auto', type='index')
103
+ tgt_width = gr.Slider(32, 2048, value=512, label="Width")
104
+ tgt_height = gr.Slider(32, 2048, value=512, label="Height")
105
 
106
+ with gr.Row():
107
+ face_ratio = gr.Slider(1, 5, step=0.1, value=2, label="Face Ratio", info="Necessary if choosing \'Auto\' or 'Fixed Face Propotion' Mode")
108
+ threshold = gr.Slider(1, 5, step=0.1, value=1.5, label="Threshold", info="Necessary if choosing \'Auto\' Mode")
109
  run_btn = gr.Button(variant="primary")
110
+
111
+ with gr.Row():
112
+ examples_data = [["examples/Eda.png"],["examples/Fairies.png"]]
113
+ examples = gr.Dataset(components=[input_img], samples=examples_data)
114
+
115
+ examples.click(lambda x: x[0], [examples], [input_img])
116
+ run_btn.click(run, [input_img, crop_mode, tgt_width, tgt_height, face_ratio, threshold], [output_img])
117
+
118
+
119
  app.launch()
120
 
121
 
crop.py CHANGED
@@ -3,7 +3,7 @@ from pathlib import Path
3
  from PIL import Image
4
  import numpy as np
5
 
6
- def crop(image_path, point, mode=0, size=(512, 512), box=None, face_ratio=3, shreshold=1.5):
7
  img = Image.open(image_path)
8
  img_width, img_height = img.size
9
  tgt_width, tgt_height = size
 
3
  from PIL import Image
4
  import numpy as np
5
 
6
+ def crop(image_path, point, mode=0, size=(512, 512), box=None, face_ratio=2, shreshold=1.5):
7
  img = Image.open(image_path)
8
  img_width, img_height = img.size
9
  tgt_width, tgt_height = size
examples/Eda.png ADDED

Git LFS Details

  • SHA256: 58544ddc04fc88f4375f3b075a00d65bfa4f0266933b4ae4e3d96ab956e455f0
  • Pointer size: 132 Bytes
  • Size of remote file: 2.17 MB
examples/Fairies.png ADDED