datnguyentien204 committed
Commit 71b941b
1 Parent(s): abb3944

Upload folder using huggingface_hub (#3)


- 8011396be3de90a989ea7598db808c8deb851510f847357a7ce7cad16451682d (3f83137cd90613fa2e3116fe931afb5ed353b5d6)

dataset/widerface.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76c08d226ca61ce75f8e5e8056c05e6c7c89aa030080ad455321b41a84f02858
+size 1834228959
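
The three added lines are a Git LFS pointer, not the archive itself: the real dataset/widerface.zip (about 1.8 GB) is stored in LFS under the oid above. A small sketch for checking a fetched copy against that pointer; the path and expected digest come from the diff, everything else is standard library.

import hashlib

# Recompute the SHA-256 of the LFS-resolved archive and compare it with the
# `oid sha256:...` line in the pointer above; the path mirrors the diff.
h = hashlib.sha256()
with open('dataset/widerface.zip', 'rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):
        h.update(chunk)
print(h.hexdigest())  # expected: 76c08d226ca61ce75f8e5e8056c05e6c7c89aa030080ad455321b41a84f02858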
layers/functions/prior_box.py CHANGED
@@ -5,7 +5,7 @@ from math import ceil
 
 
 class PriorBox(object):
-    def __init__(self, cfg, image_size=None, phase='train'):
+    def __init__(self, cfg, image_size=None, phase='test'):
         super(PriorBox, self).__init__()
         self.min_sizes = cfg['min_sizes']
         self.steps = cfg['steps']
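
Note that this hunk only swaps the default of the phase keyword; callers that pass phase explicitly are unaffected. A minimal usage sketch with illustrative values: the min_sizes and steps keys appear in the diff context, while the clip key, the concrete numbers, and the forward() call that generates the anchors are assumptions about the surrounding code, not part of this change.

from layers.functions.prior_box import PriorBox

# Illustrative cfg: only 'min_sizes' and 'steps' are visible in the diff; the rest is assumed.
cfg = {
    'min_sizes': [[16, 32], [64, 128], [256, 512]],
    'steps': [8, 16, 32],
    'clip': False,
}

# Passing phase explicitly sidesteps the changed default entirely.
priorbox = PriorBox(cfg, image_size=(640, 640), phase='train')
priors = priorbox.forward()  # assumed API: returns the generated anchor boxes as a tensor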
models/retinaface.py CHANGED
@@ -46,10 +46,10 @@ class LandmarkHead(nn.Module):
         return out.view(out.shape[0], -1, 10)
 
 class RetinaFace(nn.Module):
-    def __init__(self, cfg = None, phase = 'train'):
+    def __init__(self, cfg = None, phase = 'test'):
         """
         :param cfg: Network related settings.
-        :param phase: train or test.
+        :param phase: test or test.
         """
         super(RetinaFace,self).__init__()
         self.phase = phase
@@ -120,7 +120,7 @@ class RetinaFace(nn.Module):
         classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)],dim=1)
         ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1)
 
-        if self.phase == 'train':
+        if self.phase == 'test':
             output = (bbox_regressions, classifications, ldm_regressions)
         else:
             output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
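
As the second hunk shows, phase now selects the branches the other way round: with the new default 'test', forward() returns raw classification logits, and any other value gets F.softmax applied. A caller that wants probabilities regardless of the flag can normalise the scores itself; a minimal sketch with a dummy logits tensor, where the prior count and class count are assumptions chosen only to make the shapes concrete.

import torch
import torch.nn.functional as F

# Dummy classification output of shape (batch, num_priors, 2); the numbers are illustrative.
conf = torch.randn(1, 16800, 2)

# Normalise explicitly, independent of whichever branch `phase` selected inside forward().
scores = F.softmax(conf, dim=-1)[:, :, 1]  # per-prior probability of the positive (face) class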
train.py CHANGED
@@ -14,7 +14,7 @@ import math
 from models.retinaface import RetinaFace
 
 parser = argparse.ArgumentParser(description='Retinaface Training')
-parser.add_argument('--training_dataset', default='./dataset/widerface/widerface/train/label.txt', help='Training dataset directory')
+parser.add_argument('--training_dataset', default='./dataset/widerface/widerface/test/label.txt', help='Training dataset directory')
 parser.add_argument('--network', default='mobile0.25', help='Backbone network mobile0.25 or resnet50')
 parser.add_argument('--num_workers', default=4, type=int, help='Number of workers used in dataloading')
 parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float, help='initial learning rate')
@@ -117,7 +117,7 @@ def train():
             step_index += 1
         lr = adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size)
 
-        # load train data
+        # load test data
         images, targets = next(batch_iterator)
         images = images.cuda()
         targets = [anno.cuda() for anno in targets]
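
Since only the argparse default changes, a run that passes --training_dataset explicitly still trains on whatever path it names. A standalone sketch of that behaviour, reusing the exact paths from the hunk:

import argparse

parser = argparse.ArgumentParser(description='Retinaface Training')
parser.add_argument('--training_dataset',
                    default='./dataset/widerface/widerface/test/label.txt',
                    help='Training dataset directory')

# An explicit flag overrides the new default...
args = parser.parse_args(['--training_dataset',
                          './dataset/widerface/widerface/train/label.txt'])
print(args.training_dataset)                    # ./dataset/widerface/widerface/train/label.txt

# ...while omitting it falls back to the path introduced by this commit.
print(parser.parse_args([]).training_dataset)   # ./dataset/widerface/widerface/test/label.txt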
utils/box_utils.py CHANGED
@@ -208,7 +208,7 @@ def encode_landm(matched, priors, variances):
 # Adapted from https://github.com/Hakuyume/chainer-ssd
 def decode(loc, priors, variances):
     """Decode locations from predictions using priors to undo
-    the encoding we did for offset regression at train time.
+    the encoding we did for offset regression at test time.
     Args:
         loc (tensor): location predictions for loc layers,
             Shape: [num_priors,4]
@@ -228,7 +228,7 @@ def decode(loc, priors, variances):
 
 def decode_landm(pre, priors, variances):
     """Decode landm from predictions using priors to undo
-    the encoding we did for offset regression at train time.
+    the encoding we did for offset regression at test time.
     Args:
         pre (tensor): landm predictions for loc layers,
             Shape: [num_priors,10]
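
Both hunks touch only docstring wording; the box arithmetic in decode and decode_landm is untouched. For orientation, here is a hedged sketch of the standard SSD-style decode that these docstrings describe, written from the usual formulation rather than copied from this file: priors are centre-size boxes, loc holds the regressed offsets, and variances rescales them before they are applied.

import torch

def decode_sketch(loc, priors, variances):
    """Undo SSD-style offset encoding (illustrative, not this repo's exact code)."""
    centers = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]    # shift prior centres
    sizes = priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])           # rescale prior sizes
    return torch.cat((centers - sizes / 2, centers + sizes / 2), dim=1)    # (x1, y1, x2, y2)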