diff --git a/__init_paths.py b/__init_paths.py
new file mode 100755
index 0000000000000000000000000000000000000000..86e0eedfcd9d07589020eacef75eab4691adea9f
--- /dev/null
+++ b/__init_paths.py
@@ -0,0 +1,30 @@
+'''
+@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+@author: yangxy (yangtao9009@gmail.com)
+'''
+import os.path as osp
+import sys
+
+def add_path(path):
+    if path not in sys.path:
+        sys.path.insert(0, path)
+
+this_dir = osp.dirname(__file__)
+
+path = osp.join(this_dir, 'face_detect')
+add_path(path)
+
+path = osp.join(this_dir, 'face_parse')
+add_path(path)
+
+path = osp.join(this_dir, 'face_model')
+add_path(path)
+
+path = osp.join(this_dir, 'sr_model')
+add_path(path)
+
+path = osp.join(this_dir, 'loss')
+add_path(path)
+
+path = osp.join(this_dir, 'data_loader')
+add_path(path)
\ No newline at end of file
diff --git a/align_faces.py b/align_faces.py
new file mode 100755
index 0000000000000000000000000000000000000000..f22aa519c39e895861f466f0e69a33b6fc3efe2e
--- /dev/null
+++ b/align_faces.py
@@ -0,0 +1,266 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Apr 24 15:43:29 2017
+@author: zhaoy
+"""
+"""
+@Modified by yangxy (yangtao9009@gmail.com)
+"""
+import cv2
+import numpy as np
+from skimage import transform as trans
+
+# reference facial points, a list of coordinates (x,y)
+REFERENCE_FACIAL_POINTS = [
+    [30.29459953, 51.69630051],
+    [65.53179932, 51.50139999],
+    [48.02519989, 71.73660278],
+    [33.54930115, 92.3655014],
+    [62.72990036, 92.20410156]
+]
+
+DEFAULT_CROP_SIZE = (96, 112)
+
+
+def _umeyama(src, dst, estimate_scale=True, scale=1.0):
+    """Estimate N-D similarity transformation with or without scaling.
+    Parameters
+    ----------
+    src : (M, N) array
+        Source coordinates.
+    dst : (M, N) array
+        Destination coordinates.
+    estimate_scale : bool
+        Whether to estimate scaling factor.
+    scale : float
+        Scale factor to use when `estimate_scale` is False. Default: 1.0.
+    Returns
+    -------
+    T : (N + 1, N + 1)
+        The homogeneous similarity transformation matrix. The matrix contains
+        NaN values only if the problem is not well-conditioned.
+    scale : float
+        The estimated (or provided) scale factor.
+    References
+    ----------
+    .. [1] "Least-squares estimation of transformation parameters between two
+            point patterns", Shinji Umeyama, PAMI 1991, :DOI:`10.1109/34.88573`
+    """
+
+    num = src.shape[0]
+    dim = src.shape[1]
+
+    # Compute mean of src and dst.
+    src_mean = src.mean(axis=0)
+    dst_mean = dst.mean(axis=0)
+
+    # Subtract mean from src and dst.
+    src_demean = src - src_mean
+    dst_demean = dst - dst_mean
+
+    # Eq. (38).
+    A = dst_demean.T @ src_demean / num
+
+    # Eq. (39).
+    d = np.ones((dim,), dtype=np.double)
+    if np.linalg.det(A) < 0:
+        d[dim - 1] = -1
+
+    T = np.eye(dim + 1, dtype=np.double)
+
+    U, S, V = np.linalg.svd(A)
+
+    # Eq. (40) and (43).
+    rank = np.linalg.matrix_rank(A)
+    if rank == 0:
+        return np.nan * T, scale
+    elif rank == dim - 1:
+        if np.linalg.det(U) * np.linalg.det(V) > 0:
+            T[:dim, :dim] = U @ V
+        else:
+            s = d[dim - 1]
+            d[dim - 1] = -1
+            T[:dim, :dim] = U @ np.diag(d) @ V
+            d[dim - 1] = s
+    else:
+        T[:dim, :dim] = U @ np.diag(d) @ V
+
+    if estimate_scale:
+        # Eq. (41) and (42).
+        scale = 1.0 / src_demean.var(axis=0).sum() * (S @ d)
+    # otherwise keep the caller-provided scale
+
+    T[:dim, dim] = dst_mean - scale * (T[:dim, :dim] @ src_mean.T)
+    T[:dim, :dim] *= scale
+
+    return T, scale
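+
+# A minimal usage sketch (added comment, not part of the original algorithm): for
+# matching (K, 2) point sets, the returned 3x3 matrix T maps homogeneous source
+# points onto destination points and `scale` is the estimated scale factor, e.g.
+#
+#   src = np.array([[0., 0.], [1., 0.], [0., 1.]])
+#   dst = src * 2.0 + 5.0
+#   T, s = _umeyama(src, dst, estimate_scale=True)
+#   mapped = (T @ np.c_[src, np.ones(len(src))].T).T[:, :2]  # ~= dst, s ~= 2.0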
+
+
+class FaceWarpException(Exception):
+    def __str__(self):
+        return 'In File {}:{}'.format(
+            __file__, super().__str__())
+
+
+def get_reference_facial_points(output_size=None,
+                                inner_padding_factor=0.0,
+                                outer_padding=(0, 0),
+                                default_square=False):
+    tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)
+    tmp_crop_size = np.array(DEFAULT_CROP_SIZE)
+
+    # 0) make the inner region a square
+    if default_square:
+        size_diff = max(tmp_crop_size) - tmp_crop_size
+        tmp_5pts += size_diff / 2
+        tmp_crop_size += size_diff
+
+    if (output_size and
+            output_size[0] == tmp_crop_size[0] and
+            output_size[1] == tmp_crop_size[1]):
+        print('output_size == DEFAULT_CROP_SIZE {}: return default reference points'.format(tmp_crop_size))
+        return tmp_5pts
+
+    if (inner_padding_factor == 0 and
+            outer_padding == (0, 0)):
+        if output_size is None:
+            print('No paddings to do: return default reference points')
+            return tmp_5pts
+        else:
+            raise FaceWarpException(
+                'No paddings to do, output_size must be None or {}'.format(tmp_crop_size))
+
+    # check output size
+    if not (0 <= inner_padding_factor <= 1.0):
+        raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')
+
+    if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0)
+            and output_size is None):
+        output_size = (tmp_crop_size *
+                       (1 + inner_padding_factor * 2)).astype(np.int32)
+        output_size += np.array(outer_padding)
+        print('              deduced from paddings, output_size = ', output_size)
+
+    if not (outer_padding[0] < output_size[0]
+            and outer_padding[1] < output_size[1]):
+        raise FaceWarpException('Not (outer_padding[0] < output_size[0] '
+                                'and outer_padding[1] < output_size[1])')
+
+    # 1) pad the inner region according to inner_padding_factor
+    # print('---> STEP1: pad the inner region according inner_padding_factor')
+    if inner_padding_factor > 0:
+        size_diff = tmp_crop_size * inner_padding_factor * 2
+        tmp_5pts += size_diff / 2
+        tmp_crop_size += np.round(size_diff).astype(np.int32)
+
+    # print('              crop_size = ', tmp_crop_size)
+    # print('              reference_5pts = ', tmp_5pts)
+
+    # 2) resize the padded inner region
+    # print('---> STEP2: resize the padded inner region')
+    size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2
+    # print('              crop_size = ', tmp_crop_size)
+    # print('              size_bf_outer_pad = ', size_bf_outer_pad)
+
+    if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:
+        raise FaceWarpException('Must have (output_size - outer_padding) '
+                                '= some_scale * (crop_size * (1.0 + inner_padding_factor))')
+
+    scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]
+    # print('              resize scale_factor = ', scale_factor)
+    tmp_5pts = tmp_5pts * scale_factor
+    #    size_diff = tmp_crop_size * (scale_factor - min(scale_factor))
+    #    tmp_5pts = tmp_5pts + size_diff / 2
+    tmp_crop_size = size_bf_outer_pad
+    # print('              crop_size = ', tmp_crop_size)
+    # print('              reference_5pts = ', tmp_5pts)
+
+    # 3) add outer_padding to make output_size
+    reference_5point = tmp_5pts + np.array(outer_padding)
+    tmp_crop_size = output_size
+    # print('---> STEP3: add outer_padding to make output_size')
+    # print('              crop_size = ', tmp_crop_size)
+    # print('              reference_5pts = ', tmp_5pts)
+    #
+    # print('===> end get_reference_facial_points\n')
+
+    return reference_5point
+
+
+def get_affine_transform_matrix(src_pts, dst_pts):
+    tfm = np.float32([[1, 0, 0], [0, 1, 0]])
+    n_pts = src_pts.shape[0]
+    ones = np.ones((n_pts, 1), src_pts.dtype)
+    src_pts_ = np.hstack([src_pts, ones])
+    dst_pts_ = np.hstack([dst_pts, ones])
+
+    A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_, rcond=None)
+
+    if rank == 3:
+        tfm = np.float32([
+            [A[0, 0], A[1, 0], A[2, 0]],
+            [A[0, 1], A[1, 1], A[2, 1]]
+        ])
+    elif rank == 2:
+        tfm = np.float32([
+            [A[0, 0], A[1, 0], 0],
+            [A[0, 1], A[1, 1], 0]
+        ])
+
+    return tfm
+
+
+def warp_and_crop_face(src_img,
+                       facial_pts,
+                       reference_pts=None,
+                       crop_size=(96, 112),
+                       align_type='similarity'):  # similarity | cv2_affine | affine
+    if reference_pts is None:
+        if crop_size[0] == 96 and crop_size[1] == 112:
+            reference_pts = REFERENCE_FACIAL_POINTS
+        else:
+            default_square = False
+            inner_padding_factor = 0
+            outer_padding = (0, 0)
+            output_size = crop_size
+
+            reference_pts = get_reference_facial_points(output_size,
+                                                        inner_padding_factor,
+                                                        outer_padding,
+                                                        default_square)
+
+    ref_pts = np.float32(reference_pts)
+    ref_pts_shp = ref_pts.shape
+    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
+        raise FaceWarpException(
+            'reference_pts.shape must be (K,2) or (2,K) and K>2')
+
+    if ref_pts_shp[0] == 2:
+        ref_pts = ref_pts.T
+
+    src_pts = np.float32(facial_pts)
+    src_pts_shp = src_pts.shape
+    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
+        raise FaceWarpException(
+            'facial_pts.shape must be (K,2) or (2,K) and K>2')
+
+    if src_pts_shp[0] == 2:
+        src_pts = src_pts.T
+
+    if src_pts.shape != ref_pts.shape:
+        raise FaceWarpException(
+            'facial_pts and reference_pts must have the same shape')
+
+    if align_type == 'cv2_affine':
+        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
+        tfm_inv = cv2.getAffineTransform(ref_pts[0:3], src_pts[0:3])
+    elif align_type == 'affine':
+        tfm = get_affine_transform_matrix(src_pts, ref_pts)
+        tfm_inv = get_affine_transform_matrix(ref_pts, src_pts)
+    else:
+        params, scale = _umeyama(src_pts, ref_pts)
+        tfm = params[:2, :]
+
+        params, _ = _umeyama(ref_pts, src_pts, False, scale=1.0/scale)
+        tfm_inv = params[:2, :]
+
+    face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]), flags=3)
+
+    return face_img, tfm_inv
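+
+# Usage sketch (added comment, not part of the original code): `img` and `landmarks_5`
+# are assumed to come from a 5-point face detector (e.g. the RetinaFace model under
+# face_detect/); the parameter values below are illustrative, not the canonical GPEN settings.
+#
+#   ref_5pts = get_reference_facial_points(output_size=(512, 512),
+#                                          inner_padding_factor=0.25,
+#                                          outer_padding=(0, 0),
+#                                          default_square=True)
+#   face, tfm_inv = warp_and_crop_face(img, landmarks_5,
+#                                      reference_pts=ref_5pts, crop_size=(512, 512))
+#   # tfm_inv can later be passed to cv2.warpAffine to paste the restored face back.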
diff --git a/data_loader/dataset_face.py b/data_loader/dataset_face.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff13cf1f7d581d0e2dc9a1bda9001a78f5733901
--- /dev/null
+++ b/data_loader/dataset_face.py
@@ -0,0 +1,103 @@
+import numpy as np
+import cv2
+import os
+import glob
+import math
+import random
+import torch
+import torch.nn.functional as F
+from torch.utils.data import Dataset
+
+import degradations
+
+
+class GFPGAN_degradation(object):
+    def __init__(self):
+        self.kernel_list = ['iso', 'aniso']
+        self.kernel_prob = [0.5, 0.5]
+        self.blur_kernel_size = 41
+        self.blur_sigma = [0.1, 10]
+        self.downsample_range = [0.8, 8]
+        self.noise_range = [0, 20]
+        self.jpeg_range = [60, 100]
+        self.gray_prob = 0.2
+        self.color_jitter_prob = 0.0
+        self.color_jitter_pt_prob = 0.0
+        self.shift = 20/255.
+    
+    def degrade_process(self, img_gt):
+        if random.random() > 0.5:
+            img_gt = cv2.flip(img_gt, 1)
+
+        h, w = img_gt.shape[:2]
+       
+        # random color jitter 
+        if np.random.uniform() < self.color_jitter_prob:
+            jitter_val = np.random.uniform(-self.shift, self.shift, 3).astype(np.float32)
+            img_gt = img_gt + jitter_val
+            img_gt = np.clip(img_gt, 0, 1)    
+
+        # random grayscale
+        if np.random.uniform() < self.gray_prob:
+            img_gt = cv2.cvtColor(img_gt, cv2.COLOR_BGR2GRAY)
+            img_gt = np.tile(img_gt[:, :, None], [1, 1, 3])
+        
+        # ------------------------ generate lq image ------------------------ #
+        # blur
+        kernel = degradations.random_mixed_kernels(
+                self.kernel_list,
+                self.kernel_prob,
+                self.blur_kernel_size,
+                self.blur_sigma,
+                self.blur_sigma, [-math.pi, math.pi],
+                noise_range=None)
+        img_lq = cv2.filter2D(img_gt, -1, kernel)
+        # downsample
+        scale = np.random.uniform(self.downsample_range[0], self.downsample_range[1])
+        img_lq = cv2.resize(img_lq, (int(w // scale), int(h // scale)), interpolation=cv2.INTER_LINEAR)
+        
+        # noise
+        if self.noise_range is not None:
+            img_lq = degradations.random_add_gaussian_noise(img_lq, self.noise_range)
+        # jpeg compression
+        if self.jpeg_range is not None:
+            img_lq = degradations.random_add_jpg_compression(img_lq, self.jpeg_range)
+
+        # round and clip
+        img_lq = np.clip((img_lq * 255.0).round(), 0, 255) / 255.
+
+        # resize to original size
+        img_lq = cv2.resize(img_lq, (w, h), interpolation=cv2.INTER_LINEAR)
+
+        return img_gt, img_lq
+
+class FaceDataset(Dataset):
+    def __init__(self, path, resolution=512):
+        self.resolution = resolution
+
+        self.HQ_imgs = glob.glob(os.path.join(path, '*.*'))
+        self.length = len(self.HQ_imgs)
+
+        self.degrader = GFPGAN_degradation()
+
+    def __len__(self):
+        return self.length
+
+    def __getitem__(self, index):
+        img_gt = cv2.imread(self.HQ_imgs[index], cv2.IMREAD_COLOR)
+        img_gt = cv2.resize(img_gt, (self.resolution, self.resolution), interpolation=cv2.INTER_AREA)
+        
+        # BFR degradation
+        # We adopt GFPGAN's degradation pipeline for simplicity; note that it differs from the implementation described in our paper.
+        # Data degradation plays a key role in BFR, so consider replacing it with your own method.
+        img_gt = img_gt.astype(np.float32)/255.
+        img_gt, img_lq = self.degrader.degrade_process(img_gt)
+
+        img_gt =  (torch.from_numpy(img_gt) - 0.5) / 0.5
+        img_lq =  (torch.from_numpy(img_lq) - 0.5) / 0.5
+        
+        img_gt = img_gt.permute(2, 0, 1).flip(0) # BGR->RGB
+        img_lq = img_lq.permute(2, 0, 1).flip(0) # BGR->RGB
+
+        return img_lq, img_gt
+
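+# Usage sketch (added comment, not part of the original code): pairing FaceDataset with a
+# standard PyTorch DataLoader; 'path/to/HQ_images' is a placeholder path.
+#
+#   from torch.utils.data import DataLoader
+#   dataset = FaceDataset('path/to/HQ_images', resolution=512)
+#   loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=4)
+#   for img_lq, img_gt in loader:
+#       ...  # both are (B, 3, H, W) RGB tensors in [-1, 1]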
diff --git a/data_loader/degradations.py b/data_loader/degradations.py
new file mode 100644
index 0000000000000000000000000000000000000000..241de6552fb0dcec83fd8fede3e3a17bc8ba647e
--- /dev/null
+++ b/data_loader/degradations.py
@@ -0,0 +1,765 @@
+import cv2
+import math
+import numpy as np
+import random
+import torch
+from scipy import special
+from scipy.stats import multivariate_normal
+try:
+    from torchvision.transforms.functional_tensor import rgb_to_grayscale
+except ImportError:
+    # torchvision >= 0.17 removed transforms.functional_tensor
+    from torchvision.transforms.functional import rgb_to_grayscale
+
+# -------------------------------------------------------------------- #
+# --------------------------- blur kernels --------------------------- #
+# -------------------------------------------------------------------- #
+
+
+# --------------------------- util functions --------------------------- #
+def sigma_matrix2(sig_x, sig_y, theta):
+    """Calculate the rotated sigma matrix (two dimensional matrix).
+
+    Args:
+        sig_x (float):
+        sig_y (float):
+        theta (float): Radian measurement.
+
+    Returns:
+        ndarray: Rotated sigma matrix.
+    """
+    d_matrix = np.array([[sig_x**2, 0], [0, sig_y**2]])
+    u_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
+    return np.dot(u_matrix, np.dot(d_matrix, u_matrix.T))
+
+
+def mesh_grid(kernel_size):
+    """Generate the mesh grid, centering at zero.
+
+    Args:
+        kernel_size (int):
+
+    Returns:
+        xy (ndarray): with the shape (kernel_size, kernel_size, 2)
+        xx (ndarray): with the shape (kernel_size, kernel_size)
+        yy (ndarray): with the shape (kernel_size, kernel_size)
+    """
+    ax = np.arange(-kernel_size // 2 + 1., kernel_size // 2 + 1.)
+    xx, yy = np.meshgrid(ax, ax)
+    xy = np.hstack((xx.reshape((kernel_size * kernel_size, 1)), yy.reshape(kernel_size * kernel_size,
+                                                                           1))).reshape(kernel_size, kernel_size, 2)
+    return xy, xx, yy
+
+
+def pdf2(sigma_matrix, grid):
+    """Calculate PDF of the bivariate Gaussian distribution.
+
+    Args:
+        sigma_matrix (ndarray): with the shape (2, 2)
+        grid (ndarray): generated by :func:`mesh_grid`,
+            with the shape (K, K, 2), K is the kernel size.
+
+    Returns:
+        kernel (ndarray): un-normalized kernel.
+    """
+    inverse_sigma = np.linalg.inv(sigma_matrix)
+    kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
+    return kernel
+
+
+def cdf2(d_matrix, grid):
+    """Calculate the CDF of the standard bivariate Gaussian distribution.
+        Used in skewed Gaussian distribution.
+
+    Args:
+        d_matrix (ndarray): skew matrix.
+        grid (ndarray): generated by :func:`mesh_grid`,
+            with the shape (K, K, 2), K is the kernel size.
+
+    Returns:
+        cdf (ndarray): skewed cdf.
+    """
+    rv = multivariate_normal([0, 0], [[1, 0], [0, 1]])
+    grid = np.dot(grid, d_matrix)
+    cdf = rv.cdf(grid)
+    return cdf
+
+
+def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):
+    """Generate a bivariate isotropic or anisotropic Gaussian kernel.
+
+    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
+
+    Args:
+        kernel_size (int):
+        sig_x (float):
+        sig_y (float):
+        theta (float): Radian measurement.
+        grid (ndarray, optional): generated by :func:`mesh_grid`,
+            with the shape (K, K, 2), K is the kernel size. Default: None
+        isotropic (bool):
+
+    Returns:
+        kernel (ndarray): normalized kernel.
+    """
+    if grid is None:
+        grid, _, _ = mesh_grid(kernel_size)
+    if isotropic:
+        sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
+    else:
+        sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
+    kernel = pdf2(sigma_matrix, grid)
+    kernel = kernel / np.sum(kernel)
+    return kernel
+
+
+def bivariate_generalized_Gaussian(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
+    """Generate a bivariate generalized Gaussian kernel.
+        Described in `Parameter Estimation For Multivariate Generalized
+        Gaussian Distributions`_
+        by Pascal et al. (2013).
+
+    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
+
+    Args:
+        kernel_size (int):
+        sig_x (float):
+        sig_y (float):
+        theta (float): Radian measurement.
+        beta (float): shape parameter, beta = 1 is the normal distribution.
+        grid (ndarray, optional): generated by :func:`mesh_grid`,
+            with the shape (K, K, 2), K is the kernel size. Default: None
+
+    Returns:
+        kernel (ndarray): normalized kernel.
+
+    .. _Parameter Estimation For Multivariate Generalized Gaussian
+    Distributions: https://arxiv.org/abs/1302.6498
+    """
+    if grid is None:
+        grid, _, _ = mesh_grid(kernel_size)
+    if isotropic:
+        sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
+    else:
+        sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
+    inverse_sigma = np.linalg.inv(sigma_matrix)
+    kernel = np.exp(-0.5 * np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta))
+    kernel = kernel / np.sum(kernel)
+    return kernel
+
+
+def bivariate_plateau(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
+    """Generate a plateau-like anisotropic kernel.
+    1 / (1+x^(beta))
+
+    Ref: https://stats.stackexchange.com/questions/203629/is-there-a-plateau-shaped-distribution
+
+    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
+
+    Args:
+        kernel_size (int):
+        sig_x (float):
+        sig_y (float):
+        theta (float): Radian measurement.
+        beta (float): shape parameter, beta = 1 is the normal distribution.
+        grid (ndarray, optional): generated by :func:`mesh_grid`,
+            with the shape (K, K, 2), K is the kernel size. Default: None
+
+    Returns:
+        kernel (ndarray): normalized kernel.
+    """
+    if grid is None:
+        grid, _, _ = mesh_grid(kernel_size)
+    if isotropic:
+        sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
+    else:
+        sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
+    inverse_sigma = np.linalg.inv(sigma_matrix)
+    kernel = np.reciprocal(np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta) + 1)
+    kernel = kernel / np.sum(kernel)
+    return kernel
+
+
+def random_bivariate_Gaussian(kernel_size,
+                              sigma_x_range,
+                              sigma_y_range,
+                              rotation_range,
+                              noise_range=None,
+                              isotropic=True):
+    """Randomly generate bivariate isotropic or anisotropic Gaussian kernels.
+
+    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
+
+    Args:
+        kernel_size (int):
+        sigma_x_range (tuple): [0.6, 5]
+        sigma_y_range (tuple): [0.6, 5]
+        rotation_range (tuple): [-math.pi, math.pi]
+        noise_range(tuple, optional): multiplicative kernel noise,
+            [0.75, 1.25]. Default: None
+
+    Returns:
+        kernel (ndarray):
+    """
+    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
+    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
+    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
+    if isotropic is False:
+        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
+        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
+        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
+        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
+    else:
+        sigma_y = sigma_x
+        rotation = 0
+
+    kernel = bivariate_Gaussian(kernel_size, sigma_x, sigma_y, rotation, isotropic=isotropic)
+
+    # add multiplicative noise
+    if noise_range is not None:
+        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
+        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
+        kernel = kernel * noise
+    kernel = kernel / np.sum(kernel)
+    return kernel
+
+
+def random_bivariate_generalized_Gaussian(kernel_size,
+                                          sigma_x_range,
+                                          sigma_y_range,
+                                          rotation_range,
+                                          beta_range,
+                                          noise_range=None,
+                                          isotropic=True):
+    """Randomly generate bivariate generalized Gaussian kernels.
+
+    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
+
+    Args:
+        kernel_size (int):
+        sigma_x_range (tuple): [0.6, 5]
+        sigma_y_range (tuple): [0.6, 5]
+        rotation_range (tuple): [-math.pi, math.pi]
+        beta_range (tuple): [0.5, 8]
+        noise_range(tuple, optional): multiplicative kernel noise,
+            [0.75, 1.25]. Default: None
+
+    Returns:
+        kernel (ndarray):
+    """
+    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
+    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
+    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
+    if isotropic is False:
+        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
+        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
+        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
+        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
+    else:
+        sigma_y = sigma_x
+        rotation = 0
+
+    # assume beta_range[0] < 1 < beta_range[1]
+    if np.random.uniform() < 0.5:
+        beta = np.random.uniform(beta_range[0], 1)
+    else:
+        beta = np.random.uniform(1, beta_range[1])
+
+    kernel = bivariate_generalized_Gaussian(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
+
+    # add multiplicative noise
+    if noise_range is not None:
+        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
+        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
+        kernel = kernel * noise
+    kernel = kernel / np.sum(kernel)
+    return kernel
+
+
+def random_bivariate_plateau(kernel_size,
+                             sigma_x_range,
+                             sigma_y_range,
+                             rotation_range,
+                             beta_range,
+                             noise_range=None,
+                             isotropic=True):
+    """Randomly generate bivariate plateau kernels.
+
+    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
+
+    Args:
+        kernel_size (int):
+        sigma_x_range (tuple): [0.6, 5]
+        sigma_y_range (tuple): [0.6, 5]
+        rotation_range (tuple): [-math.pi/2, math.pi/2]
+        beta_range (tuple): [1, 4]
+        noise_range(tuple, optional): multiplicative kernel noise,
+            [0.75, 1.25]. Default: None
+
+    Returns:
+        kernel (ndarray):
+    """
+    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
+    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
+    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
+    if isotropic is False:
+        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
+        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
+        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
+        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
+    else:
+        sigma_y = sigma_x
+        rotation = 0
+
+    # TODO: this may not be proper
+    if np.random.uniform() < 0.5:
+        beta = np.random.uniform(beta_range[0], 1)
+    else:
+        beta = np.random.uniform(1, beta_range[1])
+
+    kernel = bivariate_plateau(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
+    # add multiplicative noise
+    if noise_range is not None:
+        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
+        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
+        kernel = kernel * noise
+    kernel = kernel / np.sum(kernel)
+
+    return kernel
+
+
+def random_mixed_kernels(kernel_list,
+                         kernel_prob,
+                         kernel_size=21,
+                         sigma_x_range=(0.6, 5),
+                         sigma_y_range=(0.6, 5),
+                         rotation_range=(-math.pi, math.pi),
+                         betag_range=(0.5, 8),
+                         betap_range=(0.5, 8),
+                         noise_range=None):
+    """Randomly generate mixed kernels.
+
+    Args:
+        kernel_list (tuple): a list of kernel type names; supported types are
+            ['iso', 'aniso', 'generalized_iso', 'generalized_aniso',
+            'plateau_iso', 'plateau_aniso']
+        kernel_prob (tuple): corresponding kernel probability for each
+            kernel type
+        kernel_size (int):
+        sigma_x_range (tuple): [0.6, 5]
+        sigma_y_range (tuple): [0.6, 5]
+        rotation_range (tuple): [-math.pi, math.pi]
+        betag_range (tuple): [0.5, 8]
+        betap_range (tuple): [0.5, 8]
+        noise_range(tuple, optional): multiplicative kernel noise,
+            [0.75, 1.25]. Default: None
+
+    Returns:
+        kernel (ndarray):
+    """
+    kernel_type = random.choices(kernel_list, kernel_prob)[0]
+    if kernel_type == 'iso':
+        kernel = random_bivariate_Gaussian(
+            kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True)
+    elif kernel_type == 'aniso':
+        kernel = random_bivariate_Gaussian(
+            kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False)
+    elif kernel_type == 'generalized_iso':
+        kernel = random_bivariate_generalized_Gaussian(
+            kernel_size,
+            sigma_x_range,
+            sigma_y_range,
+            rotation_range,
+            betag_range,
+            noise_range=noise_range,
+            isotropic=True)
+    elif kernel_type == 'generalized_aniso':
+        kernel = random_bivariate_generalized_Gaussian(
+            kernel_size,
+            sigma_x_range,
+            sigma_y_range,
+            rotation_range,
+            betag_range,
+            noise_range=noise_range,
+            isotropic=False)
+    elif kernel_type == 'plateau_iso':
+        kernel = random_bivariate_plateau(
+            kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True)
+    elif kernel_type == 'plateau_aniso':
+        kernel = random_bivariate_plateau(
+            kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False)
+    return kernel
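+
+# Usage sketch (added comment, not part of the original code): drawing a random blur
+# kernel the way data_loader/dataset_face.py does and applying it to an assumed
+# (H, W, 3) float32 image `img` in [0, 1].
+#
+#   kernel = random_mixed_kernels(['iso', 'aniso'], [0.5, 0.5], kernel_size=41,
+#                                 sigma_x_range=(0.1, 10), sigma_y_range=(0.1, 10),
+#                                 rotation_range=(-math.pi, math.pi))
+#   img_blur = cv2.filter2D(img, -1, kernel)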
+
+
+np.seterr(divide='ignore', invalid='ignore')
+
+
+def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):
+    """2D sinc filter, ref: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter
+
+    Args:
+        cutoff (float): cutoff frequency in radians (pi is max)
+        kernel_size (int): horizontal and vertical size, must be odd.
+        pad_to (int): pad kernel size to desired size, must be odd or zero.
+    """
+    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
+    kernel = np.fromfunction(
+        lambda x, y: cutoff * special.j1(cutoff * np.sqrt(
+            (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)) / (2 * np.pi * np.sqrt(
+                (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)), [kernel_size, kernel_size])
+    kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = cutoff**2 / (4 * np.pi)
+    kernel = kernel / np.sum(kernel)
+    if pad_to > kernel_size:
+        pad_size = (pad_to - kernel_size) // 2
+        kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
+    return kernel
+
+
+# ------------------------------------------------------------- #
+# --------------------------- noise --------------------------- #
+# ------------------------------------------------------------- #
+
+# ----------------------- Gaussian Noise ----------------------- #
+
+
+def generate_gaussian_noise(img, sigma=10, gray_noise=False):
+    """Generate Gaussian noise.
+
+    Args:
+        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+        sigma (float): Noise scale (measured in range 255). Default: 10.
+
+    Returns:
+        (Numpy array): Generated noise, shape (h, w, c), float32.
+    """
+    if gray_noise:
+        noise = np.float32(np.random.randn(*(img.shape[0:2]))) * sigma / 255.
+        noise = np.expand_dims(noise, axis=2).repeat(3, axis=2)
+    else:
+        noise = np.float32(np.random.randn(*(img.shape))) * sigma / 255.
+    return noise
+
+
+def add_gaussian_noise(img, sigma=10, clip=True, rounds=False, gray_noise=False):
+    """Add Gaussian noise.
+
+    Args:
+        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+        sigma (float): Noise scale (measured in range 255). Default: 10.
+
+    Returns:
+        (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
+            float32.
+    """
+    noise = generate_gaussian_noise(img, sigma, gray_noise)
+    out = img + noise
+    if clip and rounds:
+        out = np.clip((out * 255.0).round(), 0, 255) / 255.
+    elif clip:
+        out = np.clip(out, 0, 1)
+    elif rounds:
+        out = (out * 255.0).round() / 255.
+    return out
+
+
+def generate_gaussian_noise_pt(img, sigma=10, gray_noise=0):
+    """Add Gaussian noise (PyTorch version).
+
+    Args:
+        img (Tensor): Shape (b, c, h, w), range[0, 1], float32.
+        scale (float | Tensor): Noise scale. Default: 1.0.
+
+    Returns:
+        (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
+            float32.
+    """
+    b, _, h, w = img.size()
+    if not isinstance(sigma, (float, int)):
+        sigma = sigma.view(img.size(0), 1, 1, 1)
+    if isinstance(gray_noise, (float, int)):
+        cal_gray_noise = gray_noise > 0
+    else:
+        gray_noise = gray_noise.view(b, 1, 1, 1)
+        cal_gray_noise = torch.sum(gray_noise) > 0
+
+    if cal_gray_noise:
+        noise_gray = torch.randn(*img.size()[2:4], dtype=img.dtype, device=img.device) * sigma / 255.
+        noise_gray = noise_gray.view(b, 1, h, w)
+
+    # always calculate color noise
+    noise = torch.randn(*img.size(), dtype=img.dtype, device=img.device) * sigma / 255.
+
+    if cal_gray_noise:
+        noise = noise * (1 - gray_noise) + noise_gray * gray_noise
+    return noise
+
+
+def add_gaussian_noise_pt(img, sigma=10, gray_noise=0, clip=True, rounds=False):
+    """Add Gaussian noise (PyTorch version).
+
+    Args:
+        img (Tensor): Shape (b, c, h, w), range[0, 1], float32.
+        sigma (float | Tensor): Noise scale (measured in range 255). Default: 10.
+
+    Returns:
+        (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
+            float32.
+    """
+    noise = generate_gaussian_noise_pt(img, sigma, gray_noise)
+    out = img + noise
+    if clip and rounds:
+        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
+    elif clip:
+        out = torch.clamp(out, 0, 1)
+    elif rounds:
+        out = (out * 255.0).round() / 255.
+    return out
+
+
+# ----------------------- Random Gaussian Noise ----------------------- #
+def random_generate_gaussian_noise(img, sigma_range=(0, 10), gray_prob=0):
+    sigma = np.random.uniform(sigma_range[0], sigma_range[1])
+    if np.random.uniform() < gray_prob:
+        gray_noise = True
+    else:
+        gray_noise = False
+    return generate_gaussian_noise(img, sigma, gray_noise)
+
+
+def random_add_gaussian_noise(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
+    noise = random_generate_gaussian_noise(img, sigma_range, gray_prob)
+    out = img + noise
+    if clip and rounds:
+        out = np.clip((out * 255.0).round(), 0, 255) / 255.
+    elif clip:
+        out = np.clip(out, 0, 1)
+    elif rounds:
+        out = (out * 255.0).round() / 255.
+    return out
+
+
+def random_generate_gaussian_noise_pt(img, sigma_range=(0, 10), gray_prob=0):
+    sigma = torch.rand(
+        img.size(0), dtype=img.dtype, device=img.device) * (sigma_range[1] - sigma_range[0]) + sigma_range[0]
+    gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
+    gray_noise = (gray_noise < gray_prob).float()
+    return generate_gaussian_noise_pt(img, sigma, gray_noise)
+
+
+def random_add_gaussian_noise_pt(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
+    noise = random_generate_gaussian_noise_pt(img, sigma_range, gray_prob)
+    out = img + noise
+    if clip and rounds:
+        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
+    elif clip:
+        out = torch.clamp(out, 0, 1)
+    elif rounds:
+        out = (out * 255.0).round() / 255.
+    return out
+
+
+# ----------------------- Poisson (Shot) Noise ----------------------- #
+
+
+def generate_poisson_noise(img, scale=1.0, gray_noise=False):
+    """Generate poisson noise.
+
+    Ref: https://github.com/scikit-image/scikit-image/blob/main/skimage/util/noise.py#L37-L219
+
+    Args:
+        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+        scale (float): Noise scale. Default: 1.0.
+        gray_noise (bool): Whether generate gray noise. Default: False.
+
+    Returns:
+        (Numpy array): Generated noise, shape (h, w, c), float32.
+    """
+    if gray_noise:
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    # round and clip image for counting vals correctly
+    img = np.clip((img * 255.0).round(), 0, 255) / 255.
+    vals = len(np.unique(img))
+    vals = 2**np.ceil(np.log2(vals))
+    out = np.float32(np.random.poisson(img * vals) / float(vals))
+    noise = out - img
+    if gray_noise:
+        noise = np.repeat(noise[:, :, np.newaxis], 3, axis=2)
+    return noise * scale
+
+
+def add_poisson_noise(img, scale=1.0, clip=True, rounds=False, gray_noise=False):
+    """Add poisson noise.
+
+    Args:
+        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+        scale (float): Noise scale. Default: 1.0.
+        gray_noise (bool): Whether generate gray noise. Default: False.
+
+    Returns:
+        (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
+            float32.
+    """
+    noise = generate_poisson_noise(img, scale, gray_noise)
+    out = img + noise
+    if clip and rounds:
+        out = np.clip((out * 255.0).round(), 0, 255) / 255.
+    elif clip:
+        out = np.clip(out, 0, 1)
+    elif rounds:
+        out = (out * 255.0).round() / 255.
+    return out
+
+
+def generate_poisson_noise_pt(img, scale=1.0, gray_noise=0):
+    """Generate a batch of poisson noise (PyTorch version)
+
+    Args:
+        img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
+        scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
+            Default: 1.0.
+        gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
+            0 for False, 1 for True. Default: 0.
+
+    Returns:
+        (Tensor): Generated noise, shape (b, c, h, w), float32.
+    """
+    b, _, h, w = img.size()
+    if isinstance(gray_noise, (float, int)):
+        cal_gray_noise = gray_noise > 0
+    else:
+        gray_noise = gray_noise.view(b, 1, 1, 1)
+        cal_gray_noise = torch.sum(gray_noise) > 0
+    if cal_gray_noise:
+        img_gray = rgb_to_grayscale(img, num_output_channels=1)
+        # round and clip image for counting vals correctly
+        img_gray = torch.clamp((img_gray * 255.0).round(), 0, 255) / 255.
+        # use for-loop to get the unique values for each sample
+        vals_list = [len(torch.unique(img_gray[i, :, :, :])) for i in range(b)]
+        vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
+        vals = img_gray.new_tensor(vals_list).view(b, 1, 1, 1)
+        out = torch.poisson(img_gray * vals) / vals
+        noise_gray = out - img_gray
+        noise_gray = noise_gray.expand(b, 3, h, w)
+
+    # always calculate color noise
+    # round and clip image for counting vals correctly
+    img = torch.clamp((img * 255.0).round(), 0, 255) / 255.
+    # use for-loop to get the unique values for each sample
+    vals_list = [len(torch.unique(img[i, :, :, :])) for i in range(b)]
+    vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
+    vals = img.new_tensor(vals_list).view(b, 1, 1, 1)
+    out = torch.poisson(img * vals) / vals
+    noise = out - img
+    if cal_gray_noise:
+        noise = noise * (1 - gray_noise) + noise_gray * gray_noise
+    if not isinstance(scale, (float, int)):
+        scale = scale.view(b, 1, 1, 1)
+    return noise * scale
+
+
+def add_poisson_noise_pt(img, scale=1.0, clip=True, rounds=False, gray_noise=0):
+    """Add poisson noise to a batch of images (PyTorch version).
+
+    Args:
+        img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
+        scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
+            Default: 1.0.
+        gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
+            0 for False, 1 for True. Default: 0.
+
+    Returns:
+        (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
+            float32.
+    """
+    noise = generate_poisson_noise_pt(img, scale, gray_noise)
+    out = img + noise
+    if clip and rounds:
+        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
+    elif clip:
+        out = torch.clamp(out, 0, 1)
+    elif rounds:
+        out = (out * 255.0).round() / 255.
+    return out
+
+
+# ----------------------- Random Poisson (Shot) Noise ----------------------- #
+
+
+def random_generate_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0):
+    scale = np.random.uniform(scale_range[0], scale_range[1])
+    if np.random.uniform() < gray_prob:
+        gray_noise = True
+    else:
+        gray_noise = False
+    return generate_poisson_noise(img, scale, gray_noise)
+
+
+def random_add_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
+    noise = random_generate_poisson_noise(img, scale_range, gray_prob)
+    out = img + noise
+    if clip and rounds:
+        out = np.clip((out * 255.0).round(), 0, 255) / 255.
+    elif clip:
+        out = np.clip(out, 0, 1)
+    elif rounds:
+        out = (out * 255.0).round() / 255.
+    return out
+
+
+def random_generate_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0):
+    scale = torch.rand(
+        img.size(0), dtype=img.dtype, device=img.device) * (scale_range[1] - scale_range[0]) + scale_range[0]
+    gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
+    gray_noise = (gray_noise < gray_prob).float()
+    return generate_poisson_noise_pt(img, scale, gray_noise)
+
+
+def random_add_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
+    noise = random_generate_poisson_noise_pt(img, scale_range, gray_prob)
+    out = img + noise
+    if clip and rounds:
+        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
+    elif clip:
+        out = torch.clamp(out, 0, 1)
+    elif rounds:
+        out = (out * 255.0).round() / 255.
+    return out
+
+
+# ------------------------------------------------------------------------ #
+# --------------------------- JPEG compression --------------------------- #
+# ------------------------------------------------------------------------ #
+
+
+def add_jpg_compression(img, quality=90):
+    """Add JPG compression artifacts.
+
+    Args:
+        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+        quality (float): JPG compression quality. 0 for lowest quality, 100 for
+            best quality. Default: 90.
+
+    Returns:
+        (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1],
+            float32.
+    """
+    img = np.clip(img, 0, 1)
+    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
+    _, encimg = cv2.imencode('.jpg', img * 255., encode_param)
+    img = np.float32(cv2.imdecode(encimg, 1)) / 255.
+    return img
+
+
+def random_add_jpg_compression(img, quality_range=(90, 100)):
+    """Randomly add JPG compression artifacts.
+
+    Args:
+        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+        quality_range (tuple[float] | list[float]): JPG compression quality
+            range. 0 for lowest quality, 100 for best quality.
+            Default: (90, 100).
+
+    Returns:
+        (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1],
+            float32.
+    """
+    quality = int(np.random.uniform(quality_range[0], quality_range[1]))
+    return add_jpg_compression(img, quality)
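+
+
+# Usage sketch (added comment, not part of the original code): a minimal low-quality
+# chain built from the numpy helpers above; `img` is an assumed (H, W, 3) float32
+# image in [0, 1].
+#
+#   img_lq = random_add_gaussian_noise(img, sigma_range=(0, 20))
+#   img_lq = random_add_jpg_compression(img_lq, quality_range=(60, 100))
+#   img_lq = np.clip((img_lq * 255.0).round(), 0, 255) / 255.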
diff --git a/distributed.py b/distributed.py
new file mode 100644
index 0000000000000000000000000000000000000000..51fa243257ef302e2015d5ff36ac531b86a9a0ce
--- /dev/null
+++ b/distributed.py
@@ -0,0 +1,126 @@
+import math
+import pickle
+
+import torch
+from torch import distributed as dist
+from torch.utils.data.sampler import Sampler
+
+
+def get_rank():
+    if not dist.is_available():
+        return 0
+
+    if not dist.is_initialized():
+        return 0
+
+    return dist.get_rank()
+
+
+def synchronize():
+    if not dist.is_available():
+        return
+
+    if not dist.is_initialized():
+        return
+
+    world_size = dist.get_world_size()
+
+    if world_size == 1:
+        return
+
+    dist.barrier()
+
+
+def get_world_size():
+    if not dist.is_available():
+        return 1
+
+    if not dist.is_initialized():
+        return 1
+
+    return dist.get_world_size()
+
+
+def reduce_sum(tensor):
+    if not dist.is_available():
+        return tensor
+
+    if not dist.is_initialized():
+        return tensor
+
+    tensor = tensor.clone()
+    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
+
+    return tensor
+
+
+def gather_grad(params):
+    world_size = get_world_size()
+    
+    if world_size == 1:
+        return
+
+    for param in params:
+        if param.grad is not None:
+            dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
+            param.grad.data.div_(world_size)
+
+
+def all_gather(data):
+    world_size = get_world_size()
+
+    if world_size == 1:
+        return [data]
+
+    buffer = pickle.dumps(data)
+    storage = torch.ByteStorage.from_buffer(buffer)
+    tensor = torch.ByteTensor(storage).to('cuda')
+
+    local_size = torch.IntTensor([tensor.numel()]).to('cuda')
+    size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)]
+    dist.all_gather(size_list, local_size)
+    size_list = [int(size.item()) for size in size_list]
+    max_size = max(size_list)
+
+    tensor_list = []
+    for _ in size_list:
+        tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
+
+    if local_size != max_size:
+        padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda')
+        tensor = torch.cat((tensor, padding), 0)
+
+    dist.all_gather(tensor_list, tensor)
+
+    data_list = []
+
+    for size, tensor in zip(size_list, tensor_list):
+        buffer = tensor.cpu().numpy().tobytes()[:size]
+        data_list.append(pickle.loads(buffer))
+
+    return data_list
+
+
+def reduce_loss_dict(loss_dict):
+    world_size = get_world_size()
+
+    if world_size < 2:
+        return loss_dict
+
+    with torch.no_grad():
+        keys = []
+        losses = []
+
+        for k in sorted(loss_dict.keys()):
+            keys.append(k)
+            losses.append(loss_dict[k])
+
+        losses = torch.stack(losses, 0)
+        dist.reduce(losses, dst=0)
+
+        if dist.get_rank() == 0:
+            losses /= world_size
+
+        reduced_losses = {k: v for k, v in zip(keys, losses)}
+
+    return reduced_losses
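+
+
+# Usage sketch (added comment, not part of the original code): inside a DistributedDataParallel
+# training loop, per-rank losses are averaged before logging on rank 0; `d_loss` and `g_loss`
+# are assumed scalar tensors.
+#
+#   loss_dict = {'d': d_loss, 'g': g_loss}
+#   loss_reduced = reduce_loss_dict(loss_dict)
+#   if get_rank() == 0:
+#       print({k: v.mean().item() for k, v in loss_reduced.items()})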
diff --git a/face_colorization.py b/face_colorization.py
new file mode 100755
index 0000000000000000000000000000000000000000..53b370e039a00f7545052b799a24b35329a02070
--- /dev/null
+++ b/face_colorization.py
@@ -0,0 +1,48 @@
+'''
+@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+@author: yangxy (yangtao9009@gmail.com)
+'''
+import os
+import cv2
+import glob
+import time
+import numpy as np
+from PIL import Image
+import __init_paths
+from face_model.face_gan import FaceGAN
+
+class FaceColorization(object):
+    def __init__(self, base_dir='./', size=1024, model=None, channel_multiplier=2):
+        self.facegan = FaceGAN(base_dir, size, model, channel_multiplier)
+
+    # make sure the face image is well aligned. Please refer to face_enhancement.py
+    def process(self, gray):
+        # colorize the face
+        out = self.facegan.process(gray)
+
+        return out
+        
+
+if __name__=='__main__':
+    model = {'name':'GPEN-Colorization-1024', 'size':1024}
+    
+    indir = 'examples/grays'
+    outdir = 'examples/outs-colorization'
+    os.makedirs(outdir, exist_ok=True)
+
+    facecolorizer = FaceColorization(size=model['size'], model=model['name'], channel_multiplier=2)
+
+    files = sorted(glob.glob(os.path.join(indir, '*.*g')))
+    for n, file in enumerate(files[:]):
+        filename = os.path.basename(file)
+        
+        grayf = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
+        grayf = cv2.cvtColor(grayf, cv2.COLOR_GRAY2BGR) # channel: 1->3
+
+        colorf = facecolorizer.process(grayf)
+        
+        grayf = cv2.resize(grayf, (colorf.shape[1], colorf.shape[0]))  # cv2.resize expects (width, height)
+        cv2.imwrite(os.path.join(outdir, '.'.join(filename.split('.')[:-1])+'.jpg'), np.hstack((grayf, colorf)))
+        
+        if n%10==0: print(n, file)
+        
diff --git a/face_detect/data/FDDB/img_list.txt b/face_detect/data/FDDB/img_list.txt
new file mode 100755
index 0000000000000000000000000000000000000000..5cf3d3199ca5c9c5ef4a904f1b9c89b821a7978a
--- /dev/null
+++ b/face_detect/data/FDDB/img_list.txt
@@ -0,0 +1,2845 @@
+2002/08/11/big/img_591
+2002/08/26/big/img_265
+2002/07/19/big/img_423
+2002/08/24/big/img_490
+2002/08/31/big/img_17676
+2002/07/31/big/img_228
+2002/07/24/big/img_402
+2002/08/04/big/img_769
+2002/07/19/big/img_581
+2002/08/13/big/img_723
+2002/08/12/big/img_821
+2003/01/17/big/img_610
+2002/08/13/big/img_1116
+2002/08/28/big/img_19238
+2002/08/21/big/img_660
+2002/08/14/big/img_607
+2002/08/05/big/img_3708
+2002/08/19/big/img_511
+2002/08/07/big/img_1316
+2002/07/25/big/img_1047
+2002/07/23/big/img_474
+2002/07/27/big/img_970
+2002/09/02/big/img_15752
+2002/09/01/big/img_16378
+2002/09/01/big/img_16189
+2002/08/26/big/img_276
+2002/07/24/big/img_518
+2002/08/14/big/img_1027
+2002/08/24/big/img_733
+2002/08/15/big/img_249
+2003/01/15/big/img_1371
+2002/08/07/big/img_1348
+2003/01/01/big/img_331
+2002/08/23/big/img_536
+2002/07/30/big/img_224
+2002/08/10/big/img_763
+2002/08/21/big/img_293
+2002/08/15/big/img_1211
+2002/08/15/big/img_1194
+2003/01/15/big/img_390
+2002/08/06/big/img_2893
+2002/08/17/big/img_691
+2002/08/07/big/img_1695
+2002/08/16/big/img_829
+2002/07/25/big/img_201
+2002/08/23/big/img_36
+2003/01/15/big/img_763
+2003/01/15/big/img_637
+2002/08/22/big/img_592
+2002/07/25/big/img_817
+2003/01/15/big/img_1219
+2002/08/05/big/img_3508
+2002/08/15/big/img_1108
+2002/07/19/big/img_488
+2003/01/16/big/img_704
+2003/01/13/big/img_1087
+2002/08/10/big/img_670
+2002/07/24/big/img_104
+2002/08/27/big/img_19823
+2002/09/01/big/img_16229
+2003/01/13/big/img_846
+2002/08/04/big/img_412
+2002/07/22/big/img_554
+2002/08/12/big/img_331
+2002/08/02/big/img_533
+2002/08/12/big/img_259
+2002/08/18/big/img_328
+2003/01/14/big/img_630
+2002/08/05/big/img_3541
+2002/08/06/big/img_2390
+2002/08/20/big/img_150
+2002/08/02/big/img_1231
+2002/08/16/big/img_710
+2002/08/19/big/img_591
+2002/07/22/big/img_725
+2002/07/24/big/img_820
+2003/01/13/big/img_568
+2002/08/22/big/img_853
+2002/08/09/big/img_648
+2002/08/23/big/img_528
+2003/01/14/big/img_888
+2002/08/30/big/img_18201
+2002/08/13/big/img_965
+2003/01/14/big/img_660
+2002/07/19/big/img_517
+2003/01/14/big/img_406
+2002/08/30/big/img_18433
+2002/08/07/big/img_1630
+2002/08/06/big/img_2717
+2002/08/21/big/img_470
+2002/07/23/big/img_633
+2002/08/20/big/img_915
+2002/08/16/big/img_893
+2002/07/29/big/img_644
+2002/08/15/big/img_529
+2002/08/16/big/img_668
+2002/08/07/big/img_1871
+2002/07/25/big/img_192
+2002/07/31/big/img_961
+2002/08/19/big/img_738
+2002/07/31/big/img_382
+2002/08/19/big/img_298
+2003/01/17/big/img_608
+2002/08/21/big/img_514
+2002/07/23/big/img_183
+2003/01/17/big/img_536
+2002/07/24/big/img_478
+2002/08/06/big/img_2997
+2002/09/02/big/img_15380
+2002/08/07/big/img_1153
+2002/07/31/big/img_967
+2002/07/31/big/img_711
+2002/08/26/big/img_664
+2003/01/01/big/img_326
+2002/08/24/big/img_775
+2002/08/08/big/img_961
+2002/08/16/big/img_77
+2002/08/12/big/img_296
+2002/07/22/big/img_905
+2003/01/13/big/img_284
+2002/08/13/big/img_887
+2002/08/24/big/img_849
+2002/07/30/big/img_345
+2002/08/18/big/img_419
+2002/08/01/big/img_1347
+2002/08/05/big/img_3670
+2002/07/21/big/img_479
+2002/08/08/big/img_913
+2002/09/02/big/img_15828
+2002/08/30/big/img_18194
+2002/08/08/big/img_471
+2002/08/22/big/img_734
+2002/08/09/big/img_586
+2002/08/09/big/img_454
+2002/07/29/big/img_47
+2002/07/19/big/img_381
+2002/07/29/big/img_733
+2002/08/20/big/img_327
+2002/07/21/big/img_96
+2002/08/06/big/img_2680
+2002/07/25/big/img_919
+2002/07/21/big/img_158
+2002/07/22/big/img_801
+2002/07/22/big/img_567
+2002/07/24/big/img_804
+2002/07/24/big/img_690
+2003/01/15/big/img_576
+2002/08/14/big/img_335
+2003/01/13/big/img_390
+2002/08/11/big/img_258
+2002/07/23/big/img_917
+2002/08/15/big/img_525
+2003/01/15/big/img_505
+2002/07/30/big/img_886
+2003/01/16/big/img_640
+2003/01/14/big/img_642
+2003/01/17/big/img_844
+2002/08/04/big/img_571
+2002/08/29/big/img_18702
+2003/01/15/big/img_240
+2002/07/29/big/img_553
+2002/08/10/big/img_354
+2002/08/18/big/img_17
+2003/01/15/big/img_782
+2002/07/27/big/img_382
+2002/08/14/big/img_970
+2003/01/16/big/img_70
+2003/01/16/big/img_625
+2002/08/18/big/img_341
+2002/08/26/big/img_188
+2002/08/09/big/img_405
+2002/08/02/big/img_37
+2002/08/13/big/img_748
+2002/07/22/big/img_399
+2002/07/25/big/img_844
+2002/08/12/big/img_340
+2003/01/13/big/img_815
+2002/08/26/big/img_5
+2002/08/10/big/img_158
+2002/08/18/big/img_95
+2002/07/29/big/img_1297
+2003/01/13/big/img_508
+2002/09/01/big/img_16680
+2003/01/16/big/img_338
+2002/08/13/big/img_517
+2002/07/22/big/img_626
+2002/08/06/big/img_3024
+2002/07/26/big/img_499
+2003/01/13/big/img_387
+2002/08/31/big/img_18025
+2002/08/13/big/img_520
+2003/01/16/big/img_576
+2002/07/26/big/img_121
+2002/08/25/big/img_703
+2002/08/26/big/img_615
+2002/08/17/big/img_434
+2002/08/02/big/img_677
+2002/08/18/big/img_276
+2002/08/05/big/img_3672
+2002/07/26/big/img_700
+2002/07/31/big/img_277
+2003/01/14/big/img_220
+2002/08/23/big/img_232
+2002/08/31/big/img_17422
+2002/07/22/big/img_508
+2002/08/13/big/img_681
+2003/01/15/big/img_638
+2002/08/30/big/img_18408
+2003/01/14/big/img_533
+2003/01/17/big/img_12
+2002/08/28/big/img_19388
+2002/08/08/big/img_133
+2002/07/26/big/img_885
+2002/08/19/big/img_387
+2002/08/27/big/img_19976
+2002/08/26/big/img_118
+2002/08/28/big/img_19146
+2002/08/05/big/img_3259
+2002/08/15/big/img_536
+2002/07/22/big/img_279
+2002/07/22/big/img_9
+2002/08/13/big/img_301
+2002/08/15/big/img_974
+2002/08/06/big/img_2355
+2002/08/01/big/img_1526
+2002/08/03/big/img_417
+2002/08/04/big/img_407
+2002/08/15/big/img_1029
+2002/07/29/big/img_700
+2002/08/01/big/img_1463
+2002/08/31/big/img_17365
+2002/07/28/big/img_223
+2002/07/19/big/img_827
+2002/07/27/big/img_531
+2002/07/19/big/img_845
+2002/08/20/big/img_382
+2002/07/31/big/img_268
+2002/08/27/big/img_19705
+2002/08/02/big/img_830
+2002/08/23/big/img_250
+2002/07/20/big/img_777
+2002/08/21/big/img_879
+2002/08/26/big/img_20146
+2002/08/23/big/img_789
+2002/08/06/big/img_2683
+2002/08/25/big/img_576
+2002/08/09/big/img_498
+2002/08/08/big/img_384
+2002/08/26/big/img_592
+2002/07/29/big/img_1470
+2002/08/21/big/img_452
+2002/08/30/big/img_18395
+2002/08/15/big/img_215
+2002/07/21/big/img_643
+2002/07/22/big/img_209
+2003/01/17/big/img_346
+2002/08/25/big/img_658
+2002/08/21/big/img_221
+2002/08/14/big/img_60
+2003/01/17/big/img_885
+2003/01/16/big/img_482
+2002/08/19/big/img_593
+2002/08/08/big/img_233
+2002/07/30/big/img_458
+2002/07/23/big/img_384
+2003/01/15/big/img_670
+2003/01/15/big/img_267
+2002/08/26/big/img_540
+2002/07/29/big/img_552
+2002/07/30/big/img_997
+2003/01/17/big/img_377
+2002/08/21/big/img_265
+2002/08/09/big/img_561
+2002/07/31/big/img_945
+2002/09/02/big/img_15252
+2002/08/11/big/img_276
+2002/07/22/big/img_491
+2002/07/26/big/img_517
+2002/08/14/big/img_726
+2002/08/08/big/img_46
+2002/08/28/big/img_19458
+2002/08/06/big/img_2935
+2002/07/29/big/img_1392
+2002/08/13/big/img_776
+2002/08/24/big/img_616
+2002/08/14/big/img_1065
+2002/07/29/big/img_889
+2002/08/18/big/img_188
+2002/08/07/big/img_1453
+2002/08/02/big/img_760
+2002/07/28/big/img_416
+2002/08/07/big/img_1393
+2002/08/26/big/img_292
+2002/08/26/big/img_301
+2003/01/13/big/img_195
+2002/07/26/big/img_532
+2002/08/20/big/img_550
+2002/08/05/big/img_3658
+2002/08/26/big/img_738
+2002/09/02/big/img_15750
+2003/01/17/big/img_451
+2002/07/23/big/img_339
+2002/08/16/big/img_637
+2002/08/14/big/img_748
+2002/08/06/big/img_2739
+2002/07/25/big/img_482
+2002/08/19/big/img_191
+2002/08/26/big/img_537
+2003/01/15/big/img_716
+2003/01/15/big/img_767
+2002/08/02/big/img_452
+2002/08/08/big/img_1011
+2002/08/10/big/img_144
+2003/01/14/big/img_122
+2002/07/24/big/img_586
+2002/07/24/big/img_762
+2002/08/20/big/img_369
+2002/07/30/big/img_146
+2002/08/23/big/img_396
+2003/01/15/big/img_200
+2002/08/15/big/img_1183
+2003/01/14/big/img_698
+2002/08/09/big/img_792
+2002/08/06/big/img_2347
+2002/07/31/big/img_911
+2002/08/26/big/img_722
+2002/08/23/big/img_621
+2002/08/05/big/img_3790
+2003/01/13/big/img_633
+2002/08/09/big/img_224
+2002/07/24/big/img_454
+2002/07/21/big/img_202
+2002/08/02/big/img_630
+2002/08/30/big/img_18315
+2002/07/19/big/img_491
+2002/09/01/big/img_16456
+2002/08/09/big/img_242
+2002/07/25/big/img_595
+2002/07/22/big/img_522
+2002/08/01/big/img_1593
+2002/07/29/big/img_336
+2002/08/15/big/img_448
+2002/08/28/big/img_19281
+2002/07/29/big/img_342
+2002/08/12/big/img_78
+2003/01/14/big/img_525
+2002/07/28/big/img_147
+2002/08/11/big/img_353
+2002/08/22/big/img_513
+2002/08/04/big/img_721
+2002/08/17/big/img_247
+2003/01/14/big/img_891
+2002/08/20/big/img_853
+2002/07/19/big/img_414
+2002/08/01/big/img_1530
+2003/01/14/big/img_924
+2002/08/22/big/img_468
+2002/08/18/big/img_354
+2002/08/30/big/img_18193
+2002/08/23/big/img_492
+2002/08/15/big/img_871
+2002/08/12/big/img_494
+2002/08/06/big/img_2470
+2002/07/23/big/img_923
+2002/08/26/big/img_155
+2002/08/08/big/img_669
+2002/07/23/big/img_404
+2002/08/28/big/img_19421
+2002/08/29/big/img_18993
+2002/08/25/big/img_416
+2003/01/17/big/img_434
+2002/07/29/big/img_1370
+2002/07/28/big/img_483
+2002/08/11/big/img_50
+2002/08/10/big/img_404
+2002/09/02/big/img_15057
+2003/01/14/big/img_911
+2002/09/01/big/img_16697
+2003/01/16/big/img_665
+2002/09/01/big/img_16708
+2002/08/22/big/img_612
+2002/08/28/big/img_19471
+2002/08/02/big/img_198
+2003/01/16/big/img_527
+2002/08/22/big/img_209
+2002/08/30/big/img_18205
+2003/01/14/big/img_114
+2003/01/14/big/img_1028
+2003/01/16/big/img_894
+2003/01/14/big/img_837
+2002/07/30/big/img_9
+2002/08/06/big/img_2821
+2002/08/04/big/img_85
+2003/01/13/big/img_884
+2002/07/22/big/img_570
+2002/08/07/big/img_1773
+2002/07/26/big/img_208
+2003/01/17/big/img_946
+2002/07/19/big/img_930
+2003/01/01/big/img_698
+2003/01/17/big/img_612
+2002/07/19/big/img_372
+2002/07/30/big/img_721
+2003/01/14/big/img_649
+2002/08/19/big/img_4
+2002/07/25/big/img_1024
+2003/01/15/big/img_601
+2002/08/30/big/img_18470
+2002/07/22/big/img_29
+2002/08/07/big/img_1686
+2002/07/20/big/img_294
+2002/08/14/big/img_800
+2002/08/19/big/img_353
+2002/08/19/big/img_350
+2002/08/05/big/img_3392
+2002/08/09/big/img_622
+2003/01/15/big/img_236
+2002/08/11/big/img_643
+2002/08/05/big/img_3458
+2002/08/12/big/img_413
+2002/08/22/big/img_415
+2002/08/13/big/img_635
+2002/08/07/big/img_1198
+2002/08/04/big/img_873
+2002/08/12/big/img_407
+2003/01/15/big/img_346
+2002/08/02/big/img_275
+2002/08/17/big/img_997
+2002/08/21/big/img_958
+2002/08/20/big/img_579
+2002/07/29/big/img_142
+2003/01/14/big/img_1115
+2002/08/16/big/img_365
+2002/07/29/big/img_1414
+2002/08/17/big/img_489
+2002/08/13/big/img_1010
+2002/07/31/big/img_276
+2002/07/25/big/img_1000
+2002/08/23/big/img_524
+2002/08/28/big/img_19147
+2003/01/13/big/img_433
+2002/08/20/big/img_205
+2003/01/01/big/img_458
+2002/07/29/big/img_1449
+2003/01/16/big/img_696
+2002/08/28/big/img_19296
+2002/08/29/big/img_18688
+2002/08/21/big/img_767
+2002/08/20/big/img_532
+2002/08/26/big/img_187
+2002/07/26/big/img_183
+2002/07/27/big/img_890
+2003/01/13/big/img_576
+2002/07/30/big/img_15
+2002/07/31/big/img_889
+2002/08/31/big/img_17759
+2003/01/14/big/img_1114
+2002/07/19/big/img_445
+2002/08/03/big/img_593
+2002/07/24/big/img_750
+2002/07/30/big/img_133
+2002/08/25/big/img_671
+2002/07/20/big/img_351
+2002/08/31/big/img_17276
+2002/08/05/big/img_3231
+2002/09/02/big/img_15882
+2002/08/14/big/img_115
+2002/08/02/big/img_1148
+2002/07/25/big/img_936
+2002/07/31/big/img_639
+2002/08/04/big/img_427
+2002/08/22/big/img_843
+2003/01/17/big/img_17
+2003/01/13/big/img_690
+2002/08/13/big/img_472
+2002/08/09/big/img_425
+2002/08/05/big/img_3450
+2003/01/17/big/img_439
+2002/08/13/big/img_539
+2002/07/28/big/img_35
+2002/08/16/big/img_241
+2002/08/06/big/img_2898
+2003/01/16/big/img_429
+2002/08/05/big/img_3817
+2002/08/27/big/img_19919
+2002/07/19/big/img_422
+2002/08/15/big/img_560
+2002/07/23/big/img_750
+2002/07/30/big/img_353
+2002/08/05/big/img_43
+2002/08/23/big/img_305
+2002/08/01/big/img_2137
+2002/08/30/big/img_18097
+2002/08/01/big/img_1389
+2002/08/02/big/img_308
+2003/01/14/big/img_652
+2002/08/01/big/img_1798
+2003/01/14/big/img_732
+2003/01/16/big/img_294
+2002/08/26/big/img_213
+2002/07/24/big/img_842
+2003/01/13/big/img_630
+2003/01/13/big/img_634
+2002/08/06/big/img_2285
+2002/08/01/big/img_2162
+2002/08/30/big/img_18134
+2002/08/02/big/img_1045
+2002/08/01/big/img_2143
+2002/07/25/big/img_135
+2002/07/20/big/img_645
+2002/08/05/big/img_3666
+2002/08/14/big/img_523
+2002/08/04/big/img_425
+2003/01/14/big/img_137
+2003/01/01/big/img_176
+2002/08/15/big/img_505
+2002/08/24/big/img_386
+2002/08/05/big/img_3187
+2002/08/15/big/img_419
+2003/01/13/big/img_520
+2002/08/04/big/img_444
+2002/08/26/big/img_483
+2002/08/05/big/img_3449
+2002/08/30/big/img_18409
+2002/08/28/big/img_19455
+2002/08/27/big/img_20090
+2002/07/23/big/img_625
+2002/08/24/big/img_205
+2002/08/08/big/img_938
+2003/01/13/big/img_527
+2002/08/07/big/img_1712
+2002/07/24/big/img_801
+2002/08/09/big/img_579
+2003/01/14/big/img_41
+2003/01/15/big/img_1130
+2002/07/21/big/img_672
+2002/08/07/big/img_1590
+2003/01/01/big/img_532
+2002/08/02/big/img_529
+2002/08/05/big/img_3591
+2002/08/23/big/img_5
+2003/01/14/big/img_882
+2002/08/28/big/img_19234
+2002/07/24/big/img_398
+2003/01/14/big/img_592
+2002/08/22/big/img_548
+2002/08/12/big/img_761
+2003/01/16/big/img_497
+2002/08/18/big/img_133
+2002/08/08/big/img_874
+2002/07/19/big/img_247
+2002/08/15/big/img_170
+2002/08/27/big/img_19679
+2002/08/20/big/img_246
+2002/08/24/big/img_358
+2002/07/29/big/img_599
+2002/08/01/big/img_1555
+2002/07/30/big/img_491
+2002/07/30/big/img_371
+2003/01/16/big/img_682
+2002/07/25/big/img_619
+2003/01/15/big/img_587
+2002/08/02/big/img_1212
+2002/08/01/big/img_2152
+2002/07/25/big/img_668
+2003/01/16/big/img_574
+2002/08/28/big/img_19464
+2002/08/11/big/img_536
+2002/07/24/big/img_201
+2002/08/05/big/img_3488
+2002/07/25/big/img_887
+2002/07/22/big/img_789
+2002/07/30/big/img_432
+2002/08/16/big/img_166
+2002/09/01/big/img_16333
+2002/07/26/big/img_1010
+2002/07/21/big/img_793
+2002/07/22/big/img_720
+2002/07/31/big/img_337
+2002/07/27/big/img_185
+2002/08/23/big/img_440
+2002/07/31/big/img_801
+2002/07/25/big/img_478
+2003/01/14/big/img_171
+2002/08/07/big/img_1054
+2002/09/02/big/img_15659
+2002/07/29/big/img_1348
+2002/08/09/big/img_337
+2002/08/26/big/img_684
+2002/07/31/big/img_537
+2002/08/15/big/img_808
+2003/01/13/big/img_740
+2002/08/07/big/img_1667
+2002/08/03/big/img_404
+2002/08/06/big/img_2520
+2002/07/19/big/img_230
+2002/07/19/big/img_356
+2003/01/16/big/img_627
+2002/08/04/big/img_474
+2002/07/29/big/img_833
+2002/07/25/big/img_176
+2002/08/01/big/img_1684
+2002/08/21/big/img_643
+2002/08/27/big/img_19673
+2002/08/02/big/img_838
+2002/08/06/big/img_2378
+2003/01/15/big/img_48
+2002/07/30/big/img_470
+2002/08/15/big/img_963
+2002/08/24/big/img_444
+2002/08/16/big/img_662
+2002/08/15/big/img_1209
+2002/07/24/big/img_25
+2002/08/06/big/img_2740
+2002/07/29/big/img_996
+2002/08/31/big/img_18074
+2002/08/04/big/img_343
+2003/01/17/big/img_509
+2003/01/13/big/img_726
+2002/08/07/big/img_1466
+2002/07/26/big/img_307
+2002/08/10/big/img_598
+2002/08/13/big/img_890
+2002/08/14/big/img_997
+2002/07/19/big/img_392
+2002/08/02/big/img_475
+2002/08/29/big/img_19038
+2002/07/29/big/img_538
+2002/07/29/big/img_502
+2002/08/02/big/img_364
+2002/08/31/big/img_17353
+2002/08/08/big/img_539
+2002/08/01/big/img_1449
+2002/07/22/big/img_363
+2002/08/02/big/img_90
+2002/09/01/big/img_16867
+2002/08/05/big/img_3371
+2002/07/30/big/img_342
+2002/08/07/big/img_1363
+2002/08/22/big/img_790
+2003/01/15/big/img_404
+2002/08/05/big/img_3447
+2002/09/01/big/img_16167
+2003/01/13/big/img_840
+2002/08/22/big/img_1001
+2002/08/09/big/img_431
+2002/07/27/big/img_618
+2002/07/31/big/img_741
+2002/07/30/big/img_964
+2002/07/25/big/img_86
+2002/07/29/big/img_275
+2002/08/21/big/img_921
+2002/07/26/big/img_892
+2002/08/21/big/img_663
+2003/01/13/big/img_567
+2003/01/14/big/img_719
+2002/07/28/big/img_251
+2003/01/15/big/img_1123
+2002/07/29/big/img_260
+2002/08/24/big/img_337
+2002/08/01/big/img_1914
+2002/08/13/big/img_373
+2003/01/15/big/img_589
+2002/08/13/big/img_906
+2002/07/26/big/img_270
+2002/08/26/big/img_313
+2002/08/25/big/img_694
+2003/01/01/big/img_327
+2002/07/23/big/img_261
+2002/08/26/big/img_642
+2002/07/29/big/img_918
+2002/07/23/big/img_455
+2002/07/24/big/img_612
+2002/07/23/big/img_534
+2002/07/19/big/img_534
+2002/07/19/big/img_726
+2002/08/01/big/img_2146
+2002/08/02/big/img_543
+2003/01/16/big/img_777
+2002/07/30/big/img_484
+2002/08/13/big/img_1161
+2002/07/21/big/img_390
+2002/08/06/big/img_2288
+2002/08/21/big/img_677
+2002/08/13/big/img_747
+2002/08/15/big/img_1248
+2002/07/31/big/img_416
+2002/09/02/big/img_15259
+2002/08/16/big/img_781
+2002/08/24/big/img_754
+2002/07/24/big/img_803
+2002/08/20/big/img_609
+2002/08/28/big/img_19571
+2002/09/01/big/img_16140
+2002/08/26/big/img_769
+2002/07/20/big/img_588
+2002/08/02/big/img_898
+2002/07/21/big/img_466
+2002/08/14/big/img_1046
+2002/07/25/big/img_212
+2002/08/26/big/img_353
+2002/08/19/big/img_810
+2002/08/31/big/img_17824
+2002/08/12/big/img_631
+2002/07/19/big/img_828
+2002/07/24/big/img_130
+2002/08/25/big/img_580
+2002/07/31/big/img_699
+2002/07/23/big/img_808
+2002/07/31/big/img_377
+2003/01/16/big/img_570
+2002/09/01/big/img_16254
+2002/07/21/big/img_471
+2002/08/01/big/img_1548
+2002/08/18/big/img_252
+2002/08/19/big/img_576
+2002/08/20/big/img_464
+2002/07/27/big/img_735
+2002/08/21/big/img_589
+2003/01/15/big/img_1192
+2002/08/09/big/img_302
+2002/07/31/big/img_594
+2002/08/23/big/img_19
+2002/08/29/big/img_18819
+2002/08/19/big/img_293
+2002/07/30/big/img_331
+2002/08/23/big/img_607
+2002/07/30/big/img_363
+2002/08/16/big/img_766
+2003/01/13/big/img_481
+2002/08/06/big/img_2515
+2002/09/02/big/img_15913
+2002/09/02/big/img_15827
+2002/09/02/big/img_15053
+2002/08/07/big/img_1576
+2002/07/23/big/img_268
+2002/08/21/big/img_152
+2003/01/15/big/img_578
+2002/07/21/big/img_589
+2002/07/20/big/img_548
+2002/08/27/big/img_19693
+2002/08/31/big/img_17252
+2002/07/31/big/img_138
+2002/07/23/big/img_372
+2002/08/16/big/img_695
+2002/07/27/big/img_287
+2002/08/15/big/img_315
+2002/08/10/big/img_361
+2002/07/29/big/img_899
+2002/08/13/big/img_771
+2002/08/21/big/img_92
+2003/01/15/big/img_425
+2003/01/16/big/img_450
+2002/09/01/big/img_16942
+2002/08/02/big/img_51
+2002/09/02/big/img_15379
+2002/08/24/big/img_147
+2002/08/30/big/img_18122
+2002/07/26/big/img_950
+2002/08/07/big/img_1400
+2002/08/17/big/img_468
+2002/08/15/big/img_470
+2002/07/30/big/img_318
+2002/07/22/big/img_644
+2002/08/27/big/img_19732
+2002/07/23/big/img_601
+2002/08/26/big/img_398
+2002/08/21/big/img_428
+2002/08/06/big/img_2119
+2002/08/29/big/img_19103
+2003/01/14/big/img_933
+2002/08/11/big/img_674
+2002/08/28/big/img_19420
+2002/08/03/big/img_418
+2002/08/17/big/img_312
+2002/07/25/big/img_1044
+2003/01/17/big/img_671
+2002/08/30/big/img_18297
+2002/07/25/big/img_755
+2002/07/23/big/img_471
+2002/08/21/big/img_39
+2002/07/26/big/img_699
+2003/01/14/big/img_33
+2002/07/31/big/img_411
+2002/08/16/big/img_645
+2003/01/17/big/img_116
+2002/09/02/big/img_15903
+2002/08/20/big/img_120
+2002/08/22/big/img_176
+2002/07/29/big/img_1316
+2002/08/27/big/img_19914
+2002/07/22/big/img_719
+2002/08/28/big/img_19239
+2003/01/13/big/img_385
+2002/08/08/big/img_525
+2002/07/19/big/img_782
+2002/08/13/big/img_843
+2002/07/30/big/img_107
+2002/08/11/big/img_752
+2002/07/29/big/img_383
+2002/08/26/big/img_249
+2002/08/29/big/img_18860
+2002/07/30/big/img_70
+2002/07/26/big/img_194
+2002/08/15/big/img_530
+2002/08/08/big/img_816
+2002/07/31/big/img_286
+2003/01/13/big/img_294
+2002/07/31/big/img_251
+2002/07/24/big/img_13
+2002/08/31/big/img_17938
+2002/07/22/big/img_642
+2003/01/14/big/img_728
+2002/08/18/big/img_47
+2002/08/22/big/img_306
+2002/08/20/big/img_348
+2002/08/15/big/img_764
+2002/08/08/big/img_163
+2002/07/23/big/img_531
+2002/07/23/big/img_467
+2003/01/16/big/img_743
+2003/01/13/big/img_535
+2002/08/02/big/img_523
+2002/08/22/big/img_120
+2002/08/11/big/img_496
+2002/08/29/big/img_19075
+2002/08/08/big/img_465
+2002/08/09/big/img_790
+2002/08/19/big/img_588
+2002/08/23/big/img_407
+2003/01/17/big/img_435
+2002/08/24/big/img_398
+2002/08/27/big/img_19899
+2003/01/15/big/img_335
+2002/08/13/big/img_493
+2002/09/02/big/img_15460
+2002/07/31/big/img_470
+2002/08/05/big/img_3550
+2002/07/28/big/img_123
+2002/08/01/big/img_1498
+2002/08/04/big/img_504
+2003/01/17/big/img_427
+2002/08/27/big/img_19708
+2002/07/27/big/img_861
+2002/07/25/big/img_685
+2002/07/31/big/img_207
+2003/01/14/big/img_745
+2002/08/31/big/img_17756
+2002/08/24/big/img_288
+2002/08/18/big/img_181
+2002/08/10/big/img_520
+2002/08/25/big/img_705
+2002/08/23/big/img_226
+2002/08/04/big/img_727
+2002/07/24/big/img_625
+2002/08/28/big/img_19157
+2002/08/23/big/img_586
+2002/07/31/big/img_232
+2003/01/13/big/img_240
+2003/01/14/big/img_321
+2003/01/15/big/img_533
+2002/07/23/big/img_480
+2002/07/24/big/img_371
+2002/08/21/big/img_702
+2002/08/31/big/img_17075
+2002/09/02/big/img_15278
+2002/07/29/big/img_246
+2003/01/15/big/img_829
+2003/01/15/big/img_1213
+2003/01/16/big/img_441
+2002/08/14/big/img_921
+2002/07/23/big/img_425
+2002/08/15/big/img_296
+2002/07/19/big/img_135
+2002/07/26/big/img_402
+2003/01/17/big/img_88
+2002/08/20/big/img_872
+2002/08/13/big/img_1110
+2003/01/16/big/img_1040
+2002/07/23/big/img_9
+2002/08/13/big/img_700
+2002/08/16/big/img_371
+2002/08/27/big/img_19966
+2003/01/17/big/img_391
+2002/08/18/big/img_426
+2002/08/01/big/img_1618
+2002/07/21/big/img_754
+2003/01/14/big/img_1101
+2003/01/16/big/img_1022
+2002/07/22/big/img_275
+2002/08/24/big/img_86
+2002/08/17/big/img_582
+2003/01/15/big/img_765
+2003/01/17/big/img_449
+2002/07/28/big/img_265
+2003/01/13/big/img_552
+2002/07/28/big/img_115
+2003/01/16/big/img_56
+2002/08/02/big/img_1232
+2003/01/17/big/img_925
+2002/07/22/big/img_445
+2002/07/25/big/img_957
+2002/07/20/big/img_589
+2002/08/31/big/img_17107
+2002/07/29/big/img_483
+2002/08/14/big/img_1063
+2002/08/07/big/img_1545
+2002/08/14/big/img_680
+2002/09/01/big/img_16694
+2002/08/14/big/img_257
+2002/08/11/big/img_726
+2002/07/26/big/img_681
+2002/07/25/big/img_481
+2003/01/14/big/img_737
+2002/08/28/big/img_19480
+2003/01/16/big/img_362
+2002/08/27/big/img_19865
+2003/01/01/big/img_547
+2002/09/02/big/img_15074
+2002/08/01/big/img_1453
+2002/08/22/big/img_594
+2002/08/28/big/img_19263
+2002/08/13/big/img_478
+2002/07/29/big/img_1358
+2003/01/14/big/img_1022
+2002/08/16/big/img_450
+2002/08/02/big/img_159
+2002/07/26/big/img_781
+2003/01/13/big/img_601
+2002/08/20/big/img_407
+2002/08/15/big/img_468
+2002/08/31/big/img_17902
+2002/08/16/big/img_81
+2002/07/25/big/img_987
+2002/07/25/big/img_500
+2002/08/02/big/img_31
+2002/08/18/big/img_538
+2002/08/08/big/img_54
+2002/07/23/big/img_686
+2002/07/24/big/img_836
+2003/01/17/big/img_734
+2002/08/16/big/img_1055
+2003/01/16/big/img_521
+2002/07/25/big/img_612
+2002/08/22/big/img_778
+2002/08/03/big/img_251
+2002/08/12/big/img_436
+2002/08/23/big/img_705
+2002/07/28/big/img_243
+2002/07/25/big/img_1029
+2002/08/20/big/img_287
+2002/08/29/big/img_18739
+2002/08/05/big/img_3272
+2002/07/27/big/img_214
+2003/01/14/big/img_5
+2002/08/01/big/img_1380
+2002/08/29/big/img_19097
+2002/07/30/big/img_486
+2002/08/29/big/img_18707
+2002/08/10/big/img_559
+2002/08/15/big/img_365
+2002/08/09/big/img_525
+2002/08/10/big/img_689
+2002/07/25/big/img_502
+2002/08/03/big/img_667
+2002/08/10/big/img_855
+2002/08/10/big/img_706
+2002/08/18/big/img_603
+2003/01/16/big/img_1055
+2002/08/31/big/img_17890
+2002/08/15/big/img_761
+2003/01/15/big/img_489
+2002/08/26/big/img_351
+2002/08/01/big/img_1772
+2002/08/31/big/img_17729
+2002/07/25/big/img_609
+2003/01/13/big/img_539
+2002/07/27/big/img_686
+2002/07/31/big/img_311
+2002/08/22/big/img_799
+2003/01/16/big/img_936
+2002/08/31/big/img_17813
+2002/08/04/big/img_862
+2002/08/09/big/img_332
+2002/07/20/big/img_148
+2002/08/12/big/img_426
+2002/07/24/big/img_69
+2002/07/27/big/img_685
+2002/08/02/big/img_480
+2002/08/26/big/img_154
+2002/07/24/big/img_598
+2002/08/01/big/img_1881
+2002/08/20/big/img_667
+2003/01/14/big/img_495
+2002/07/21/big/img_744
+2002/07/30/big/img_150
+2002/07/23/big/img_924
+2002/08/08/big/img_272
+2002/07/23/big/img_310
+2002/07/25/big/img_1011
+2002/09/02/big/img_15725
+2002/07/19/big/img_814
+2002/08/20/big/img_936
+2002/07/25/big/img_85
+2002/08/24/big/img_662
+2002/08/09/big/img_495
+2003/01/15/big/img_196
+2002/08/16/big/img_707
+2002/08/28/big/img_19370
+2002/08/06/big/img_2366
+2002/08/06/big/img_3012
+2002/08/01/big/img_1452
+2002/07/31/big/img_742
+2002/07/27/big/img_914
+2003/01/13/big/img_290
+2002/07/31/big/img_288
+2002/08/02/big/img_171
+2002/08/22/big/img_191
+2002/07/27/big/img_1066
+2002/08/12/big/img_383
+2003/01/17/big/img_1018
+2002/08/01/big/img_1785
+2002/08/11/big/img_390
+2002/08/27/big/img_20037
+2002/08/12/big/img_38
+2003/01/15/big/img_103
+2002/08/26/big/img_31
+2002/08/18/big/img_660
+2002/07/22/big/img_694
+2002/08/15/big/img_24
+2002/07/27/big/img_1077
+2002/08/01/big/img_1943
+2002/07/22/big/img_292
+2002/09/01/big/img_16857
+2002/07/22/big/img_892
+2003/01/14/big/img_46
+2002/08/09/big/img_469
+2002/08/09/big/img_414
+2003/01/16/big/img_40
+2002/08/28/big/img_19231
+2002/07/27/big/img_978
+2002/07/23/big/img_475
+2002/07/25/big/img_92
+2002/08/09/big/img_799
+2002/07/25/big/img_491
+2002/08/03/big/img_654
+2003/01/15/big/img_687
+2002/08/11/big/img_478
+2002/08/07/big/img_1664
+2002/08/20/big/img_362
+2002/08/01/big/img_1298
+2003/01/13/big/img_500
+2002/08/06/big/img_2896
+2002/08/30/big/img_18529
+2002/08/16/big/img_1020
+2002/07/29/big/img_892
+2002/08/29/big/img_18726
+2002/07/21/big/img_453
+2002/08/17/big/img_437
+2002/07/19/big/img_665
+2002/07/22/big/img_440
+2002/07/19/big/img_582
+2002/07/21/big/img_233
+2003/01/01/big/img_82
+2002/07/25/big/img_341
+2002/07/29/big/img_864
+2002/08/02/big/img_276
+2002/08/29/big/img_18654
+2002/07/27/big/img_1024
+2002/08/19/big/img_373
+2003/01/15/big/img_241
+2002/07/25/big/img_84
+2002/08/13/big/img_834
+2002/08/10/big/img_511
+2002/08/01/big/img_1627
+2002/08/08/big/img_607
+2002/08/06/big/img_2083
+2002/08/01/big/img_1486
+2002/08/08/big/img_700
+2002/08/01/big/img_1954
+2002/08/21/big/img_54
+2002/07/30/big/img_847
+2002/08/28/big/img_19169
+2002/07/21/big/img_549
+2002/08/03/big/img_693
+2002/07/31/big/img_1002
+2003/01/14/big/img_1035
+2003/01/16/big/img_622
+2002/07/30/big/img_1201
+2002/08/10/big/img_444
+2002/07/31/big/img_374
+2002/08/21/big/img_301
+2002/08/13/big/img_1095
+2003/01/13/big/img_288
+2002/07/25/big/img_232
+2003/01/13/big/img_967
+2002/08/26/big/img_360
+2002/08/05/big/img_67
+2002/08/29/big/img_18969
+2002/07/28/big/img_16
+2002/08/16/big/img_515
+2002/07/20/big/img_708
+2002/08/18/big/img_178
+2003/01/15/big/img_509
+2002/07/25/big/img_430
+2002/08/21/big/img_738
+2002/08/16/big/img_886
+2002/09/02/big/img_15605
+2002/09/01/big/img_16242
+2002/08/24/big/img_711
+2002/07/25/big/img_90
+2002/08/09/big/img_491
+2002/07/30/big/img_534
+2003/01/13/big/img_474
+2002/08/25/big/img_510
+2002/08/15/big/img_555
+2002/08/02/big/img_775
+2002/07/23/big/img_975
+2002/08/19/big/img_229
+2003/01/17/big/img_860
+2003/01/02/big/img_10
+2002/07/23/big/img_542
+2002/08/06/big/img_2535
+2002/07/22/big/img_37
+2002/08/06/big/img_2342
+2002/08/25/big/img_515
+2002/08/25/big/img_336
+2002/08/18/big/img_837
+2002/08/21/big/img_616
+2003/01/17/big/img_24
+2002/07/26/big/img_936
+2002/08/14/big/img_896
+2002/07/29/big/img_465
+2002/07/31/big/img_543
+2002/08/01/big/img_1411
+2002/08/02/big/img_423
+2002/08/21/big/img_44
+2002/07/31/big/img_11
+2003/01/15/big/img_628
+2003/01/15/big/img_605
+2002/07/30/big/img_571
+2002/07/23/big/img_428
+2002/08/15/big/img_942
+2002/07/26/big/img_531
+2003/01/16/big/img_59
+2002/08/02/big/img_410
+2002/07/31/big/img_230
+2002/08/19/big/img_806
+2003/01/14/big/img_462
+2002/08/16/big/img_370
+2002/08/13/big/img_380
+2002/08/16/big/img_932
+2002/07/19/big/img_393
+2002/08/20/big/img_764
+2002/08/15/big/img_616
+2002/07/26/big/img_267
+2002/07/27/big/img_1069
+2002/08/14/big/img_1041
+2003/01/13/big/img_594
+2002/09/01/big/img_16845
+2002/08/09/big/img_229
+2003/01/16/big/img_639
+2002/08/19/big/img_398
+2002/08/18/big/img_978
+2002/08/24/big/img_296
+2002/07/29/big/img_415
+2002/07/30/big/img_923
+2002/08/18/big/img_575
+2002/08/22/big/img_182
+2002/07/25/big/img_806
+2002/07/22/big/img_49
+2002/07/29/big/img_989
+2003/01/17/big/img_789
+2003/01/15/big/img_503
+2002/09/01/big/img_16062
+2003/01/17/big/img_794
+2002/08/15/big/img_564
+2003/01/15/big/img_222
+2002/08/01/big/img_1656
+2003/01/13/big/img_432
+2002/07/19/big/img_426
+2002/08/17/big/img_244
+2002/08/13/big/img_805
+2002/09/02/big/img_15067
+2002/08/11/big/img_58
+2002/08/22/big/img_636
+2002/07/22/big/img_416
+2002/08/13/big/img_836
+2002/08/26/big/img_363
+2002/07/30/big/img_917
+2003/01/14/big/img_206
+2002/08/12/big/img_311
+2002/08/31/big/img_17623
+2002/07/29/big/img_661
+2003/01/13/big/img_417
+2002/08/02/big/img_463
+2002/08/02/big/img_669
+2002/08/26/big/img_670
+2002/08/02/big/img_375
+2002/07/19/big/img_209
+2002/08/08/big/img_115
+2002/08/21/big/img_399
+2002/08/20/big/img_911
+2002/08/07/big/img_1212
+2002/08/20/big/img_578
+2002/08/22/big/img_554
+2002/08/21/big/img_484
+2002/07/25/big/img_450
+2002/08/03/big/img_542
+2002/08/15/big/img_561
+2002/07/23/big/img_360
+2002/08/30/big/img_18137
+2002/07/25/big/img_250
+2002/08/03/big/img_647
+2002/08/20/big/img_375
+2002/08/14/big/img_387
+2002/09/01/big/img_16990
+2002/08/28/big/img_19341
+2003/01/15/big/img_239
+2002/08/20/big/img_528
+2002/08/12/big/img_130
+2002/09/02/big/img_15108
+2003/01/15/big/img_372
+2002/08/16/big/img_678
+2002/08/04/big/img_623
+2002/07/23/big/img_477
+2002/08/28/big/img_19590
+2003/01/17/big/img_978
+2002/09/01/big/img_16692
+2002/07/20/big/img_109
+2002/08/06/big/img_2660
+2003/01/14/big/img_464
+2002/08/09/big/img_618
+2002/07/22/big/img_722
+2002/08/25/big/img_419
+2002/08/03/big/img_314
+2002/08/25/big/img_40
+2002/07/27/big/img_430
+2002/08/10/big/img_569
+2002/08/23/big/img_398
+2002/07/23/big/img_893
+2002/08/16/big/img_261
+2002/08/06/big/img_2668
+2002/07/22/big/img_835
+2002/09/02/big/img_15093
+2003/01/16/big/img_65
+2002/08/21/big/img_448
+2003/01/14/big/img_351
+2003/01/17/big/img_133
+2002/07/28/big/img_493
+2003/01/15/big/img_640
+2002/09/01/big/img_16880
+2002/08/15/big/img_350
+2002/08/20/big/img_624
+2002/08/25/big/img_604
+2002/08/06/big/img_2200
+2002/08/23/big/img_290
+2002/08/13/big/img_1152
+2003/01/14/big/img_251
+2002/08/02/big/img_538
+2002/08/22/big/img_613
+2003/01/13/big/img_351
+2002/08/18/big/img_368
+2002/07/23/big/img_392
+2002/07/25/big/img_198
+2002/07/25/big/img_418
+2002/08/26/big/img_614
+2002/07/23/big/img_405
+2003/01/14/big/img_445
+2002/07/25/big/img_326
+2002/08/10/big/img_734
+2003/01/14/big/img_530
+2002/08/08/big/img_561
+2002/08/29/big/img_18990
+2002/08/10/big/img_576
+2002/07/29/big/img_1494
+2002/07/19/big/img_198
+2002/08/10/big/img_562
+2002/07/22/big/img_901
+2003/01/14/big/img_37
+2002/09/02/big/img_15629
+2003/01/14/big/img_58
+2002/08/01/big/img_1364
+2002/07/27/big/img_636
+2003/01/13/big/img_241
+2002/09/01/big/img_16988
+2003/01/13/big/img_560
+2002/08/09/big/img_533
+2002/07/31/big/img_249
+2003/01/17/big/img_1007
+2002/07/21/big/img_64
+2003/01/13/big/img_537
+2003/01/15/big/img_606
+2002/08/18/big/img_651
+2002/08/24/big/img_405
+2002/07/26/big/img_837
+2002/08/09/big/img_562
+2002/08/01/big/img_1983
+2002/08/03/big/img_514
+2002/07/29/big/img_314
+2002/08/12/big/img_493
+2003/01/14/big/img_121
+2003/01/14/big/img_479
+2002/08/04/big/img_410
+2002/07/22/big/img_607
+2003/01/17/big/img_417
+2002/07/20/big/img_547
+2002/08/13/big/img_396
+2002/08/31/big/img_17538
+2002/08/13/big/img_187
+2002/08/12/big/img_328
+2003/01/14/big/img_569
+2002/07/27/big/img_1081
+2002/08/14/big/img_504
+2002/08/23/big/img_785
+2002/07/26/big/img_339
+2002/08/07/big/img_1156
+2002/08/07/big/img_1456
+2002/08/23/big/img_378
+2002/08/27/big/img_19719
+2002/07/31/big/img_39
+2002/07/31/big/img_883
+2003/01/14/big/img_676
+2002/07/29/big/img_214
+2002/07/26/big/img_669
+2002/07/25/big/img_202
+2002/08/08/big/img_259
+2003/01/17/big/img_943
+2003/01/15/big/img_512
+2002/08/05/big/img_3295
+2002/08/27/big/img_19685
+2002/08/08/big/img_277
+2002/08/30/big/img_18154
+2002/07/22/big/img_663
+2002/08/29/big/img_18914
+2002/07/31/big/img_908
+2002/08/27/big/img_19926
+2003/01/13/big/img_791
+2003/01/15/big/img_827
+2002/08/18/big/img_878
+2002/08/14/big/img_670
+2002/07/20/big/img_182
+2002/08/15/big/img_291
+2002/08/06/big/img_2600
+2002/07/23/big/img_587
+2002/08/14/big/img_577
+2003/01/15/big/img_585
+2002/07/30/big/img_310
+2002/08/03/big/img_658
+2002/08/10/big/img_157
+2002/08/19/big/img_811
+2002/07/29/big/img_1318
+2002/08/04/big/img_104
+2002/07/30/big/img_332
+2002/07/24/big/img_789
+2002/07/29/big/img_516
+2002/07/23/big/img_843
+2002/08/01/big/img_1528
+2002/08/13/big/img_798
+2002/08/07/big/img_1729
+2002/08/28/big/img_19448
+2003/01/16/big/img_95
+2002/08/12/big/img_473
+2002/07/27/big/img_269
+2003/01/16/big/img_621
+2002/07/29/big/img_772
+2002/07/24/big/img_171
+2002/07/19/big/img_429
+2002/08/07/big/img_1933
+2002/08/27/big/img_19629
+2002/08/05/big/img_3688
+2002/08/07/big/img_1691
+2002/07/23/big/img_600
+2002/07/29/big/img_666
+2002/08/25/big/img_566
+2002/08/06/big/img_2659
+2002/08/29/big/img_18929
+2002/08/16/big/img_407
+2002/08/18/big/img_774
+2002/08/19/big/img_249
+2002/08/06/big/img_2427
+2002/08/29/big/img_18899
+2002/08/01/big/img_1818
+2002/07/31/big/img_108
+2002/07/29/big/img_500
+2002/08/11/big/img_115
+2002/07/19/big/img_521
+2002/08/02/big/img_1163
+2002/07/22/big/img_62
+2002/08/13/big/img_466
+2002/08/21/big/img_956
+2002/08/23/big/img_602
+2002/08/20/big/img_858
+2002/07/25/big/img_690
+2002/07/19/big/img_130
+2002/08/04/big/img_874
+2002/07/26/big/img_489
+2002/07/22/big/img_548
+2002/08/10/big/img_191
+2002/07/25/big/img_1051
+2002/08/18/big/img_473
+2002/08/12/big/img_755
+2002/08/18/big/img_413
+2002/08/08/big/img_1044
+2002/08/17/big/img_680
+2002/08/26/big/img_235
+2002/08/20/big/img_330
+2002/08/22/big/img_344
+2002/08/09/big/img_593
+2002/07/31/big/img_1006
+2002/08/14/big/img_337
+2002/08/16/big/img_728
+2002/07/24/big/img_834
+2002/08/04/big/img_552
+2002/09/02/big/img_15213
+2002/07/25/big/img_725
+2002/08/30/big/img_18290
+2003/01/01/big/img_475
+2002/07/27/big/img_1083
+2002/08/29/big/img_18955
+2002/08/31/big/img_17232
+2002/08/08/big/img_480
+2002/08/01/big/img_1311
+2002/07/30/big/img_745
+2002/08/03/big/img_649
+2002/08/12/big/img_193
+2002/07/29/big/img_228
+2002/07/25/big/img_836
+2002/08/20/big/img_400
+2002/07/30/big/img_507
+2002/09/02/big/img_15072
+2002/07/26/big/img_658
+2002/07/28/big/img_503
+2002/08/05/big/img_3814
+2002/08/24/big/img_745
+2003/01/13/big/img_817
+2002/08/08/big/img_579
+2002/07/22/big/img_251
+2003/01/13/big/img_689
+2002/07/25/big/img_407
+2002/08/13/big/img_1050
+2002/08/14/big/img_733
+2002/07/24/big/img_82
+2003/01/17/big/img_288
+2003/01/15/big/img_475
+2002/08/14/big/img_620
+2002/08/21/big/img_167
+2002/07/19/big/img_300
+2002/07/26/big/img_219
+2002/08/01/big/img_1468
+2002/07/23/big/img_260
+2002/08/09/big/img_555
+2002/07/19/big/img_160
+2002/08/02/big/img_1060
+2003/01/14/big/img_149
+2002/08/15/big/img_346
+2002/08/24/big/img_597
+2002/08/22/big/img_502
+2002/08/30/big/img_18228
+2002/07/21/big/img_766
+2003/01/15/big/img_841
+2002/07/24/big/img_516
+2002/08/02/big/img_265
+2002/08/15/big/img_1243
+2003/01/15/big/img_223
+2002/08/04/big/img_236
+2002/07/22/big/img_309
+2002/07/20/big/img_656
+2002/07/31/big/img_412
+2002/09/01/big/img_16462
+2003/01/16/big/img_431
+2002/07/22/big/img_793
+2002/08/15/big/img_877
+2002/07/26/big/img_282
+2002/07/25/big/img_529
+2002/08/24/big/img_613
+2003/01/17/big/img_700
+2002/08/06/big/img_2526
+2002/08/24/big/img_394
+2002/08/21/big/img_521
+2002/08/25/big/img_560
+2002/07/29/big/img_966
+2002/07/25/big/img_448
+2003/01/13/big/img_782
+2002/08/21/big/img_296
+2002/09/01/big/img_16755
+2002/08/05/big/img_3552
+2002/09/02/big/img_15823
+2003/01/14/big/img_193
+2002/07/21/big/img_159
+2002/08/02/big/img_564
+2002/08/16/big/img_300
+2002/07/19/big/img_269
+2002/08/13/big/img_676
+2002/07/28/big/img_57
+2002/08/05/big/img_3318
+2002/07/31/big/img_218
+2002/08/21/big/img_898
+2002/07/29/big/img_109
+2002/07/19/big/img_854
+2002/08/23/big/img_311
+2002/08/14/big/img_318
+2002/07/25/big/img_523
+2002/07/21/big/img_678
+2003/01/17/big/img_690
+2002/08/28/big/img_19503
+2002/08/18/big/img_251
+2002/08/22/big/img_672
+2002/08/20/big/img_663
+2002/08/02/big/img_148
+2002/09/02/big/img_15580
+2002/07/25/big/img_778
+2002/08/14/big/img_565
+2002/08/12/big/img_374
+2002/08/13/big/img_1018
+2002/08/20/big/img_474
+2002/08/25/big/img_33
+2002/08/02/big/img_1190
+2002/08/08/big/img_864
+2002/08/14/big/img_1071
+2002/08/30/big/img_18103
+2002/08/18/big/img_533
+2003/01/16/big/img_650
+2002/07/25/big/img_108
+2002/07/26/big/img_81
+2002/07/27/big/img_543
+2002/07/29/big/img_521
+2003/01/13/big/img_434
+2002/08/26/big/img_674
+2002/08/06/big/img_2932
+2002/08/07/big/img_1262
+2003/01/15/big/img_201
+2003/01/16/big/img_673
+2002/09/02/big/img_15988
+2002/07/29/big/img_1306
+2003/01/14/big/img_1072
+2002/08/30/big/img_18232
+2002/08/05/big/img_3711
+2002/07/23/big/img_775
+2002/08/01/big/img_16
+2003/01/16/big/img_630
+2002/08/22/big/img_695
+2002/08/14/big/img_51
+2002/08/14/big/img_782
+2002/08/24/big/img_742
+2003/01/14/big/img_512
+2003/01/15/big/img_1183
+2003/01/15/big/img_714
+2002/08/01/big/img_2078
+2002/07/31/big/img_682
+2002/09/02/big/img_15687
+2002/07/26/big/img_518
+2002/08/27/big/img_19676
+2002/09/02/big/img_15969
+2002/08/02/big/img_931
+2002/08/25/big/img_508
+2002/08/29/big/img_18616
+2002/07/22/big/img_839
+2002/07/28/big/img_313
+2003/01/14/big/img_155
+2002/08/02/big/img_1105
+2002/08/09/big/img_53
+2002/08/16/big/img_469
+2002/08/15/big/img_502
+2002/08/20/big/img_575
+2002/07/25/big/img_138
+2003/01/16/big/img_579
+2002/07/19/big/img_352
+2003/01/14/big/img_762
+2003/01/01/big/img_588
+2002/08/02/big/img_981
+2002/08/21/big/img_447
+2002/09/01/big/img_16151
+2003/01/14/big/img_769
+2002/08/23/big/img_461
+2002/08/17/big/img_240
+2002/09/02/big/img_15220
+2002/07/19/big/img_408
+2002/09/02/big/img_15496
+2002/07/29/big/img_758
+2002/08/28/big/img_19392
+2002/08/06/big/img_2723
+2002/08/31/big/img_17752
+2002/08/23/big/img_469
+2002/08/13/big/img_515
+2002/09/02/big/img_15551
+2002/08/03/big/img_462
+2002/07/24/big/img_613
+2002/07/22/big/img_61
+2002/08/08/big/img_171
+2002/08/21/big/img_177
+2003/01/14/big/img_105
+2002/08/02/big/img_1017
+2002/08/22/big/img_106
+2002/07/27/big/img_542
+2002/07/21/big/img_665
+2002/07/23/big/img_595
+2002/08/04/big/img_657
+2002/08/29/big/img_19002
+2003/01/15/big/img_550
+2002/08/14/big/img_662
+2002/07/20/big/img_425
+2002/08/30/big/img_18528
+2002/07/26/big/img_611
+2002/07/22/big/img_849
+2002/08/07/big/img_1655
+2002/08/21/big/img_638
+2003/01/17/big/img_732
+2003/01/01/big/img_496
+2002/08/18/big/img_713
+2002/08/08/big/img_109
+2002/07/27/big/img_1008
+2002/07/20/big/img_559
+2002/08/16/big/img_699
+2002/08/31/big/img_17702
+2002/07/31/big/img_1013
+2002/08/01/big/img_2027
+2002/08/02/big/img_1001
+2002/08/03/big/img_210
+2002/08/01/big/img_2087
+2003/01/14/big/img_199
+2002/07/29/big/img_48
+2002/07/19/big/img_727
+2002/08/09/big/img_249
+2002/08/04/big/img_632
+2002/08/22/big/img_620
+2003/01/01/big/img_457
+2002/08/05/big/img_3223
+2002/07/27/big/img_240
+2002/07/25/big/img_797
+2002/08/13/big/img_430
+2002/07/25/big/img_615
+2002/08/12/big/img_28
+2002/07/30/big/img_220
+2002/07/24/big/img_89
+2002/08/21/big/img_357
+2002/08/09/big/img_590
+2003/01/13/big/img_525
+2002/08/17/big/img_818
+2003/01/02/big/img_7
+2002/07/26/big/img_636
+2003/01/13/big/img_1122
+2002/07/23/big/img_810
+2002/08/20/big/img_888
+2002/07/27/big/img_3
+2002/08/15/big/img_451
+2002/09/02/big/img_15787
+2002/07/31/big/img_281
+2002/08/05/big/img_3274
+2002/08/07/big/img_1254
+2002/07/31/big/img_27
+2002/08/01/big/img_1366
+2002/07/30/big/img_182
+2002/08/27/big/img_19690
+2002/07/29/big/img_68
+2002/08/23/big/img_754
+2002/07/30/big/img_540
+2002/08/27/big/img_20063
+2002/08/14/big/img_471
+2002/08/02/big/img_615
+2002/07/30/big/img_186
+2002/08/25/big/img_150
+2002/07/27/big/img_626
+2002/07/20/big/img_225
+2003/01/15/big/img_1252
+2002/07/19/big/img_367
+2003/01/15/big/img_582
+2002/08/09/big/img_572
+2002/08/08/big/img_428
+2003/01/15/big/img_639
+2002/08/28/big/img_19245
+2002/07/24/big/img_321
+2002/08/02/big/img_662
+2002/08/08/big/img_1033
+2003/01/17/big/img_867
+2002/07/22/big/img_652
+2003/01/14/big/img_224
+2002/08/18/big/img_49
+2002/07/26/big/img_46
+2002/08/31/big/img_18021
+2002/07/25/big/img_151
+2002/08/23/big/img_540
+2002/08/25/big/img_693
+2002/07/23/big/img_340
+2002/07/28/big/img_117
+2002/09/02/big/img_15768
+2002/08/26/big/img_562
+2002/07/24/big/img_480
+2003/01/15/big/img_341
+2002/08/10/big/img_783
+2002/08/20/big/img_132
+2003/01/14/big/img_370
+2002/07/20/big/img_720
+2002/08/03/big/img_144
+2002/08/20/big/img_538
+2002/08/01/big/img_1745
+2002/08/11/big/img_683
+2002/08/03/big/img_328
+2002/08/10/big/img_793
+2002/08/14/big/img_689
+2002/08/02/big/img_162
+2003/01/17/big/img_411
+2002/07/31/big/img_361
+2002/08/15/big/img_289
+2002/08/08/big/img_254
+2002/08/15/big/img_996
+2002/08/20/big/img_785
+2002/07/24/big/img_511
+2002/08/06/big/img_2614
+2002/08/29/big/img_18733
+2002/08/17/big/img_78
+2002/07/30/big/img_378
+2002/08/31/big/img_17947
+2002/08/26/big/img_88
+2002/07/30/big/img_558
+2002/08/02/big/img_67
+2003/01/14/big/img_325
+2002/07/29/big/img_1357
+2002/07/19/big/img_391
+2002/07/30/big/img_307
+2003/01/13/big/img_219
+2002/07/24/big/img_807
+2002/08/23/big/img_543
+2002/08/29/big/img_18620
+2002/07/22/big/img_769
+2002/08/26/big/img_503
+2002/07/30/big/img_78
+2002/08/14/big/img_1036
+2002/08/09/big/img_58
+2002/07/24/big/img_616
+2002/08/02/big/img_464
+2002/07/26/big/img_576
+2002/07/22/big/img_273
+2003/01/16/big/img_470
+2002/07/29/big/img_329
+2002/07/30/big/img_1086
+2002/07/31/big/img_353
+2002/09/02/big/img_15275
+2003/01/17/big/img_555
+2002/08/26/big/img_212
+2002/08/01/big/img_1692
+2003/01/15/big/img_600
+2002/07/29/big/img_825
+2002/08/08/big/img_68
+2002/08/10/big/img_719
+2002/07/31/big/img_636
+2002/07/29/big/img_325
+2002/07/21/big/img_515
+2002/07/22/big/img_705
+2003/01/13/big/img_818
+2002/08/09/big/img_486
+2002/08/22/big/img_141
+2002/07/22/big/img_303
+2002/08/09/big/img_393
+2002/07/29/big/img_963
+2002/08/02/big/img_1215
+2002/08/19/big/img_674
+2002/08/12/big/img_690
+2002/08/21/big/img_637
+2002/08/21/big/img_841
+2002/08/24/big/img_71
+2002/07/25/big/img_596
+2002/07/24/big/img_864
+2002/08/18/big/img_293
+2003/01/14/big/img_657
+2002/08/15/big/img_411
+2002/08/16/big/img_348
+2002/08/05/big/img_3157
+2002/07/20/big/img_663
+2003/01/13/big/img_654
+2003/01/16/big/img_433
+2002/08/30/big/img_18200
+2002/08/12/big/img_226
+2003/01/16/big/img_491
+2002/08/08/big/img_666
+2002/07/19/big/img_576
+2003/01/15/big/img_776
+2003/01/16/big/img_899
+2002/07/19/big/img_397
+2002/08/14/big/img_44
+2003/01/15/big/img_762
+2002/08/02/big/img_982
+2002/09/02/big/img_15234
+2002/08/17/big/img_556
+2002/08/21/big/img_410
+2002/08/21/big/img_386
+2002/07/19/big/img_690
+2002/08/05/big/img_3052
+2002/08/14/big/img_219
+2002/08/16/big/img_273
+2003/01/15/big/img_752
+2002/08/08/big/img_184
+2002/07/31/big/img_743
+2002/08/23/big/img_338
+2003/01/14/big/img_1055
+2002/08/05/big/img_3405
+2003/01/15/big/img_17
+2002/08/03/big/img_141
+2002/08/14/big/img_549
+2002/07/27/big/img_1034
+2002/07/31/big/img_932
+2002/08/30/big/img_18487
+2002/09/02/big/img_15814
+2002/08/01/big/img_2086
+2002/09/01/big/img_16535
+2002/07/22/big/img_500
+2003/01/13/big/img_400
+2002/08/25/big/img_607
+2002/08/30/big/img_18384
+2003/01/14/big/img_951
+2002/08/13/big/img_1150
+2002/08/08/big/img_1022
+2002/08/10/big/img_428
+2002/08/28/big/img_19242
+2002/08/05/big/img_3098
+2002/07/23/big/img_400
+2002/08/26/big/img_365
+2002/07/20/big/img_318
+2002/08/13/big/img_740
+2003/01/16/big/img_37
+2002/08/26/big/img_274
+2002/08/02/big/img_205
+2002/08/21/big/img_695
+2002/08/06/big/img_2289
+2002/08/20/big/img_794
+2002/08/18/big/img_438
+2002/08/07/big/img_1380
+2002/08/02/big/img_737
+2002/08/07/big/img_1651
+2002/08/15/big/img_1238
+2002/08/01/big/img_1681
+2002/08/06/big/img_3017
+2002/07/23/big/img_706
+2002/07/31/big/img_392
+2002/08/09/big/img_539
+2002/07/29/big/img_835
+2002/08/26/big/img_723
+2002/08/28/big/img_19235
+2003/01/16/big/img_353
+2002/08/10/big/img_150
+2002/08/29/big/img_19025
+2002/08/21/big/img_310
+2002/08/10/big/img_823
+2002/07/26/big/img_981
+2002/08/11/big/img_288
+2002/08/19/big/img_534
+2002/08/21/big/img_300
+2002/07/31/big/img_49
+2002/07/30/big/img_469
+2002/08/28/big/img_19197
+2002/08/25/big/img_205
+2002/08/10/big/img_390
+2002/08/23/big/img_291
+2002/08/26/big/img_230
+2002/08/18/big/img_76
+2002/07/23/big/img_409
+2002/08/14/big/img_1053
+2003/01/14/big/img_291
+2002/08/10/big/img_503
+2002/08/27/big/img_19928
+2002/08/03/big/img_563
+2002/08/17/big/img_250
+2002/08/06/big/img_2381
+2002/08/17/big/img_948
+2002/08/06/big/img_2710
+2002/07/22/big/img_696
+2002/07/31/big/img_670
+2002/08/12/big/img_594
+2002/07/29/big/img_624
+2003/01/17/big/img_934
+2002/08/03/big/img_584
+2002/08/22/big/img_1003
+2002/08/05/big/img_3396
+2003/01/13/big/img_570
+2002/08/02/big/img_219
+2002/09/02/big/img_15774
+2002/08/16/big/img_818
+2002/08/23/big/img_402
+2003/01/14/big/img_552
+2002/07/29/big/img_71
+2002/08/05/big/img_3592
+2002/08/16/big/img_80
+2002/07/27/big/img_672
+2003/01/13/big/img_470
+2003/01/16/big/img_702
+2002/09/01/big/img_16130
+2002/08/08/big/img_240
+2002/09/01/big/img_16338
+2002/07/26/big/img_312
+2003/01/14/big/img_538
+2002/07/20/big/img_695
+2002/08/30/big/img_18098
+2002/08/25/big/img_259
+2002/08/16/big/img_1042
+2002/08/09/big/img_837
+2002/08/31/big/img_17760
+2002/07/31/big/img_14
+2002/08/09/big/img_361
+2003/01/16/big/img_107
+2002/08/14/big/img_124
+2002/07/19/big/img_463
+2003/01/15/big/img_275
+2002/07/25/big/img_1151
+2002/07/29/big/img_1501
+2002/08/27/big/img_19889
+2002/08/29/big/img_18603
+2003/01/17/big/img_601
+2002/08/25/big/img_355
+2002/08/08/big/img_297
+2002/08/20/big/img_290
+2002/07/31/big/img_195
+2003/01/01/big/img_336
+2002/08/18/big/img_369
+2002/07/25/big/img_621
+2002/08/11/big/img_508
+2003/01/14/big/img_458
+2003/01/15/big/img_795
+2002/08/12/big/img_498
+2002/08/01/big/img_1734
+2002/08/02/big/img_246
+2002/08/16/big/img_565
+2002/08/11/big/img_475
+2002/08/22/big/img_408
+2002/07/28/big/img_78
+2002/07/21/big/img_81
+2003/01/14/big/img_697
+2002/08/14/big/img_661
+2002/08/15/big/img_507
+2002/08/19/big/img_55
+2002/07/22/big/img_152
+2003/01/14/big/img_470
+2002/08/03/big/img_379
+2002/08/22/big/img_506
+2003/01/16/big/img_966
+2002/08/18/big/img_698
+2002/08/24/big/img_528
+2002/08/23/big/img_10
+2002/08/01/big/img_1655
+2002/08/22/big/img_953
+2002/07/19/big/img_630
+2002/07/22/big/img_889
+2002/08/16/big/img_351
+2003/01/16/big/img_83
+2002/07/19/big/img_805
+2002/08/14/big/img_704
+2002/07/19/big/img_389
+2002/08/31/big/img_17765
+2002/07/29/big/img_606
+2003/01/17/big/img_939
+2002/09/02/big/img_15081
+2002/08/21/big/img_181
+2002/07/29/big/img_1321
+2002/07/21/big/img_497
+2002/07/20/big/img_539
+2002/08/24/big/img_119
+2002/08/01/big/img_1281
+2002/07/26/big/img_207
+2002/07/26/big/img_432
+2002/07/27/big/img_1006
+2002/08/05/big/img_3087
+2002/08/14/big/img_252
+2002/08/14/big/img_798
+2002/07/24/big/img_538
+2002/09/02/big/img_15507
+2002/08/08/big/img_901
+2003/01/14/big/img_557
+2002/08/07/big/img_1819
+2002/08/04/big/img_470
+2002/08/01/big/img_1504
+2002/08/16/big/img_1070
+2002/08/16/big/img_372
+2002/08/23/big/img_416
+2002/08/30/big/img_18208
+2002/08/01/big/img_2043
+2002/07/22/big/img_385
+2002/08/22/big/img_466
+2002/08/21/big/img_869
+2002/08/28/big/img_19429
+2002/08/02/big/img_770
+2002/07/23/big/img_433
+2003/01/14/big/img_13
+2002/07/27/big/img_953
+2002/09/02/big/img_15728
+2002/08/01/big/img_1361
+2002/08/29/big/img_18897
+2002/08/26/big/img_534
+2002/08/11/big/img_121
+2002/08/26/big/img_20130
+2002/07/31/big/img_363
+2002/08/13/big/img_978
+2002/07/25/big/img_835
+2002/08/02/big/img_906
+2003/01/14/big/img_548
+2002/07/30/big/img_80
+2002/07/26/big/img_982
+2003/01/16/big/img_99
+2002/08/19/big/img_362
+2002/08/24/big/img_376
+2002/08/07/big/img_1264
+2002/07/27/big/img_938
+2003/01/17/big/img_535
+2002/07/26/big/img_457
+2002/08/08/big/img_848
+2003/01/15/big/img_859
+2003/01/15/big/img_622
+2002/07/30/big/img_403
+2002/07/29/big/img_217
+2002/07/26/big/img_891
+2002/07/24/big/img_70
+2002/08/25/big/img_619
+2002/08/05/big/img_3375
+2002/08/01/big/img_2160
+2002/08/06/big/img_2227
+2003/01/14/big/img_117
+2002/08/14/big/img_227
+2002/08/13/big/img_565
+2002/08/19/big/img_625
+2002/08/03/big/img_812
+2002/07/24/big/img_41
+2002/08/16/big/img_235
+2002/07/29/big/img_759
+2002/07/21/big/img_433
+2002/07/29/big/img_190
+2003/01/16/big/img_435
+2003/01/13/big/img_708
+2002/07/30/big/img_57
+2002/08/22/big/img_162
+2003/01/01/big/img_558
+2003/01/15/big/img_604
+2002/08/16/big/img_935
+2002/08/20/big/img_394
+2002/07/28/big/img_465
+2002/09/02/big/img_15534
+2002/08/16/big/img_87
+2002/07/22/big/img_469
+2002/08/12/big/img_245
+2003/01/13/big/img_236
+2002/08/06/big/img_2736
+2002/08/03/big/img_348
+2003/01/14/big/img_218
+2002/07/26/big/img_232
+2003/01/15/big/img_244
+2002/07/25/big/img_1121
+2002/08/01/big/img_1484
+2002/07/26/big/img_541
+2002/08/07/big/img_1244
+2002/07/31/big/img_3
+2002/08/30/big/img_18437
+2002/08/29/big/img_19094
+2002/08/01/big/img_1355
+2002/08/19/big/img_338
+2002/07/19/big/img_255
+2002/07/21/big/img_76
+2002/08/25/big/img_199
+2002/08/12/big/img_740
+2002/07/30/big/img_852
+2002/08/15/big/img_599
+2002/08/23/big/img_254
+2002/08/19/big/img_125
+2002/07/24/big/img_2
+2002/08/04/big/img_145
+2002/08/05/big/img_3137
+2002/07/28/big/img_463
+2003/01/14/big/img_801
+2002/07/23/big/img_366
+2002/08/26/big/img_600
+2002/08/26/big/img_649
+2002/09/02/big/img_15849
+2002/07/26/big/img_248
+2003/01/13/big/img_200
+2002/08/07/big/img_1794
+2002/08/31/big/img_17270
+2002/08/23/big/img_608
+2003/01/13/big/img_837
+2002/08/23/big/img_581
+2002/08/20/big/img_754
+2002/08/18/big/img_183
+2002/08/20/big/img_328
+2002/07/22/big/img_494
+2002/07/29/big/img_399
+2002/08/28/big/img_19284
+2002/08/08/big/img_566
+2002/07/25/big/img_376
+2002/07/23/big/img_138
+2002/07/25/big/img_435
+2002/08/17/big/img_685
+2002/07/19/big/img_90
+2002/07/20/big/img_716
+2002/08/31/big/img_17458
+2002/08/26/big/img_461
+2002/07/25/big/img_355
+2002/08/06/big/img_2152
+2002/07/27/big/img_932
+2002/07/23/big/img_232
+2002/08/08/big/img_1020
+2002/07/31/big/img_366
+2002/08/06/big/img_2667
+2002/08/21/big/img_465
+2002/08/15/big/img_305
+2002/08/02/big/img_247
+2002/07/28/big/img_46
+2002/08/27/big/img_19922
+2002/08/23/big/img_643
+2003/01/13/big/img_624
+2002/08/23/big/img_625
+2002/08/05/big/img_3787
+2003/01/13/big/img_627
+2002/09/01/big/img_16381
+2002/08/05/big/img_3668
+2002/07/21/big/img_535
+2002/08/27/big/img_19680
+2002/07/22/big/img_413
+2002/07/29/big/img_481
+2003/01/15/big/img_496
+2002/07/23/big/img_701
+2002/08/29/big/img_18670
+2002/07/28/big/img_319
+2003/01/14/big/img_517
+2002/07/26/big/img_256
+2003/01/16/big/img_593
+2002/07/30/big/img_956
+2002/07/30/big/img_667
+2002/07/25/big/img_100
+2002/08/11/big/img_570
+2002/07/26/big/img_745
+2002/08/04/big/img_834
+2002/08/25/big/img_521
+2002/08/01/big/img_2148
+2002/09/02/big/img_15183
+2002/08/22/big/img_514
+2002/08/23/big/img_477
+2002/07/23/big/img_336
+2002/07/26/big/img_481
+2002/08/20/big/img_409
+2002/07/23/big/img_918
+2002/08/09/big/img_474
+2002/08/02/big/img_929
+2002/08/31/big/img_17932
+2002/08/19/big/img_161
+2002/08/09/big/img_667
+2002/07/31/big/img_805
+2002/09/02/big/img_15678
+2002/08/31/big/img_17509
+2002/08/29/big/img_18998
+2002/07/23/big/img_301
+2002/08/07/big/img_1612
+2002/08/06/big/img_2472
+2002/07/23/big/img_466
+2002/08/27/big/img_19634
+2003/01/16/big/img_16
+2002/08/14/big/img_193
+2002/08/21/big/img_340
+2002/08/27/big/img_19799
+2002/08/01/big/img_1345
+2002/08/07/big/img_1448
+2002/08/11/big/img_324
+2003/01/16/big/img_754
+2002/08/13/big/img_418
+2003/01/16/big/img_544
+2002/08/19/big/img_135
+2002/08/10/big/img_455
+2002/08/10/big/img_693
+2002/08/31/big/img_17967
+2002/08/28/big/img_19229
+2002/08/04/big/img_811
+2002/09/01/big/img_16225
+2003/01/16/big/img_428
+2002/09/02/big/img_15295
+2002/07/26/big/img_108
+2002/07/21/big/img_477
+2002/08/07/big/img_1354
+2002/08/23/big/img_246
+2002/08/16/big/img_652
+2002/07/27/big/img_553
+2002/07/31/big/img_346
+2002/08/04/big/img_537
+2002/08/08/big/img_498
+2002/08/29/big/img_18956
+2003/01/13/big/img_922
+2002/08/31/big/img_17425
+2002/07/26/big/img_438
+2002/08/19/big/img_185
+2003/01/16/big/img_33
+2002/08/10/big/img_252
+2002/07/29/big/img_598
+2002/08/27/big/img_19820
+2002/08/06/big/img_2664
+2002/08/20/big/img_705
+2003/01/14/big/img_816
+2002/08/03/big/img_552
+2002/07/25/big/img_561
+2002/07/25/big/img_934
+2002/08/01/big/img_1893
+2003/01/14/big/img_746
+2003/01/16/big/img_519
+2002/08/03/big/img_681
+2002/07/24/big/img_808
+2002/08/14/big/img_803
+2002/08/25/big/img_155
+2002/07/30/big/img_1107
+2002/08/29/big/img_18882
+2003/01/15/big/img_598
+2002/08/19/big/img_122
+2002/07/30/big/img_428
+2002/07/24/big/img_684
+2002/08/22/big/img_192
+2002/08/22/big/img_543
+2002/08/07/big/img_1318
+2002/08/18/big/img_25
+2002/07/26/big/img_583
+2002/07/20/big/img_464
+2002/08/19/big/img_664
+2002/08/24/big/img_861
+2002/09/01/big/img_16136
+2002/08/22/big/img_400
+2002/08/12/big/img_445
+2003/01/14/big/img_174
+2002/08/27/big/img_19677
+2002/08/31/big/img_17214
+2002/08/30/big/img_18175
+2003/01/17/big/img_402
+2002/08/06/big/img_2396
+2002/08/18/big/img_448
+2002/08/21/big/img_165
+2002/08/31/big/img_17609
+2003/01/01/big/img_151
+2002/08/26/big/img_372
+2002/09/02/big/img_15994
+2002/07/26/big/img_660
+2002/09/02/big/img_15197
+2002/07/29/big/img_258
+2002/08/30/big/img_18525
+2003/01/13/big/img_368
+2002/07/29/big/img_1538
+2002/07/21/big/img_787
+2002/08/18/big/img_152
+2002/08/06/big/img_2379
+2003/01/17/big/img_864
+2002/08/27/big/img_19998
+2002/08/01/big/img_1634
+2002/07/25/big/img_414
+2002/08/22/big/img_627
+2002/08/07/big/img_1669
+2002/08/16/big/img_1052
+2002/08/31/big/img_17796
+2002/08/18/big/img_199
+2002/09/02/big/img_15147
+2002/08/09/big/img_460
+2002/08/14/big/img_581
+2002/08/30/big/img_18286
+2002/07/26/big/img_337
+2002/08/18/big/img_589
+2003/01/14/big/img_866
+2002/07/20/big/img_624
+2002/08/01/big/img_1801
+2002/07/24/big/img_683
+2002/08/09/big/img_725
+2003/01/14/big/img_34
+2002/07/30/big/img_144
+2002/07/30/big/img_706
+2002/08/08/big/img_394
+2002/08/19/big/img_619
+2002/08/06/big/img_2703
+2002/08/29/big/img_19034
+2002/07/24/big/img_67
+2002/08/27/big/img_19841
+2002/08/19/big/img_427
+2003/01/14/big/img_333
+2002/09/01/big/img_16406
+2002/07/19/big/img_882
+2002/08/17/big/img_238
+2003/01/14/big/img_739
+2002/07/22/big/img_151
+2002/08/21/big/img_743
+2002/07/25/big/img_1048
+2002/07/30/big/img_395
+2003/01/13/big/img_584
+2002/08/13/big/img_742
+2002/08/13/big/img_1168
+2003/01/14/big/img_147
+2002/07/26/big/img_803
+2002/08/05/big/img_3298
+2002/08/07/big/img_1451
+2002/08/16/big/img_424
+2002/07/29/big/img_1069
+2002/09/01/big/img_16735
+2002/07/21/big/img_637
+2003/01/14/big/img_585
+2002/08/02/big/img_358
+2003/01/13/big/img_358
+2002/08/14/big/img_198
+2002/08/17/big/img_935
+2002/08/04/big/img_42
+2002/08/30/big/img_18245
+2002/07/25/big/img_158
+2002/08/22/big/img_744
+2002/08/06/big/img_2291
+2002/08/05/big/img_3044
+2002/07/30/big/img_272
+2002/08/23/big/img_641
+2002/07/24/big/img_797
+2002/07/30/big/img_392
+2003/01/14/big/img_447
+2002/07/31/big/img_898
+2002/08/06/big/img_2812
+2002/08/13/big/img_564
+2002/07/22/big/img_43
+2002/07/26/big/img_634
+2002/07/19/big/img_843
+2002/08/26/big/img_58
+2002/07/21/big/img_375
+2002/08/25/big/img_729
+2002/07/19/big/img_561
+2003/01/15/big/img_884
+2002/07/25/big/img_891
+2002/08/09/big/img_558
+2002/08/26/big/img_587
+2002/08/13/big/img_1146
+2002/09/02/big/img_15153
+2002/07/26/big/img_316
+2002/08/01/big/img_1940
+2002/08/26/big/img_90
+2003/01/13/big/img_347
+2002/07/25/big/img_520
+2002/08/29/big/img_18718
+2002/08/28/big/img_19219
+2002/08/13/big/img_375
+2002/07/20/big/img_719
+2002/08/31/big/img_17431
+2002/07/28/big/img_192
+2002/08/26/big/img_259
+2002/08/18/big/img_484
+2002/07/29/big/img_580
+2002/07/26/big/img_84
+2002/08/02/big/img_302
+2002/08/31/big/img_17007
+2003/01/15/big/img_543
+2002/09/01/big/img_16488
+2002/08/22/big/img_798
+2002/07/30/big/img_383
+2002/08/04/big/img_668
+2002/08/13/big/img_156
+2002/08/07/big/img_1353
+2002/07/25/big/img_281
+2003/01/14/big/img_587
+2003/01/15/big/img_524
+2002/08/19/big/img_726
+2002/08/21/big/img_709
+2002/08/26/big/img_465
+2002/07/31/big/img_658
+2002/08/28/big/img_19148
+2002/07/23/big/img_423
+2002/08/16/big/img_758
+2002/08/22/big/img_523
+2002/08/16/big/img_591
+2002/08/23/big/img_845
+2002/07/26/big/img_678
+2002/08/09/big/img_806
+2002/08/06/big/img_2369
+2002/07/29/big/img_457
+2002/07/19/big/img_278
+2002/08/30/big/img_18107
+2002/07/26/big/img_444
+2002/08/20/big/img_278
+2002/08/26/big/img_92
+2002/08/26/big/img_257
+2002/07/25/big/img_266
+2002/08/05/big/img_3829
+2002/07/26/big/img_757
+2002/07/29/big/img_1536
+2002/08/09/big/img_472
+2003/01/17/big/img_480
+2002/08/28/big/img_19355
+2002/07/26/big/img_97
+2002/08/06/big/img_2503
+2002/07/19/big/img_254
+2002/08/01/big/img_1470
+2002/08/21/big/img_42
+2002/08/20/big/img_217
+2002/08/06/big/img_2459
+2002/07/19/big/img_552
+2002/08/13/big/img_717
+2002/08/12/big/img_586
+2002/08/20/big/img_411
+2003/01/13/big/img_768
+2002/08/07/big/img_1747
+2002/08/15/big/img_385
+2002/08/01/big/img_1648
+2002/08/15/big/img_311
+2002/08/21/big/img_95
+2002/08/09/big/img_108
+2002/08/21/big/img_398
+2002/08/17/big/img_340
+2002/08/14/big/img_474
+2002/08/13/big/img_294
+2002/08/24/big/img_840
+2002/08/09/big/img_808
+2002/08/23/big/img_491
+2002/07/28/big/img_33
+2003/01/13/big/img_664
+2002/08/02/big/img_261
+2002/08/09/big/img_591
+2002/07/26/big/img_309
+2003/01/14/big/img_372
+2002/08/19/big/img_581
+2002/08/19/big/img_168
+2002/08/26/big/img_422
+2002/07/24/big/img_106
+2002/08/01/big/img_1936
+2002/08/05/big/img_3764
+2002/08/21/big/img_266
+2002/08/31/big/img_17968
+2002/08/01/big/img_1941
+2002/08/15/big/img_550
+2002/08/14/big/img_13
+2002/07/30/big/img_171
+2003/01/13/big/img_490
+2002/07/25/big/img_427
+2002/07/19/big/img_770
+2002/08/12/big/img_759
+2003/01/15/big/img_1360
+2002/08/05/big/img_3692
+2003/01/16/big/img_30
+2002/07/25/big/img_1026
+2002/07/22/big/img_288
+2002/08/29/big/img_18801
+2002/07/24/big/img_793
+2002/08/13/big/img_178
+2002/08/06/big/img_2322
+2003/01/14/big/img_560
+2002/08/18/big/img_408
+2003/01/16/big/img_915
+2003/01/16/big/img_679
+2002/08/07/big/img_1552
+2002/08/29/big/img_19050
+2002/08/01/big/img_2172
+2002/07/31/big/img_30
+2002/07/30/big/img_1019
+2002/07/30/big/img_587
+2003/01/13/big/img_773
+2002/07/30/big/img_410
+2002/07/28/big/img_65
+2002/08/05/big/img_3138
+2002/07/23/big/img_541
+2002/08/22/big/img_963
+2002/07/27/big/img_657
+2002/07/30/big/img_1051
+2003/01/16/big/img_150
+2002/07/31/big/img_519
+2002/08/01/big/img_1961
+2002/08/05/big/img_3752
+2002/07/23/big/img_631
+2003/01/14/big/img_237
+2002/07/28/big/img_21
+2002/07/22/big/img_813
+2002/08/05/big/img_3563
+2003/01/17/big/img_620
+2002/07/19/big/img_523
+2002/07/30/big/img_904
+2002/08/29/big/img_18642
+2002/08/11/big/img_492
+2002/08/01/big/img_2130
+2002/07/25/big/img_618
+2002/08/17/big/img_305
+2003/01/16/big/img_520
+2002/07/26/big/img_495
+2002/08/17/big/img_164
+2002/08/03/big/img_440
+2002/07/24/big/img_441
+2002/08/06/big/img_2146
+2002/08/11/big/img_558
+2002/08/02/big/img_545
+2002/08/31/big/img_18090
+2003/01/01/big/img_136
+2002/07/25/big/img_1099
+2003/01/13/big/img_728
+2003/01/16/big/img_197
+2002/07/26/big/img_651
+2002/08/11/big/img_676
+2003/01/15/big/img_10
+2002/08/21/big/img_250
+2002/08/14/big/img_325
+2002/08/04/big/img_390
+2002/07/24/big/img_554
+2003/01/16/big/img_333
+2002/07/31/big/img_922
+2002/09/02/big/img_15586
+2003/01/16/big/img_184
+2002/07/22/big/img_766
+2002/07/21/big/img_608
+2002/08/07/big/img_1578
+2002/08/17/big/img_961
+2002/07/27/big/img_324
+2002/08/05/big/img_3765
+2002/08/23/big/img_462
+2003/01/16/big/img_382
+2002/08/27/big/img_19838
+2002/08/01/big/img_1505
+2002/08/21/big/img_662
+2002/08/14/big/img_605
+2002/08/19/big/img_816
+2002/07/29/big/img_136
+2002/08/20/big/img_719
+2002/08/06/big/img_2826
+2002/08/10/big/img_630
+2003/01/17/big/img_973
+2002/08/14/big/img_116
+2002/08/02/big/img_666
+2002/08/21/big/img_710
+2002/08/05/big/img_55
+2002/07/31/big/img_229
+2002/08/01/big/img_1549
+2002/07/23/big/img_432
+2002/07/21/big/img_430
+2002/08/21/big/img_549
+2002/08/08/big/img_985
+2002/07/20/big/img_610
+2002/07/23/big/img_978
+2002/08/23/big/img_219
+2002/07/25/big/img_175
+2003/01/15/big/img_230
+2002/08/23/big/img_385
+2002/07/31/big/img_879
+2002/08/12/big/img_495
+2002/08/22/big/img_499
+2002/08/30/big/img_18322
+2002/08/15/big/img_795
+2002/08/13/big/img_835
+2003/01/17/big/img_930
+2002/07/30/big/img_873
+2002/08/11/big/img_257
+2002/07/31/big/img_593
+2002/08/21/big/img_916
+2003/01/13/big/img_814
+2002/07/25/big/img_722
+2002/08/16/big/img_379
+2002/07/31/big/img_497
+2002/07/22/big/img_602
+2002/08/21/big/img_642
+2002/08/21/big/img_614
+2002/08/23/big/img_482
+2002/07/29/big/img_603
+2002/08/13/big/img_705
+2002/07/23/big/img_833
+2003/01/14/big/img_511
+2002/07/24/big/img_376
+2002/08/17/big/img_1030
+2002/08/05/big/img_3576
+2002/08/16/big/img_540
+2002/07/22/big/img_630
+2002/08/10/big/img_180
+2002/08/14/big/img_905
+2002/08/29/big/img_18777
+2002/08/22/big/img_693
+2003/01/16/big/img_933
+2002/08/20/big/img_555
+2002/08/15/big/img_549
+2003/01/14/big/img_830
+2003/01/16/big/img_64
+2002/08/27/big/img_19670
+2002/08/22/big/img_729
+2002/07/27/big/img_981
+2002/08/09/big/img_458
+2003/01/17/big/img_884
+2002/07/25/big/img_639
+2002/08/31/big/img_18008
+2002/08/22/big/img_249
+2002/08/17/big/img_971
+2002/08/04/big/img_308
+2002/07/28/big/img_362
+2002/08/12/big/img_142
+2002/08/26/big/img_61
+2002/08/14/big/img_422
+2002/07/19/big/img_607
+2003/01/15/big/img_717
+2002/08/01/big/img_1475
+2002/08/29/big/img_19061
+2003/01/01/big/img_346
+2002/07/20/big/img_315
+2003/01/15/big/img_756
+2002/08/15/big/img_879
+2002/08/08/big/img_615
+2003/01/13/big/img_431
+2002/08/05/big/img_3233
+2002/08/24/big/img_526
+2003/01/13/big/img_717
+2002/09/01/big/img_16408
+2002/07/22/big/img_217
+2002/07/31/big/img_960
+2002/08/21/big/img_610
+2002/08/05/big/img_3753
+2002/08/03/big/img_151
+2002/08/21/big/img_267
+2002/08/01/big/img_2175
+2002/08/04/big/img_556
+2002/08/21/big/img_527
+2002/09/02/big/img_15800
+2002/07/27/big/img_156
+2002/07/20/big/img_590
+2002/08/15/big/img_700
+2002/08/08/big/img_444
+2002/07/25/big/img_94
+2002/07/24/big/img_778
+2002/08/14/big/img_694
+2002/07/20/big/img_666
+2002/08/02/big/img_200
+2002/08/02/big/img_578
+2003/01/17/big/img_332
+2002/09/01/big/img_16352
+2002/08/27/big/img_19668
+2002/07/23/big/img_823
+2002/08/13/big/img_431
+2003/01/16/big/img_463
+2002/08/27/big/img_19711
+2002/08/23/big/img_154
+2002/07/31/big/img_360
+2002/08/23/big/img_555
+2002/08/10/big/img_561
+2003/01/14/big/img_550
+2002/08/07/big/img_1370
+2002/07/30/big/img_1184
+2002/08/01/big/img_1445
+2002/08/23/big/img_22
+2002/07/30/big/img_606
+2003/01/17/big/img_271
+2002/08/31/big/img_17316
+2002/08/16/big/img_973
+2002/07/26/big/img_77
+2002/07/20/big/img_788
+2002/08/06/big/img_2426
+2002/08/07/big/img_1498
+2002/08/16/big/img_358
+2002/08/06/big/img_2851
+2002/08/12/big/img_359
+2002/08/01/big/img_1521
+2002/08/02/big/img_709
+2002/08/20/big/img_935
+2002/08/12/big/img_188
+2002/08/24/big/img_411
+2002/08/22/big/img_680
+2002/08/06/big/img_2480
+2002/07/20/big/img_627
+2002/07/30/big/img_214
+2002/07/25/big/img_354
+2002/08/02/big/img_636
+2003/01/15/big/img_661
+2002/08/07/big/img_1327
+2002/08/01/big/img_2108
+2002/08/31/big/img_17919
+2002/08/29/big/img_18768
+2002/08/05/big/img_3840
+2002/07/26/big/img_242
+2003/01/14/big/img_451
+2002/08/20/big/img_923
+2002/08/27/big/img_19908
+2002/08/16/big/img_282
+2002/08/19/big/img_440
+2003/01/01/big/img_230
+2002/08/08/big/img_212
+2002/07/20/big/img_443
+2002/08/25/big/img_635
+2003/01/13/big/img_1169
+2002/07/26/big/img_998
+2002/08/15/big/img_995
+2002/08/06/big/img_3002
+2002/07/29/big/img_460
+2003/01/14/big/img_925
+2002/07/23/big/img_539
+2002/08/16/big/img_694
+2003/01/13/big/img_459
+2002/07/23/big/img_249
+2002/08/20/big/img_539
+2002/08/04/big/img_186
+2002/08/26/big/img_264
+2002/07/22/big/img_704
+2002/08/25/big/img_277
+2002/08/22/big/img_988
+2002/07/29/big/img_504
+2002/08/05/big/img_3600
+2002/08/30/big/img_18380
+2003/01/14/big/img_937
+2002/08/21/big/img_254
+2002/08/10/big/img_130
+2002/08/20/big/img_339
+2003/01/14/big/img_428
+2002/08/20/big/img_889
+2002/08/31/big/img_17637
+2002/07/26/big/img_644
+2002/09/01/big/img_16776
+2002/08/06/big/img_2239
+2002/08/06/big/img_2646
+2003/01/13/big/img_491
+2002/08/10/big/img_579
+2002/08/21/big/img_713
+2002/08/22/big/img_482
+2002/07/22/big/img_167
+2002/07/24/big/img_539
+2002/08/14/big/img_721
+2002/07/25/big/img_389
+2002/09/01/big/img_16591
+2002/08/13/big/img_543
+2003/01/14/big/img_432
+2002/08/09/big/img_287
+2002/07/26/big/img_126
+2002/08/23/big/img_412
+2002/08/15/big/img_1034
+2002/08/28/big/img_19485
+2002/07/31/big/img_236
+2002/07/30/big/img_523
+2002/07/19/big/img_141
+2003/01/17/big/img_957
+2002/08/04/big/img_81
+2002/07/25/big/img_206
+2002/08/15/big/img_716
+2002/08/13/big/img_403
+2002/08/15/big/img_685
+2002/07/26/big/img_884
+2002/07/19/big/img_499
+2002/07/23/big/img_772
+2002/07/27/big/img_752
+2003/01/14/big/img_493
+2002/08/25/big/img_664
+2002/07/31/big/img_334
+2002/08/26/big/img_678
+2002/09/01/big/img_16541
+2003/01/14/big/img_347
+2002/07/23/big/img_187
+2002/07/30/big/img_1163
+2002/08/05/big/img_35
+2002/08/22/big/img_944
+2002/08/07/big/img_1239
+2002/07/29/big/img_1215
+2002/08/03/big/img_312
+2002/08/05/big/img_3523
+2002/07/29/big/img_218
+2002/08/13/big/img_672
+2002/08/16/big/img_205
+2002/08/17/big/img_594
+2002/07/29/big/img_1411
+2002/07/30/big/img_942
+2003/01/16/big/img_312
+2002/08/08/big/img_312
+2002/07/25/big/img_15
+2002/08/09/big/img_839
+2002/08/01/big/img_2069
+2002/08/31/big/img_17512
+2002/08/01/big/img_3
+2002/07/31/big/img_320
+2003/01/15/big/img_1265
+2002/08/14/big/img_563
+2002/07/31/big/img_167
+2002/08/20/big/img_374
+2002/08/13/big/img_406
+2002/08/08/big/img_625
+2002/08/02/big/img_314
+2002/08/27/big/img_19964
+2002/09/01/big/img_16670
+2002/07/31/big/img_599
+2002/08/29/big/img_18906
+2002/07/24/big/img_373
+2002/07/26/big/img_513
+2002/09/02/big/img_15497
+2002/08/19/big/img_117
+2003/01/01/big/img_158
+2002/08/24/big/img_178
+2003/01/13/big/img_935
+2002/08/13/big/img_609
+2002/08/30/big/img_18341
+2002/08/25/big/img_674
+2003/01/13/big/img_209
+2002/08/13/big/img_258
+2002/08/05/big/img_3543
+2002/08/07/big/img_1970
+2002/08/06/big/img_3004
+2003/01/17/big/img_487
+2002/08/24/big/img_873
+2002/08/29/big/img_18730
+2002/08/09/big/img_375
+2003/01/16/big/img_751
+2002/08/02/big/img_603
+2002/08/19/big/img_325
+2002/09/01/big/img_16420
+2002/08/05/big/img_3633
+2002/08/21/big/img_516
+2002/07/19/big/img_501
+2002/07/26/big/img_688
+2002/07/24/big/img_256
+2002/07/25/big/img_438
+2002/07/31/big/img_1017
+2002/08/22/big/img_512
+2002/07/21/big/img_543
+2002/08/08/big/img_223
+2002/08/19/big/img_189
+2002/08/12/big/img_630
+2002/07/30/big/img_958
+2002/07/28/big/img_208
+2002/08/31/big/img_17691
+2002/07/22/big/img_542
+2002/07/19/big/img_741
+2002/07/19/big/img_158
+2002/08/15/big/img_399
+2002/08/01/big/img_2159
+2002/08/14/big/img_455
+2002/08/17/big/img_1011
+2002/08/26/big/img_744
+2002/08/12/big/img_624
+2003/01/17/big/img_821
+2002/08/16/big/img_980
+2002/07/28/big/img_281
+2002/07/25/big/img_171
+2002/08/03/big/img_116
+2002/07/22/big/img_467
+2002/07/31/big/img_750
+2002/07/26/big/img_435
+2002/07/19/big/img_822
+2002/08/13/big/img_626
+2002/08/11/big/img_344
+2002/08/02/big/img_473
+2002/09/01/big/img_16817
+2002/08/01/big/img_1275
+2002/08/28/big/img_19270
+2002/07/23/big/img_607
+2002/08/09/big/img_316
+2002/07/29/big/img_626
+2002/07/24/big/img_824
+2002/07/22/big/img_342
+2002/08/08/big/img_794
+2002/08/07/big/img_1209
+2002/07/19/big/img_18
+2002/08/25/big/img_634
+2002/07/24/big/img_730
+2003/01/17/big/img_356
+2002/07/23/big/img_305
+2002/07/30/big/img_453
+2003/01/13/big/img_972
+2002/08/06/big/img_2610
+2002/08/29/big/img_18920
+2002/07/31/big/img_123
+2002/07/26/big/img_979
+2002/08/24/big/img_635
+2002/08/05/big/img_3704
+2002/08/07/big/img_1358
+2002/07/22/big/img_306
+2002/08/13/big/img_619
+2002/08/02/big/img_366
diff --git a/face_detect/data/__init__.py b/face_detect/data/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..ea50ebaf88d64e75f4960bc99b14f138a343e575
--- /dev/null
+++ b/face_detect/data/__init__.py
@@ -0,0 +1,3 @@
+from .wider_face import WiderFaceDetection, detection_collate
+from .data_augment import *
+from .config import *
diff --git a/face_detect/data/config.py b/face_detect/data/config.py
new file mode 100755
index 0000000000000000000000000000000000000000..e57cdc530e3d78c4aa6310985c90c5ee125f8f01
--- /dev/null
+++ b/face_detect/data/config.py
@@ -0,0 +1,42 @@
+# config.py
+
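+# Note (assumed reading of the fields below, based on how they are consumed elsewhere in this diff):
+# 'min_sizes' are the per-level anchor sizes, 'steps' the matching feature-map strides,
+# and 'variance' the box-encoding variances used by encode()/decode() in utils/box_utils.py.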
+cfg_mnet = {
+    'name': 'mobilenet0.25',
+    'min_sizes': [[16, 32], [64, 128], [256, 512]],
+    'steps': [8, 16, 32],
+    'variance': [0.1, 0.2],
+    'clip': False,
+    'loc_weight': 2.0,
+    'gpu_train': True,
+    'batch_size': 32,
+    'ngpu': 1,
+    'epoch': 250,
+    'decay1': 190,
+    'decay2': 220,
+    'image_size': 640,
+    'pretrain': False,
+    'return_layers': {'stage1': 1, 'stage2': 2, 'stage3': 3},
+    'in_channel': 32,
+    'out_channel': 64
+}
+
+cfg_re50 = {
+    'name': 'Resnet50',
+    'min_sizes': [[16, 32], [64, 128], [256, 512]],
+    'steps': [8, 16, 32],
+    'variance': [0.1, 0.2],
+    'clip': False,
+    'loc_weight': 2.0,
+    'gpu_train': True,
+    'batch_size': 24,
+    'ngpu': 4,
+    'epoch': 100,
+    'decay1': 70,
+    'decay2': 90,
+    'image_size': 840,
+    'pretrain': False,
+    'return_layers': {'layer2': 1, 'layer3': 2, 'layer4': 3},
+    'in_channel': 256,
+    'out_channel': 256
+}
+
diff --git a/face_detect/data/data_augment.py b/face_detect/data/data_augment.py
new file mode 100755
index 0000000000000000000000000000000000000000..c1b52ae19bf8d9ac3fa256b68730ce1b556c6d6e
--- /dev/null
+++ b/face_detect/data/data_augment.py
@@ -0,0 +1,237 @@
+import cv2
+import numpy as np
+import random
+from utils.box_utils import matrix_iof
+
+
+def _crop(image, boxes, labels, landm, img_dim):
+    height, width, _ = image.shape
+    pad_image_flag = True
+
+    for _ in range(250):
+        """
+        if random.uniform(0, 1) <= 0.2:
+            scale = 1.0
+        else:
+            scale = random.uniform(0.3, 1.0)
+        """
+        PRE_SCALES = [0.3, 0.45, 0.6, 0.8, 1.0]
+        scale = random.choice(PRE_SCALES)
+        short_side = min(width, height)
+        w = int(scale * short_side)
+        h = w
+
+        if width == w:
+            l = 0
+        else:
+            l = random.randrange(width - w)
+        if height == h:
+            t = 0
+        else:
+            t = random.randrange(height - h)
+        roi = np.array((l, t, l + w, t + h))
+
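+        # IoF of every ground-truth box w.r.t. the candidate crop; retry unless
+        # at least one box lies fully inside the crop (IoF >= 1)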
+        value = matrix_iof(boxes, roi[np.newaxis])
+        flag = (value >= 1)
+        if not flag.any():
+            continue
+
+        centers = (boxes[:, :2] + boxes[:, 2:]) / 2
+        mask_a = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
+        boxes_t = boxes[mask_a].copy()
+        labels_t = labels[mask_a].copy()
+        landms_t = landm[mask_a].copy()
+        landms_t = landms_t.reshape([-1, 5, 2])
+
+        if boxes_t.shape[0] == 0:
+            continue
+
+        image_t = image[roi[1]:roi[3], roi[0]:roi[2]]
+
+        boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])
+        boxes_t[:, :2] -= roi[:2]
+        boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])
+        boxes_t[:, 2:] -= roi[:2]
+
+        # landm
+        landms_t[:, :, :2] = landms_t[:, :, :2] - roi[:2]
+        landms_t[:, :, :2] = np.maximum(landms_t[:, :, :2], np.array([0, 0]))
+        landms_t[:, :, :2] = np.minimum(landms_t[:, :, :2], roi[2:] - roi[:2])
+        landms_t = landms_t.reshape([-1, 10])
+
+        # keep only boxes that still have positive width and height at the training image scale
+        b_w_t = (boxes_t[:, 2] - boxes_t[:, 0] + 1) / w * img_dim
+        b_h_t = (boxes_t[:, 3] - boxes_t[:, 1] + 1) / h * img_dim
+        mask_b = np.minimum(b_w_t, b_h_t) > 0.0
+        boxes_t = boxes_t[mask_b]
+        labels_t = labels_t[mask_b]
+        landms_t = landms_t[mask_b]
+
+        if boxes_t.shape[0] == 0:
+            continue
+
+        pad_image_flag = False
+
+        return image_t, boxes_t, labels_t, landms_t, pad_image_flag
+    return image, boxes, labels, landm, pad_image_flag
+
+
+def _distort(image):
+
+    def _convert(image, alpha=1, beta=0):
+        tmp = image.astype(float) * alpha + beta
+        tmp[tmp < 0] = 0
+        tmp[tmp > 255] = 255
+        image[:] = tmp
+
+    image = image.copy()
+
+    if random.randrange(2):
+
+        #brightness distortion
+        if random.randrange(2):
+            _convert(image, beta=random.uniform(-32, 32))
+
+        #contrast distortion
+        if random.randrange(2):
+            _convert(image, alpha=random.uniform(0.5, 1.5))
+
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
+
+        #saturation distortion
+        if random.randrange(2):
+            _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
+
+        #hue distortion
+        if random.randrange(2):
+            tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
+            tmp %= 180
+            image[:, :, 0] = tmp
+
+        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
+
+    else:
+
+        #brightness distortion
+        if random.randrange(2):
+            _convert(image, beta=random.uniform(-32, 32))
+
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
+
+        #saturation distortion
+        if random.randrange(2):
+            _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
+
+        #hue distortion
+        if random.randrange(2):
+            tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
+            tmp %= 180
+            image[:, :, 0] = tmp
+
+        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
+
+        #contrast distortion
+        if random.randrange(2):
+            _convert(image, alpha=random.uniform(0.5, 1.5))
+
+    return image
+
+
+def _expand(image, boxes, fill, p):
+    if random.randrange(2):
+        return image, boxes
+
+    height, width, depth = image.shape
+
+    scale = random.uniform(1, p)
+    w = int(scale * width)
+    h = int(scale * height)
+
+    left = random.randint(0, w - width)
+    top = random.randint(0, h - height)
+
+    boxes_t = boxes.copy()
+    boxes_t[:, :2] += (left, top)
+    boxes_t[:, 2:] += (left, top)
+    expand_image = np.empty(
+        (h, w, depth),
+        dtype=image.dtype)
+    expand_image[:, :] = fill
+    expand_image[top:top + height, left:left + width] = image
+    image = expand_image
+
+    return image, boxes_t
+
+
+def _mirror(image, boxes, landms):
+    _, width, _ = image.shape
+    if random.randrange(2):
+        image = image[:, ::-1]
+        boxes = boxes.copy()
+        boxes[:, 0::2] = width - boxes[:, 2::-2]
+
+        # landm
+        landms = landms.copy()
+        landms = landms.reshape([-1, 5, 2])
+        landms[:, :, 0] = width - landms[:, :, 0]
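+        # after the horizontal flip, swap the paired landmarks (0 <-> 1 and 3 <-> 4)
+        # so the left/right points stay consistent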
+        tmp = landms[:, 1, :].copy()
+        landms[:, 1, :] = landms[:, 0, :]
+        landms[:, 0, :] = tmp
+        tmp1 = landms[:, 4, :].copy()
+        landms[:, 4, :] = landms[:, 3, :]
+        landms[:, 3, :] = tmp1
+        landms = landms.reshape([-1, 10])
+
+    return image, boxes, landms
+
+
+def _pad_to_square(image, rgb_mean, pad_image_flag):
+    if not pad_image_flag:
+        return image
+    height, width, _ = image.shape
+    long_side = max(width, height)
+    image_t = np.empty((long_side, long_side, 3), dtype=image.dtype)
+    image_t[:, :] = rgb_mean
+    image_t[0:0 + height, 0:0 + width] = image
+    return image_t
+
+
+def _resize_subtract_mean(image, insize, rgb_mean):
+    interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
+    interp_method = interp_methods[random.randrange(5)]
+    image = cv2.resize(image, (insize, insize), interpolation=interp_method)
+    image = image.astype(np.float32)
+    image -= rgb_mean
+    return image.transpose(2, 0, 1)
+
+
+class preproc(object):
+
+    def __init__(self, img_dim, rgb_means):
+        self.img_dim = img_dim
+        self.rgb_means = rgb_means
+
+    def __call__(self, image, targets):
+        assert targets.shape[0] > 0, "this image does not have gt"
+
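+        # targets layout per row: [x1, y1, x2, y2, 10 landmark coords, label]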
+        boxes = targets[:, :4].copy()
+        labels = targets[:, -1].copy()
+        landm = targets[:, 4:-1].copy()
+
+        image_t, boxes_t, labels_t, landm_t, pad_image_flag = _crop(image, boxes, labels, landm, self.img_dim)
+        image_t = _distort(image_t)
+        image_t = _pad_to_square(image_t,self.rgb_means, pad_image_flag)
+        image_t, boxes_t, landm_t = _mirror(image_t, boxes_t, landm_t)
+        height, width, _ = image_t.shape
+        image_t = _resize_subtract_mean(image_t, self.img_dim, self.rgb_means)
+        boxes_t[:, 0::2] /= width
+        boxes_t[:, 1::2] /= height
+
+        landm_t[:, 0::2] /= width
+        landm_t[:, 1::2] /= height
+
+        labels_t = np.expand_dims(labels_t, 1)
+        targets_t = np.hstack((boxes_t, landm_t, labels_t))
+
+        return image_t, targets_t
diff --git a/face_detect/data/wider_face.py b/face_detect/data/wider_face.py
new file mode 100755
index 0000000000000000000000000000000000000000..22f56efdc221bd4162d22884669ba44a3d4de5cd
--- /dev/null
+++ b/face_detect/data/wider_face.py
@@ -0,0 +1,101 @@
+import os
+import os.path
+import sys
+import torch
+import torch.utils.data as data
+import cv2
+import numpy as np
+
+class WiderFaceDetection(data.Dataset):
+    def __init__(self, txt_path, preproc=None):
+        self.preproc = preproc
+        self.imgs_path = []
+        self.words = []
+        with open(txt_path, 'r') as f:
+            lines = f.readlines()
+        isFirst = True
+        labels = []
+        for line in lines:
+            line = line.rstrip()
+            if line.startswith('#'):
+                if isFirst is True:
+                    isFirst = False
+                else:
+                    labels_copy = labels.copy()
+                    self.words.append(labels_copy)
+                    labels.clear()
+                path = line[2:]
+                path = txt_path.replace('label.txt','images/') + path
+                self.imgs_path.append(path)
+            else:
+                line = line.split(' ')
+                label = [float(x) for x in line]
+                labels.append(label)
+
+        self.words.append(labels)
+
+    def __len__(self):
+        return len(self.imgs_path)
+
+    def __getitem__(self, index):
+        img = cv2.imread(self.imgs_path[index])
+        height, width, _ = img.shape
+
+        labels = self.words[index]
+        annotations = np.zeros((0, 15))
+        if len(labels) == 0:
+            return annotations
+        for idx, label in enumerate(labels):
+            annotation = np.zeros((1, 15))
+            # bbox
+            annotation[0, 0] = label[0]  # x1
+            annotation[0, 1] = label[1]  # y1
+            annotation[0, 2] = label[0] + label[2]  # x2
+            annotation[0, 3] = label[1] + label[3]  # y2
+
+            # landmarks
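+            # landmarks in label.txt come in triplets (x, y, plus one extra per-point value),
+            # so every third entry (indices 6, 9, 12, 15) is skipped here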
+            annotation[0, 4] = label[4]    # l0_x
+            annotation[0, 5] = label[5]    # l0_y
+            annotation[0, 6] = label[7]    # l1_x
+            annotation[0, 7] = label[8]    # l1_y
+            annotation[0, 8] = label[10]   # l2_x
+            annotation[0, 9] = label[11]   # l2_y
+            annotation[0, 10] = label[13]  # l3_x
+            annotation[0, 11] = label[14]  # l3_y
+            annotation[0, 12] = label[16]  # l4_x
+            annotation[0, 13] = label[17]  # l4_y
+            if (annotation[0, 4]<0):
+                annotation[0, 14] = -1
+            else:
+                annotation[0, 14] = 1
+
+            annotations = np.append(annotations, annotation, axis=0)
+        target = np.array(annotations)
+        if self.preproc is not None:
+            img, target = self.preproc(img, target)
+
+        return torch.from_numpy(img), target
+
+def detection_collate(batch):
+    """Custom collate fn for dealing with batches of images that have a different
+    number of associated object annotations (bounding boxes).
+
+    Arguments:
+        batch: (tuple) A tuple of tensor images and lists of annotations
+
+    Return:
+        A tuple containing:
+            1) (tensor) batch of images stacked on their 0 dim
+            2) (list of tensors) annotations for a given image are stacked on 0 dim
+    """
+    targets = []
+    imgs = []
+    for _, sample in enumerate(batch):
+        for _, tup in enumerate(sample):
+            if torch.is_tensor(tup):
+                imgs.append(tup)
+            elif isinstance(tup, type(np.empty(0))):
+                annos = torch.from_numpy(tup).float()
+                targets.append(annos)
+
+    return (torch.stack(imgs, 0), targets)
diff --git a/face_detect/facemodels/__init__.py b/face_detect/facemodels/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/face_detect/facemodels/net.py b/face_detect/facemodels/net.py
new file mode 100755
index 0000000000000000000000000000000000000000..beb6040b24258f8b96020c1c9fc2610819718017
--- /dev/null
+++ b/face_detect/facemodels/net.py
@@ -0,0 +1,137 @@
+import time
+import torch
+import torch.nn as nn
+import torchvision.models._utils as _utils
+import torchvision.models as models
+import torch.nn.functional as F
+from torch.autograd import Variable
+
+def conv_bn(inp, oup, stride = 1, leaky = 0):
+    return nn.Sequential(
+        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
+        nn.BatchNorm2d(oup),
+        nn.LeakyReLU(negative_slope=leaky, inplace=True)
+    )
+
+def conv_bn_no_relu(inp, oup, stride):
+    return nn.Sequential(
+        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
+        nn.BatchNorm2d(oup),
+    )
+
+def conv_bn1X1(inp, oup, stride, leaky=0):
+    return nn.Sequential(
+        nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False),
+        nn.BatchNorm2d(oup),
+        nn.LeakyReLU(negative_slope=leaky, inplace=True)
+    )
+
+def conv_dw(inp, oup, stride, leaky=0.1):
+    return nn.Sequential(
+        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
+        nn.BatchNorm2d(inp),
+        nn.LeakyReLU(negative_slope= leaky,inplace=True),
+
+        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
+        nn.BatchNorm2d(oup),
+        nn.LeakyReLU(negative_slope= leaky,inplace=True),
+    )
+
+class SSH(nn.Module):
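+    """SSH context module: approximates 5x5 and 7x7 receptive fields with stacked 3x3 convolutions and concatenates the three branches."""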
+    def __init__(self, in_channel, out_channel):
+        super(SSH, self).__init__()
+        assert out_channel % 4 == 0
+        leaky = 0
+        if (out_channel <= 64):
+            leaky = 0.1
+        self.conv3X3 = conv_bn_no_relu(in_channel, out_channel//2, stride=1)
+
+        self.conv5X5_1 = conv_bn(in_channel, out_channel//4, stride=1, leaky = leaky)
+        self.conv5X5_2 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1)
+
+        self.conv7X7_2 = conv_bn(out_channel//4, out_channel//4, stride=1, leaky = leaky)
+        self.conv7x7_3 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1)
+
+    def forward(self, input):
+        conv3X3 = self.conv3X3(input)
+
+        conv5X5_1 = self.conv5X5_1(input)
+        conv5X5 = self.conv5X5_2(conv5X5_1)
+
+        conv7X7_2 = self.conv7X7_2(conv5X5_1)
+        conv7X7 = self.conv7x7_3(conv7X7_2)
+
+        out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)
+        out = F.relu(out)
+        return out
+
+class FPN(nn.Module):
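+    """Feature pyramid: 1x1 lateral convolutions, nearest-neighbour top-down upsampling, and 3x3 merge convolutions on the two finer levels."""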
+    def __init__(self,in_channels_list,out_channels):
+        super(FPN,self).__init__()
+        leaky = 0
+        if (out_channels <= 64):
+            leaky = 0.1
+        self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride = 1, leaky = leaky)
+        self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride = 1, leaky = leaky)
+        self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride = 1, leaky = leaky)
+
+        self.merge1 = conv_bn(out_channels, out_channels, leaky = leaky)
+        self.merge2 = conv_bn(out_channels, out_channels, leaky = leaky)
+
+    def forward(self, input):
+        # names = list(input.keys())
+        input = list(input.values())
+
+        output1 = self.output1(input[0])
+        output2 = self.output2(input[1])
+        output3 = self.output3(input[2])
+
+        up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode="nearest")
+        output2 = output2 + up3
+        output2 = self.merge2(output2)
+
+        up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode="nearest")
+        output1 = output1 + up2
+        output1 = self.merge1(output1)
+
+        out = [output1, output2, output3]
+        return out
+
+
+
+class MobileNetV1(nn.Module):
+    def __init__(self):
+        super(MobileNetV1, self).__init__()
+        self.stage1 = nn.Sequential(
+            conv_bn(3, 8, 2, leaky = 0.1),    # 3
+            conv_dw(8, 16, 1),   # 7
+            conv_dw(16, 32, 2),  # 11
+            conv_dw(32, 32, 1),  # 19
+            conv_dw(32, 64, 2),  # 27
+            conv_dw(64, 64, 1),  # 43
+        )
+        self.stage2 = nn.Sequential(
+            conv_dw(64, 128, 2),  # 43 + 16 = 59
+            conv_dw(128, 128, 1), # 59 + 32 = 91
+            conv_dw(128, 128, 1), # 91 + 32 = 123
+            conv_dw(128, 128, 1), # 123 + 32 = 155
+            conv_dw(128, 128, 1), # 155 + 32 = 187
+            conv_dw(128, 128, 1), # 187 + 32 = 219
+        )
+        self.stage3 = nn.Sequential(
+            conv_dw(128, 256, 2), # 219 + 32 = 251
+            conv_dw(256, 256, 1), # 251 + 64 = 315
+        )
+        self.avg = nn.AdaptiveAvgPool2d((1,1))
+        self.fc = nn.Linear(256, 1000)
+
+    def forward(self, x):
+        x = self.stage1(x)
+        x = self.stage2(x)
+        x = self.stage3(x)
+        x = self.avg(x)
+        # x = self.model(x)
+        x = x.view(-1, 256)
+        x = self.fc(x)
+        return x
+
diff --git a/face_detect/facemodels/retinaface.py b/face_detect/facemodels/retinaface.py
new file mode 100755
index 0000000000000000000000000000000000000000..b7092a2bc2f35d06ce99d25473bce913ef3fd8e7
--- /dev/null
+++ b/face_detect/facemodels/retinaface.py
@@ -0,0 +1,127 @@
+import torch
+import torch.nn as nn
+import torchvision.models.detection.backbone_utils as backbone_utils
+import torchvision.models._utils as _utils
+import torch.nn.functional as F
+from collections import OrderedDict
+
+from facemodels.net import MobileNetV1 as MobileNetV1
+from facemodels.net import FPN as FPN
+from facemodels.net import SSH as SSH
+
+
+
+class ClassHead(nn.Module):
+    def __init__(self,inchannels=512,num_anchors=3):
+        super(ClassHead,self).__init__()
+        self.num_anchors = num_anchors
+        self.conv1x1 = nn.Conv2d(inchannels,self.num_anchors*2,kernel_size=(1,1),stride=1,padding=0)
+
+    def forward(self,x):
+        out = self.conv1x1(x)
+        out = out.permute(0,2,3,1).contiguous()
+        
+        return out.view(out.shape[0], -1, 2)
+
+class BboxHead(nn.Module):
+    def __init__(self,inchannels=512,num_anchors=3):
+        super(BboxHead,self).__init__()
+        self.conv1x1 = nn.Conv2d(inchannels,num_anchors*4,kernel_size=(1,1),stride=1,padding=0)
+
+    def forward(self,x):
+        out = self.conv1x1(x)
+        out = out.permute(0,2,3,1).contiguous()
+
+        return out.view(out.shape[0], -1, 4)
+
+class LandmarkHead(nn.Module):
+    def __init__(self,inchannels=512,num_anchors=3):
+        super(LandmarkHead,self).__init__()
+        self.conv1x1 = nn.Conv2d(inchannels,num_anchors*10,kernel_size=(1,1),stride=1,padding=0)
+
+    def forward(self,x):
+        out = self.conv1x1(x)
+        out = out.permute(0,2,3,1).contiguous()
+
+        return out.view(out.shape[0], -1, 10)
+
+class RetinaFace(nn.Module):
+    def __init__(self, cfg = None, phase = 'train'):
+        """
+        :param cfg:  Network related settings.
+        :param phase: train or test.
+        """
+        super(RetinaFace,self).__init__()
+        self.phase = phase
+        backbone = None
+        if cfg['name'] == 'mobilenet0.25':
+            backbone = MobileNetV1()
+            if cfg['pretrain']:
+                checkpoint = torch.load("./weights/mobilenetV1X0.25_pretrain.tar", map_location=torch.device('cpu'))
+                from collections import OrderedDict
+                new_state_dict = OrderedDict()
+                for k, v in checkpoint['state_dict'].items():
+                    name = k[7:]  # remove module.
+                    new_state_dict[name] = v
+                # load params
+                backbone.load_state_dict(new_state_dict)
+        elif cfg['name'] == 'Resnet50':
+            import torchvision.models as models
+            backbone = models.resnet50(pretrained=cfg['pretrain'])
+
+        self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers'])
+        in_channels_stage2 = cfg['in_channel']
+        in_channels_list = [
+            in_channels_stage2 * 2,
+            in_channels_stage2 * 4,
+            in_channels_stage2 * 8,
+        ]
+        out_channels = cfg['out_channel']
+        self.fpn = FPN(in_channels_list,out_channels)
+        self.ssh1 = SSH(out_channels, out_channels)
+        self.ssh2 = SSH(out_channels, out_channels)
+        self.ssh3 = SSH(out_channels, out_channels)
+
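+        # each head predicts 2 anchors per spatial location, matching the two min_sizes per FPN level in the config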
+        self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
+        self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
+        self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])
+
+    def _make_class_head(self,fpn_num=3,inchannels=64,anchor_num=2):
+        classhead = nn.ModuleList()
+        for i in range(fpn_num):
+            classhead.append(ClassHead(inchannels,anchor_num))
+        return classhead
+    
+    def _make_bbox_head(self,fpn_num=3,inchannels=64,anchor_num=2):
+        bboxhead = nn.ModuleList()
+        for i in range(fpn_num):
+            bboxhead.append(BboxHead(inchannels,anchor_num))
+        return bboxhead
+
+    def _make_landmark_head(self,fpn_num=3,inchannels=64,anchor_num=2):
+        landmarkhead = nn.ModuleList()
+        for i in range(fpn_num):
+            landmarkhead.append(LandmarkHead(inchannels,anchor_num))
+        return landmarkhead
+
+    def forward(self,inputs):
+        out = self.body(inputs)
+
+        # FPN
+        fpn = self.fpn(out)
+
+        # SSH
+        feature1 = self.ssh1(fpn[0])
+        feature2 = self.ssh2(fpn[1])
+        feature3 = self.ssh3(fpn[2])
+        features = [feature1, feature2, feature3]
+
+        bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
+        classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)],dim=1)
+        ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1)
+
+        if self.phase == 'train':
+            output = (bbox_regressions, classifications, ldm_regressions)
+        else:
+            output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
+        return output
\ No newline at end of file
diff --git a/face_detect/layers/__init__.py b/face_detect/layers/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..53a3f4b5160995d93bc7911e808b3045d74362c9
--- /dev/null
+++ b/face_detect/layers/__init__.py
@@ -0,0 +1,2 @@
+from .functions import *
+from .modules import *
diff --git a/face_detect/layers/functions/prior_box.py b/face_detect/layers/functions/prior_box.py
new file mode 100755
index 0000000000000000000000000000000000000000..80c7f858371ed71f39ed609eb44b423d8693bf61
--- /dev/null
+++ b/face_detect/layers/functions/prior_box.py
@@ -0,0 +1,34 @@
+import torch
+from itertools import product as product
+import numpy as np
+from math import ceil
+
+
+class PriorBox(object):
+    def __init__(self, cfg, image_size=None, phase='train'):
+        super(PriorBox, self).__init__()
+        self.min_sizes = cfg['min_sizes']
+        self.steps = cfg['steps']
+        self.clip = cfg['clip']
+        self.image_size = image_size
+        self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps]
+        self.name = "s"
+
+    def forward(self):
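+        """Generate anchors as (cx, cy, w, h), normalised by image size: one anchor per min_size at every feature-map cell."""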
+        anchors = []
+        for k, f in enumerate(self.feature_maps):
+            min_sizes = self.min_sizes[k]
+            for i, j in product(range(f[0]), range(f[1])):
+                for min_size in min_sizes:
+                    s_kx = min_size / self.image_size[1]
+                    s_ky = min_size / self.image_size[0]
+                    dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
+                    dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
+                    for cy, cx in product(dense_cy, dense_cx):
+                        anchors += [cx, cy, s_kx, s_ky]
+
+        # back to torch land
+        output = torch.Tensor(anchors).view(-1, 4)
+        if self.clip:
+            output.clamp_(max=1, min=0)
+        return output
diff --git a/face_detect/layers/modules/__init__.py b/face_detect/layers/modules/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..cf24bddbf283f233d0b93fc074a2bac2f5c044a9
--- /dev/null
+++ b/face_detect/layers/modules/__init__.py
@@ -0,0 +1,3 @@
+from .multibox_loss import MultiBoxLoss
+
+__all__ = ['MultiBoxLoss']
diff --git a/face_detect/layers/modules/multibox_loss.py b/face_detect/layers/modules/multibox_loss.py
new file mode 100755
index 0000000000000000000000000000000000000000..096620480eba59e9d893c1940899f7e3d6736cae
--- /dev/null
+++ b/face_detect/layers/modules/multibox_loss.py
@@ -0,0 +1,125 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.autograd import Variable
+from utils.box_utils import match, log_sum_exp
+from data import cfg_mnet
+GPU = cfg_mnet['gpu_train']
+
+class MultiBoxLoss(nn.Module):
+    """SSD Weighted Loss Function
+    Compute Targets:
+        1) Produce Confidence Target Indices by matching  ground truth boxes
+           with (default) 'priorboxes' that have jaccard index > threshold parameter
+           (default threshold: 0.5).
+        2) Produce localization target by 'encoding' variance into offsets of ground
+           truth boxes and their matched  'priorboxes'.
+        3) Hard negative mining to filter the excessive number of negative examples
+           that comes with using a large number of default bounding boxes.
+           (default negative:positive ratio 3:1)
+    Objective Loss:
+        L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
+        Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
+        weighted by α which is set to 1 by cross val.
+        Args:
+            c: class confidences,
+            l: predicted boxes,
+            g: ground truth boxes
+            N: number of matched default boxes
+        See: https://arxiv.org/pdf/1512.02325.pdf for more details.
+    """
+
+    def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target):
+        super(MultiBoxLoss, self).__init__()
+        self.num_classes = num_classes
+        self.threshold = overlap_thresh
+        self.background_label = bkg_label
+        self.encode_target = encode_target
+        self.use_prior_for_matching = prior_for_matching
+        self.do_neg_mining = neg_mining
+        self.negpos_ratio = neg_pos
+        self.neg_overlap = neg_overlap
+        self.variance = [0.1, 0.2]
+
+    def forward(self, predictions, priors, targets):
+        """Multibox Loss
+        Args:
+            predictions (tuple): A tuple containing loc preds, conf preds,
+            and prior boxes from SSD net.
+                conf shape: torch.size(batch_size,num_priors,num_classes)
+                loc shape: torch.size(batch_size,num_priors,4)
+                priors shape: torch.size(num_priors,4)
+
+            ground_truth (tensor): Ground truth boxes and labels for a batch,
+                shape: [batch_size,num_objs,5] (last idx is the label).
+        """
+
+        loc_data, conf_data, landm_data = predictions
+        priors = priors
+        num = loc_data.size(0)
+        num_priors = (priors.size(0))
+
+        # match priors (default boxes) and ground truth boxes
+        loc_t = torch.Tensor(num, num_priors, 4)
+        landm_t = torch.Tensor(num, num_priors, 10)
+        conf_t = torch.LongTensor(num, num_priors)
+        for idx in range(num):
+            truths = targets[idx][:, :4].data
+            labels = targets[idx][:, -1].data
+            landms = targets[idx][:, 4:14].data
+            defaults = priors.data
+            match(self.threshold, truths, defaults, self.variance, labels, landms, loc_t, conf_t, landm_t, idx)
+        if GPU:
+            loc_t = loc_t.cuda()
+            conf_t = conf_t.cuda()
+            landm_t = landm_t.cuda()
+
+        zeros = torch.tensor(0, device=conf_t.device)
+        # landm Loss (Smooth L1)
+        # Shape: [batch,num_priors,10]
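+        # conf_t holds 1 for faces with valid landmarks, -1 for faces without, and 0 for background (see wider_face.py / match())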
+        pos1 = conf_t > zeros
+        num_pos_landm = pos1.long().sum(1, keepdim=True)
+        N1 = max(num_pos_landm.data.sum().float(), 1)
+        pos_idx1 = pos1.unsqueeze(pos1.dim()).expand_as(landm_data)
+        landm_p = landm_data[pos_idx1].view(-1, 10)
+        landm_t = landm_t[pos_idx1].view(-1, 10)
+        loss_landm = F.smooth_l1_loss(landm_p, landm_t, reduction='sum')
+
+
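+        # every matched face (label 1 or -1) counts as a positive for the localisation and classification losses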
+        pos = conf_t != zeros
+        conf_t[pos] = 1
+
+        # Localization Loss (Smooth L1)
+        # Shape: [batch,num_priors,4]
+        pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
+        loc_p = loc_data[pos_idx].view(-1, 4)
+        loc_t = loc_t[pos_idx].view(-1, 4)
+        loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')
+
+        # Compute max conf across batch for hard negative mining
+        batch_conf = conf_data.view(-1, self.num_classes)
+        loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
+
+        # Hard Negative Mining
+        loss_c[pos.view(-1, 1)] = 0 # filter out pos boxes for now
+        loss_c = loss_c.view(num, -1)
+        _, loss_idx = loss_c.sort(1, descending=True)
+        _, idx_rank = loss_idx.sort(1)
+        num_pos = pos.long().sum(1, keepdim=True)
+        num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
+        neg = idx_rank < num_neg.expand_as(idx_rank)
+
+        # Confidence Loss Including Positive and Negative Examples
+        pos_idx = pos.unsqueeze(2).expand_as(conf_data)
+        neg_idx = neg.unsqueeze(2).expand_as(conf_data)
+        conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1,self.num_classes)
+        targets_weighted = conf_t[(pos+neg).gt(0)]
+        loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')
+
+        # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
+        N = max(num_pos.data.sum().float(), 1)
+        loss_l /= N
+        loss_c /= N
+        loss_landm /= N1
+
+        return loss_l, loss_c, loss_landm
diff --git a/face_detect/retinaface_detection.py b/face_detect/retinaface_detection.py
new file mode 100755
index 0000000000000000000000000000000000000000..d24e5208a368ac1d7f04e1aec14960bd64bbe873
--- /dev/null
+++ b/face_detect/retinaface_detection.py
@@ -0,0 +1,192 @@
+'''
+@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+@author: yangxy (yangtao9009@gmail.com)
+'''
+import os
+import torch
+import torch.backends.cudnn as cudnn
+import numpy as np
+from data import cfg_re50
+from layers.functions.prior_box import PriorBox
+from utils.nms.py_cpu_nms import py_cpu_nms
+import cv2
+from facemodels.retinaface import RetinaFace
+from utils.box_utils import decode, decode_landm
+import time
+import torch.nn.functional as F
+
+
+class RetinaFaceDetection(object):
+    def __init__(self, base_dir, device='cuda', network='RetinaFace-R50'):
+        torch.set_grad_enabled(False)
+        cudnn.benchmark = True
+        self.pretrained_path = os.path.join(base_dir, 'weights', network+'.pth')
+        self.device = device #torch.cuda.current_device()
+        self.cfg = cfg_re50
+        self.net = RetinaFace(cfg=self.cfg, phase='test')
+        self.load_model()
+        self.net = self.net.to(device)
+
+        self.mean = torch.tensor([[[[104]], [[117]], [[123]]]]).to(device)
+
+    def check_keys(self, pretrained_state_dict):
+        ckpt_keys = set(pretrained_state_dict.keys())
+        model_keys = set(self.net.state_dict().keys())
+        used_pretrained_keys = model_keys & ckpt_keys
+        unused_pretrained_keys = ckpt_keys - model_keys
+        missing_keys = model_keys - ckpt_keys
+        assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
+        return True
+
+    def remove_prefix(self, state_dict, prefix):
+        ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
+        f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
+        return {f(key): value for key, value in state_dict.items()}
+
+    def load_model(self, load_to_cpu=False):
+        #if load_to_cpu:
+        #    pretrained_dict = torch.load(self.pretrained_path, map_location=lambda storage, loc: storage)
+        #else:
+        #    pretrained_dict = torch.load(self.pretrained_path, map_location=lambda storage, loc: storage.cuda())
+        pretrained_dict = torch.load(self.pretrained_path, map_location=torch.device('cpu'))
+        if "state_dict" in pretrained_dict.keys():
+            pretrained_dict = self.remove_prefix(pretrained_dict['state_dict'], 'module.')
+        else:
+            pretrained_dict = self.remove_prefix(pretrained_dict, 'module.')
+        self.check_keys(pretrained_dict)
+        self.net.load_state_dict(pretrained_dict, strict=False)
+        self.net.eval()
+    
+    def detect(self, img_raw, resize=1, confidence_threshold=0.9, nms_threshold=0.4, top_k=5000, keep_top_k=750, save_image=False):
+        img = np.float32(img_raw)
+
+        im_height, im_width = img.shape[:2]
+        ss = 1.0
+        # downscale very large inputs so the longer side is roughly 1000 px; detections are scaled back by 1/ss before returning
+        if max(im_height, im_width) > 1500:
+            ss = 1000.0/max(im_height, im_width)
+            img = cv2.resize(img, (0,0), fx=ss, fy=ss)
+            im_height, im_width = img.shape[:2]
+
+        scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
+        img -= (104, 117, 123)
+        img = img.transpose(2, 0, 1)
+        img = torch.from_numpy(img).unsqueeze(0)
+        img = img.to(self.device)
+        scale = scale.to(self.device)
+
+        loc, conf, landms = self.net(img)  # forward pass
+
+        priorbox = PriorBox(self.cfg, image_size=(im_height, im_width))
+        priors = priorbox.forward()
+        priors = priors.to(self.device)
+        prior_data = priors.data
+        boxes = decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
+        boxes = boxes * scale / resize
+        boxes = boxes.cpu().numpy()
+        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
+        landms = decode_landm(landms.data.squeeze(0), prior_data, self.cfg['variance'])
+        scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                               img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                               img.shape[3], img.shape[2]])
+        scale1 = scale1.to(self.device)
+        landms = landms * scale1 / resize
+        landms = landms.cpu().numpy()
+
+        # ignore low scores
+        inds = np.where(scores > confidence_threshold)[0]
+        boxes = boxes[inds]
+        landms = landms[inds]
+        scores = scores[inds]
+
+        # keep top-K before NMS
+        order = scores.argsort()[::-1][:top_k]
+        boxes = boxes[order]
+        landms = landms[order]
+        scores = scores[order]
+
+        # do NMS
+        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
+        keep = py_cpu_nms(dets, nms_threshold)
+        # keep = nms(dets, nms_threshold,force_cpu=args.cpu)
+        dets = dets[keep, :]
+        landms = landms[keep]
+
+        # keep top-K faster NMS
+        dets = dets[:keep_top_k, :]
+        landms = landms[:keep_top_k, :]
+
+        # sort faces(delete)
+        '''
+        fscores = [det[4] for det in dets]
+        sorted_idx = sorted(range(len(fscores)), key=lambda k:fscores[k], reverse=False) # sort index
+        tmp = [landms[idx] for idx in sorted_idx]
+        landms = np.asarray(tmp)
+        '''
+        
+        landms = landms.reshape((-1, 5, 2))
+        landms = landms.transpose((0, 2, 1))
+        landms = landms.reshape(-1, 10, )
+        return dets/ss, landms/ss
+
+    def detect_tensor(self, img, resize=1, confidence_threshold=0.9, nms_threshold=0.4, top_k=5000, keep_top_k=750, save_image=False):
+        im_height, im_width = img.shape[-2:]
+        ss = 1000/max(im_height, im_width)
+        img = F.interpolate(img, scale_factor=ss)
+        im_height, im_width = img.shape[-2:]
+        scale = torch.Tensor([im_width, im_height, im_width, im_height]).to(self.device)
+        img -= self.mean
+
+        loc, conf, landms = self.net(img)  # forward pass
+
+        priorbox = PriorBox(self.cfg, image_size=(im_height, im_width))
+        priors = priorbox.forward()
+        priors = priors.to(self.device)
+        prior_data = priors.data
+        boxes = decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
+        boxes = boxes * scale / resize
+        boxes = boxes.cpu().numpy()
+        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
+        landms = decode_landm(landms.data.squeeze(0), prior_data, self.cfg['variance'])
+        scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                               img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                               img.shape[3], img.shape[2]])
+        scale1 = scale1.to(self.device)
+        landms = landms * scale1 / resize
+        landms = landms.cpu().numpy()
+
+        # ignore low scores
+        inds = np.where(scores > confidence_threshold)[0]
+        boxes = boxes[inds]
+        landms = landms[inds]
+        scores = scores[inds]
+
+        # keep top-K before NMS
+        order = scores.argsort()[::-1][:top_k]
+        boxes = boxes[order]
+        landms = landms[order]
+        scores = scores[order]
+
+        # do NMS
+        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
+        keep = py_cpu_nms(dets, nms_threshold)
+        # keep = nms(dets, nms_threshold,force_cpu=args.cpu)
+        dets = dets[keep, :]
+        landms = landms[keep]
+
+        # keep top-K faster NMS
+        dets = dets[:keep_top_k, :]
+        landms = landms[:keep_top_k, :]
+
+        # sort faces(delete)
+        '''
+        fscores = [det[4] for det in dets]
+        sorted_idx = sorted(range(len(fscores)), key=lambda k:fscores[k], reverse=False) # sort index
+        tmp = [landms[idx] for idx in sorted_idx]
+        landms = np.asarray(tmp)
+        '''
+        
+        landms = landms.reshape((-1, 5, 2))
+        landms = landms.transpose((0, 2, 1))
+        landms = landms.reshape(-1, 10, )
+        return dets/ss, landms/ss
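+
+# Example usage (a minimal sketch, not part of the original code; assumes
+# RetinaFace-R50.pth is available under <base_dir>/weights and CUDA is present):
+#   facedetector = RetinaFaceDetection('./', device='cuda')
+#   img = cv2.imread('input.jpg')            # BGR image, HxWx3
+#   dets, landms = facedetector.detect(img)
+#   # dets:   Nx5 array of [x1, y1, x2, y2, score]
+#   # landms: Nx10 array of landmarks, five x coordinates followed by five y coordinates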
diff --git a/face_detect/utils/__init__.py b/face_detect/utils/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/face_detect/utils/box_utils.py b/face_detect/utils/box_utils.py
new file mode 100755
index 0000000000000000000000000000000000000000..c1d12bc612ae3ba3ea9d138bfc5997a2b15d8dd9
--- /dev/null
+++ b/face_detect/utils/box_utils.py
@@ -0,0 +1,330 @@
+import torch
+import numpy as np
+
+
+def point_form(boxes):
+    """ Convert prior_boxes to (xmin, ymin, xmax, ymax)
+    representation for comparison to point form ground truth data.
+    Args:
+        boxes: (tensor) center-size default boxes from priorbox layers.
+    Return:
+        boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
+    """
+    return torch.cat((boxes[:, :2] - boxes[:, 2:]/2,     # xmin, ymin
+                     boxes[:, :2] + boxes[:, 2:]/2), 1)  # xmax, ymax
+
+
+def center_size(boxes):
+    """ Convert prior_boxes to (cx, cy, w, h)
+    representation for comparison to center-size form ground truth data.
+    Args:
+        boxes: (tensor) point_form boxes
+    Return:
+        boxes: (tensor) Converted (cx, cy, w, h) form of boxes.
+    """
+    return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,  # cx, cy
+                      boxes[:, 2:] - boxes[:, :2]), 1)  # w, h
+
+
+def intersect(box_a, box_b):
+    """ We resize both tensors to [A,B,2] without new malloc:
+    [A,2] -> [A,1,2] -> [A,B,2]
+    [B,2] -> [1,B,2] -> [A,B,2]
+    Then we compute the area of intersect between box_a and box_b.
+    Args:
+      box_a: (tensor) bounding boxes, Shape: [A,4].
+      box_b: (tensor) bounding boxes, Shape: [B,4].
+    Return:
+      (tensor) intersection area, Shape: [A,B].
+    """
+    A = box_a.size(0)
+    B = box_b.size(0)
+    max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
+                       box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
+    min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
+                       box_b[:, :2].unsqueeze(0).expand(A, B, 2))
+    inter = torch.clamp((max_xy - min_xy), min=0)
+    return inter[:, :, 0] * inter[:, :, 1]
+
+
+def jaccard(box_a, box_b):
+    """Compute the jaccard overlap of two sets of boxes.  The jaccard overlap
+    is simply the intersection over union of two boxes.  Here we operate on
+    ground truth boxes and default boxes.
+    E.g.:
+        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
+    Args:
+        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
+        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
+    Return:
+        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
+    """
+    inter = intersect(box_a, box_b)
+    area_a = ((box_a[:, 2]-box_a[:, 0]) *
+              (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter)  # [A,B]
+    area_b = ((box_b[:, 2]-box_b[:, 0]) *
+              (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter)  # [A,B]
+    union = area_a + area_b - inter
+    return inter / union  # [A,B]
+
+
+def matrix_iou(a, b):
+    """
+    return iou of a and b, numpy version for data augmentation
+    """
+    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
+    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
+
+    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
+    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
+    area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
+    return area_i / (area_a[:, np.newaxis] + area_b - area_i)
+
+
+def matrix_iof(a, b):
+    """
+    return iof of a and b, numpy version for data augmentation
+    """
+    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
+    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
+
+    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
+    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
+    return area_i / np.maximum(area_a[:, np.newaxis], 1)
+
+
+def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx):
+    """Match each prior box with the ground truth box of the highest jaccard
+    overlap, encode the bounding boxes, then return the matched indices
+    corresponding to both confidence and location preds.
+    Args:
+        threshold: (float) The overlap threshold used when matching boxes.
+        truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].
+        priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
+        variances: (tensor) Variances corresponding to each prior coord,
+            Shape: [num_priors, 4].
+        labels: (tensor) All the class labels for the image, Shape: [num_obj].
+        landms: (tensor) Ground truth landms, Shape [num_obj, 10].
+        loc_t: (tensor) Tensor to be filled w/ encoded location targets.
+        conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
+        landm_t: (tensor) Tensor to be filled w/ encoded landm targets.
+        idx: (int) current batch index
+    Return:
+        The matched indices corresponding to 1)location 2)confidence 3)landm preds.
+    """
+    # jaccard index
+    overlaps = jaccard(
+        truths,
+        point_form(priors)
+    )
+    # (Bipartite Matching)
+    # [1,num_objects] best prior for each ground truth
+    best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
+
+    # ignore hard gt
+    valid_gt_idx = best_prior_overlap[:, 0] >= 0.2
+    best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]
+    if best_prior_idx_filter.shape[0] <= 0:
+        loc_t[idx] = 0
+        conf_t[idx] = 0
+        return
+
+    # [1,num_priors] best ground truth for each prior
+    best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
+    best_truth_idx.squeeze_(0)
+    best_truth_overlap.squeeze_(0)
+    best_prior_idx.squeeze_(1)
+    best_prior_idx_filter.squeeze_(1)
+    best_prior_overlap.squeeze_(1)
+    best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2)  # ensure best prior
+    # TODO refactor: index  best_prior_idx with long tensor
+    # ensure every gt matches with its prior of max overlap
+    for j in range(best_prior_idx.size(0)):     # decide which ground-truth box each best-matching anchor is responsible for
+        best_truth_idx[best_prior_idx[j]] = j
+    matches = truths[best_truth_idx]            # Shape: [num_priors,4]  ground-truth box matched to every anchor
+    conf = labels[best_truth_idx]               # Shape: [num_priors]    label matched to every anchor
+    conf[best_truth_overlap < threshold] = 0    # label as background: anchors whose overlap falls below the threshold become negatives
+    loc = encode(matches, priors, variances)
+
+    matches_landm = landms[best_truth_idx]
+    landm = encode_landm(matches_landm, priors, variances)
+    loc_t[idx] = loc    # [num_priors,4] encoded offsets to learn
+    conf_t[idx] = conf  # [num_priors] top class label for each prior
+    landm_t[idx] = landm
+
+
+def encode(matched, priors, variances):
+    """Encode the variances from the priorbox layers into the ground truth boxes
+    we have matched (based on jaccard overlap) with the prior boxes.
+    Args:
+        matched: (tensor) Coords of ground truth for each prior in point-form
+            Shape: [num_priors, 4].
+        priors: (tensor) Prior boxes in center-offset form
+            Shape: [num_priors,4].
+        variances: (list[float]) Variances of priorboxes
+    Return:
+        encoded boxes (tensor), Shape: [num_priors, 4]
+    """
+
+    # dist b/t match center and prior's center
+    g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
+    # encode variance
+    g_cxcy /= (variances[0] * priors[:, 2:])
+    # match wh / prior wh
+    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
+    g_wh = torch.log(g_wh) / variances[1]
+    # return target for smooth_l1_loss
+    return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]
+
+def encode_landm(matched, priors, variances):
+    """Encode the variances from the priorbox layers into the ground truth boxes
+    we have matched (based on jaccard overlap) with the prior boxes.
+    Args:
+        matched: (tensor) Coords of ground truth for each prior in point-form
+            Shape: [num_priors, 10].
+        priors: (tensor) Prior boxes in center-offset form
+            Shape: [num_priors,4].
+        variances: (list[float]) Variances of priorboxes
+    Return:
+        encoded landm (tensor), Shape: [num_priors, 10]
+    """
+
+    # dist b/t match center and prior's center
+    matched = torch.reshape(matched, (matched.size(0), 5, 2))
+    priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+    priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+    priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+    priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+    priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2)
+    g_cxcy = matched[:, :, :2] - priors[:, :, :2]
+    # encode variance
+    g_cxcy /= (variances[0] * priors[:, :, 2:])
+    # g_cxcy /= priors[:, :, 2:]
+    g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1)
+    # return target for smooth_l1_loss
+    return g_cxcy
+
+
+# Adapted from https://github.com/Hakuyume/chainer-ssd
+def decode(loc, priors, variances):
+    """Decode locations from predictions using priors to undo
+    the encoding we did for offset regression at train time.
+    Args:
+        loc (tensor): location predictions for loc layers,
+            Shape: [num_priors,4]
+        priors (tensor): Prior boxes in center-offset form.
+            Shape: [num_priors,4].
+        variances: (list[float]) Variances of priorboxes
+    Return:
+        decoded bounding box predictions
+    """
+
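+    # cx = prior_cx + loc_x * var[0] * prior_w,  w = prior_w * exp(loc_w * var[1])  (likewise for y / h)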
+    boxes = torch.cat((
+        priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
+        priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
+    boxes[:, :2] -= boxes[:, 2:] / 2
+    boxes[:, 2:] += boxes[:, :2]
+    return boxes
+
+def decode_landm(pre, priors, variances):
+    """Decode landm from predictions using priors to undo
+    the encoding we did for offset regression at train time.
+    Args:
+        pre (tensor): landm predictions for loc layers,
+            Shape: [num_priors,10]
+        priors (tensor): Prior boxes in center-offset form.
+            Shape: [num_priors,4].
+        variances: (list[float]) Variances of priorboxes
+    Return:
+        decoded landm predictions
+    """
+    landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
+                        priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
+                        priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
+                        priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
+                        priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
+                        ), dim=1)
+    return landms
+
+
+def log_sum_exp(x):
+    """Utility function for computing log_sum_exp while determining
+    This will be used to determine unaveraged confidence loss across
+    all examples in a batch.
+    Args:
+        x (Variable(tensor)): conf_preds from conf layers
+    """
+    x_max = x.data.max()
+    return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max
+
+
+# Original author: Francisco Massa:
+# https://github.com/fmassa/object-detection.torch
+# Ported to PyTorch by Max deGroot (02/01/2017)
+def nms(boxes, scores, overlap=0.5, top_k=200):
+    """Apply non-maximum suppression at test time to avoid detecting too many
+    overlapping bounding boxes for a given object.
+    Args:
+        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
+        scores: (tensor) The predicted class scores for the img, Shape:[num_priors].
+        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
+        top_k: (int) The Maximum number of box preds to consider.
+    Return:
+        A tuple (keep, count): keep holds the indices of the retained boxes, of which the first count entries are valid.
+    """
+
+    keep = torch.Tensor(scores.size(0)).fill_(0).long()
+    if boxes.numel() == 0:
+        return keep
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    x2 = boxes[:, 2]
+    y2 = boxes[:, 3]
+    area = torch.mul(x2 - x1, y2 - y1)
+    v, idx = scores.sort(0)  # sort in ascending order
+    # I = I[v >= 0.01]
+    idx = idx[-top_k:]  # indices of the top-k largest vals
+    xx1 = boxes.new()
+    yy1 = boxes.new()
+    xx2 = boxes.new()
+    yy2 = boxes.new()
+    w = boxes.new()
+    h = boxes.new()
+
+    # keep = torch.Tensor()
+    count = 0
+    while idx.numel() > 0:
+        i = idx[-1]  # index of current largest val
+        # keep.append(i)
+        keep[count] = i
+        count += 1
+        if idx.size(0) == 1:
+            break
+        idx = idx[:-1]  # remove kept element from view
+        # load bboxes of next highest vals
+        torch.index_select(x1, 0, idx, out=xx1)
+        torch.index_select(y1, 0, idx, out=yy1)
+        torch.index_select(x2, 0, idx, out=xx2)
+        torch.index_select(y2, 0, idx, out=yy2)
+        # store element-wise max with next highest score
+        xx1 = torch.clamp(xx1, min=x1[i])
+        yy1 = torch.clamp(yy1, min=y1[i])
+        xx2 = torch.clamp(xx2, max=x2[i])
+        yy2 = torch.clamp(yy2, max=y2[i])
+        w.resize_as_(xx2)
+        h.resize_as_(yy2)
+        w = xx2 - xx1
+        h = yy2 - yy1
+        # clamp widths and heights so they stay non-negative after each iteration
+        w = torch.clamp(w, min=0.0)
+        h = torch.clamp(h, min=0.0)
+        inter = w*h
+        # IoU = i / (area(a) + area(b) - i)
+        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
+        union = (rem_areas - inter) + area[i]
+        IoU = inter/union  # store result in iou
+        # keep only elements with an IoU <= overlap
+        idx = idx[IoU.le(overlap)]
+    return keep, count
+
+
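+if __name__ == '__main__':
+    # Hedged self-test (not part of the original detector): with zero offsets
+    # and the usual RetinaFace variances [0.1, 0.2], decode() returns the
+    # priors themselves, converted from center form to corner form.
+    priors = torch.tensor([[0.5, 0.5, 0.2, 0.2]])
+    boxes = decode(torch.zeros(1, 4), priors, [0.1, 0.2])
+    print(boxes)  # tensor([[0.4000, 0.4000, 0.6000, 0.6000]])
+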
diff --git a/face_detect/utils/nms/__init__.py b/face_detect/utils/nms/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/face_detect/utils/nms/py_cpu_nms.py b/face_detect/utils/nms/py_cpu_nms.py
new file mode 100755
index 0000000000000000000000000000000000000000..54e7b25fef72b518df6dcf8d6fb78b986796c6e3
--- /dev/null
+++ b/face_detect/utils/nms/py_cpu_nms.py
@@ -0,0 +1,38 @@
+# --------------------------------------------------------
+# Fast R-CNN
+# Copyright (c) 2015 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick
+# --------------------------------------------------------
+
+import numpy as np
+
+def py_cpu_nms(dets, thresh):
+    """Pure Python NMS baseline."""
+    x1 = dets[:, 0]
+    y1 = dets[:, 1]
+    x2 = dets[:, 2]
+    y2 = dets[:, 3]
+    scores = dets[:, 4]
+
+    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+    order = scores.argsort()[::-1]
+
+    keep = []
+    while order.size > 0:
+        i = order[0]
+        keep.append(i)
+        xx1 = np.maximum(x1[i], x1[order[1:]])
+        yy1 = np.maximum(y1[i], y1[order[1:]])
+        xx2 = np.minimum(x2[i], x2[order[1:]])
+        yy2 = np.minimum(y2[i], y2[order[1:]])
+
+        w = np.maximum(0.0, xx2 - xx1 + 1)
+        h = np.maximum(0.0, yy2 - yy1 + 1)
+        inter = w * h
+        ovr = inter / (areas[i] + areas[order[1:]] - inter)
+
+        inds = np.where(ovr <= thresh)[0]
+        order = order[inds + 1]
+
+    return keep
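+
+
+if __name__ == '__main__':
+    # Hedged usage sketch: two heavily overlapping boxes plus one distant box;
+    # with thresh=0.5 the lower-scoring duplicate (index 1) is suppressed.
+    dets = np.array([[10., 10., 50., 50., 0.9],
+                     [12., 12., 52., 52., 0.8],
+                     [100., 100., 140., 140., 0.7]])
+    print(py_cpu_nms(dets, thresh=0.5))  # keeps indices 0 and 2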
diff --git a/face_detect/utils/timer.py b/face_detect/utils/timer.py
new file mode 100755
index 0000000000000000000000000000000000000000..e4b3b8098a5ad41f8d18d42b6b2fedb694aa5508
--- /dev/null
+++ b/face_detect/utils/timer.py
@@ -0,0 +1,40 @@
+# --------------------------------------------------------
+# Fast R-CNN
+# Copyright (c) 2015 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick
+# --------------------------------------------------------
+
+import time
+
+
+class Timer(object):
+    """A simple timer."""
+    def __init__(self):
+        self.total_time = 0.
+        self.calls = 0
+        self.start_time = 0.
+        self.diff = 0.
+        self.average_time = 0.
+
+    def tic(self):
+        # using time.time instead of time.clock because time.clock
+        # does not normalize for multithreading
+        self.start_time = time.time()
+
+    def toc(self, average=True):
+        self.diff = time.time() - self.start_time
+        self.total_time += self.diff
+        self.calls += 1
+        self.average_time = self.total_time / self.calls
+        if average:
+            return self.average_time
+        else:
+            return self.diff
+
+    def clear(self):
+        self.total_time = 0.
+        self.calls = 0
+        self.start_time = 0.
+        self.diff = 0.
+        self.average_time = 0.
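+
+
+if __name__ == '__main__':
+    # Hedged usage sketch: time a trivial computation a few times and report
+    # the running average returned by toc(average=True).
+    timer = Timer()
+    for _ in range(3):
+        timer.tic()
+        sum(range(100000))
+        print('average: %.6f s' % timer.toc(average=True))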
diff --git a/face_enhancement.py b/face_enhancement.py
new file mode 100755
index 0000000000000000000000000000000000000000..42f45b8149a9d88a19cdb94eb9146231fff2ce10
--- /dev/null
+++ b/face_enhancement.py
@@ -0,0 +1,145 @@
+'''
+@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+@author: yangxy (yangtao9009@gmail.com)
+'''
+import os
+import cv2
+import glob
+import time
+import argparse
+import numpy as np
+from PIL import Image
+import __init_paths
+from face_detect.retinaface_detection import RetinaFaceDetection
+from face_parse.face_parsing import FaceParse
+from face_model.face_gan import FaceGAN
+from sr_model.real_esrnet import RealESRNet
+from align_faces import warp_and_crop_face, get_reference_facial_points
+
+class FaceEnhancement(object):
+    def __init__(self, base_dir='./', size=512, model=None, use_sr=True, sr_model=None, channel_multiplier=2, narrow=1, key=None, device='cuda'):
+        self.facedetector = RetinaFaceDetection(base_dir, device)
+        self.facegan = FaceGAN(base_dir, size, model, channel_multiplier, narrow, key, device=device)
+        self.srmodel = RealESRNet(base_dir, sr_model, device=device)
+        self.faceparser = FaceParse(base_dir, device=device)
+        self.use_sr = use_sr
+        self.size = size
+        self.threshold = 0.9
+
+        # the mask for pasting restored faces back
+        self.mask = np.zeros((512, 512), np.float32)
+        cv2.rectangle(self.mask, (26, 26), (486, 486), (1, 1, 1), -1, cv2.LINE_AA)
+        self.mask = cv2.GaussianBlur(self.mask, (101, 101), 11)
+        self.mask = cv2.GaussianBlur(self.mask, (101, 101), 11)
+
+        self.kernel = np.array((
+                [0.0625, 0.125, 0.0625],
+                [0.125, 0.25, 0.125],
+                [0.0625, 0.125, 0.0625]), dtype="float32")
+
+        # get the reference 5-landmark positions for the crop settings
+        default_square = True
+        inner_padding_factor = 0.25
+        outer_padding = (0, 0)
+        self.reference_5pts = get_reference_facial_points(
+                (self.size, self.size), inner_padding_factor, outer_padding, default_square)
+
+    def mask_postprocess(self, mask, thres=20):
+        mask[:thres, :] = 0; mask[-thres:, :] = 0
+        mask[:, :thres] = 0; mask[:, -thres:] = 0
+        mask = cv2.GaussianBlur(mask, (101, 101), 11)
+        mask = cv2.GaussianBlur(mask, (101, 101), 11)
+        return mask.astype(np.float32)
+
+    def process(self, img):
+        if self.use_sr:
+            img_sr = self.srmodel.process(img)
+            if img_sr is not None:
+                img = cv2.resize(img, img_sr.shape[:2][::-1])
+
+        facebs, landms = self.facedetector.detect(img)
+        
+        orig_faces, enhanced_faces = [], []
+        height, width = img.shape[:2]
+        full_mask = np.zeros((height, width), dtype=np.float32)
+        full_img = np.zeros(img.shape, dtype=np.uint8)
+
+        for i, (faceb, facial5points) in enumerate(zip(facebs, landms)):
+            if faceb[4]<self.threshold: continue
+            fh, fw = (faceb[3]-faceb[1]), (faceb[2]-faceb[0])
+
+            facial5points = np.reshape(facial5points, (2, 5))
+
+            of, tfm_inv = warp_and_crop_face(img, facial5points, reference_pts=self.reference_5pts, crop_size=(self.size, self.size))
+            
+            # enhance the face
+            ef = self.facegan.process(of)
+            
+            orig_faces.append(of)
+            enhanced_faces.append(ef)
+            
+            #tmp_mask = self.mask
+            tmp_mask = self.mask_postprocess(self.faceparser.process(ef)[0]/255.)
+            tmp_mask = cv2.resize(tmp_mask, ef.shape[:2])
+            tmp_mask = cv2.warpAffine(tmp_mask, tfm_inv, (width, height), flags=3)
+
+            if min(fh, fw)<100: # gaussian filter for small faces
+                ef = cv2.filter2D(ef, -1, self.kernel)
+            
+            tmp_img = cv2.warpAffine(ef, tfm_inv, (width, height), flags=3)
+
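+            # composite this face only where its mask is stronger than what has
+            # already been pasted, so overlapping detections do not overwrite
+            # each other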
+            mask = tmp_mask - full_mask
+            full_mask[np.where(mask>0)] = tmp_mask[np.where(mask>0)]
+            full_img[np.where(mask>0)] = tmp_img[np.where(mask>0)]
+
+        full_mask = full_mask[:, :, np.newaxis]
+        if self.use_sr and img_sr is not None:
+            img = cv2.convertScaleAbs(img_sr*(1-full_mask) + full_img*full_mask)
+        else:
+            img = cv2.convertScaleAbs(img*(1-full_mask) + full_img*full_mask)
+
+        return img, orig_faces, enhanced_faces
+        
+
+if __name__=='__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model', type=str, default='GPEN-BFR-512', help='GPEN model')
+    parser.add_argument('--key', type=str, default=None, help='key of GPEN model')
+    parser.add_argument('--size', type=int, default=512, help='resolution of GPEN')
+    parser.add_argument('--channel_multiplier', type=int, default=2, help='channel multiplier of GPEN')
+    parser.add_argument('--narrow', type=float, default=1, help='channel narrow scale')
+    parser.add_argument('--use_sr', action='store_true', help='use sr or not')
+    parser.add_argument('--use_cuda', action='store_true', help='use cuda or not')
+    parser.add_argument('--sr_model', type=str, default='rrdb_realesrnet_psnr', help='SR model')
+    parser.add_argument('--sr_scale', type=int, default=2, help='SR scale')
+    parser.add_argument('--indir', type=str, default='examples/imgs', help='input folder')
+    parser.add_argument('--outdir', type=str, default='results/outs-BFR', help='output folder')
+    args = parser.parse_args()
+
+    #model = {'name':'GPEN-BFR-512', 'size':512, 'channel_multiplier':2, 'narrow':1}
+    #model = {'name':'GPEN-BFR-256', 'size':256, 'channel_multiplier':1, 'narrow':0.5}
+    
+    os.makedirs(args.outdir, exist_ok=True)
+
+    faceenhancer = FaceEnhancement(size=args.size, model=args.model, use_sr=args.use_sr, sr_model=args.sr_model, channel_multiplier=args.channel_multiplier, narrow=args.narrow, key=args.key, device='cuda' if args.use_cuda else 'cpu')
+
+    files = sorted(glob.glob(os.path.join(args.indir, '*.*g')))
+    for n, file in enumerate(files[:]):
+        filename = os.path.basename(file)
+        
+        im = cv2.imread(file, cv2.IMREAD_COLOR) # BGR
+        if not isinstance(im, np.ndarray): print(filename, 'error'); continue
+        #im = cv2.resize(im, (0,0), fx=2, fy=2) # optional
+
+        img, orig_faces, enhanced_faces = faceenhancer.process(im)
+        
+        im = cv2.resize(im, img.shape[:2][::-1])
+        cv2.imwrite(os.path.join(args.outdir, '.'.join(filename.split('.')[:-1])+'_COMP.jpg'), np.hstack((im, img)))
+        cv2.imwrite(os.path.join(args.outdir, '.'.join(filename.split('.')[:-1])+'_GPEN.jpg'), img)
+        
+        for m, (ef, of) in enumerate(zip(enhanced_faces, orig_faces)):
+            of = cv2.resize(of, ef.shape[:2])
+            cv2.imwrite(os.path.join(args.outdir, '.'.join(filename.split('.')[:-1])+'_face%02d'%m+'.jpg'), np.hstack((of, ef)))
+        
+        if n%10==0: print(n, filename)
+        
diff --git a/face_inpainting.py b/face_inpainting.py
new file mode 100755
index 0000000000000000000000000000000000000000..37c1a940ef26a44cd5923dd40b1ef98fb4dff281
--- /dev/null
+++ b/face_inpainting.py
@@ -0,0 +1,101 @@
+'''
+@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+@author: yangxy (yangtao9009@gmail.com)
+'''
+import os
+import cv2
+import glob
+import time
+import math
+import numpy as np
+from PIL import Image, ImageDraw
+import __init_paths
+from face_model.face_gan import FaceGAN
+
+# modified by yangxy
+def brush_stroke_mask(img, color=(255,255,255)):
+    min_num_vertex = 8
+    max_num_vertex = 28
+    mean_angle = 2*math.pi / 5
+    angle_range = 2*math.pi / 15
+    min_width = 12
+    max_width = 80
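+    # generate_mask draws 1 to 3 random thick polylines with rounded joints
+    # onto the image, producing the free-form occlusions used for inpainting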
+    def generate_mask(H, W, img=None):
+        average_radius = math.sqrt(H*H+W*W) / 8
+        mask = Image.new('RGB', (W, H), 0)
+        if img is not None: mask = img #Image.fromarray(img)
+
+        for _ in range(np.random.randint(1, 4)):
+            num_vertex = np.random.randint(min_num_vertex, max_num_vertex)
+            angle_min = mean_angle - np.random.uniform(0, angle_range)
+            angle_max = mean_angle + np.random.uniform(0, angle_range)
+            angles = []
+            vertex = []
+            for i in range(num_vertex):
+                if i % 2 == 0:
+                    angles.append(2*math.pi - np.random.uniform(angle_min, angle_max))
+                else:
+                    angles.append(np.random.uniform(angle_min, angle_max))
+
+            w, h = mask.size  # PIL Image.size is (width, height)
+            vertex.append((int(np.random.randint(0, w)), int(np.random.randint(0, h))))
+            for i in range(num_vertex):
+                r = np.clip(
+                    np.random.normal(loc=average_radius, scale=average_radius//2),
+                    0, 2*average_radius)
+                new_x = np.clip(vertex[-1][0] + r * math.cos(angles[i]), 0, w)
+                new_y = np.clip(vertex[-1][1] + r * math.sin(angles[i]), 0, h)
+                vertex.append((int(new_x), int(new_y)))
+
+            draw = ImageDraw.Draw(mask)
+            width = int(np.random.uniform(min_width, max_width))
+            draw.line(vertex, fill=color, width=width)
+            for v in vertex:
+                draw.ellipse((v[0] - width//2,
+                              v[1] - width//2,
+                              v[0] + width//2,
+                              v[1] + width//2),
+                             fill=color)
+
+        return mask
+
+    width, height = img.size
+    mask = generate_mask(height, width, img)
+    return mask
+
+class FaceInpainting(object):
+    def __init__(self, base_dir='./', size=1024, model=None, channel_multiplier=2):
+        self.facegan = FaceGAN(base_dir, size, model, channel_multiplier)
+
+    # make sure the face image is well aligned. Please refer to face_enhancement.py
+    def process(self, brokenf):
+        # complete the face
+        out = self.facegan.process(brokenf)
+
+        return out
+
+if __name__=='__main__':
+    model = {'name':'GPEN-Inpainting-1024', 'size':1024}
+    
+    indir = 'examples/ffhq-10'
+    outdir = 'examples/outs-inpainting'
+    os.makedirs(outdir, exist_ok=True)
+
+    faceinpainter = FaceInpainting(size=model['size'], model=model['name'], channel_multiplier=2)
+
+    files = sorted(glob.glob(os.path.join(indir, '*.*g')))
+    for n, file in enumerate(files[:]):
+        filename = os.path.basename(file)
+        
+        originf = cv2.imread(file, cv2.IMREAD_COLOR)
+        
+        brokenf = np.asarray(brush_stroke_mask(Image.fromarray(originf)))
+
+        completef = faceinpainter.process(brokenf)
+        
+        originf = cv2.resize(originf, completef.shape[:2])
+        brokenf = cv2.resize(brokenf, completef.shape[:2])
+        cv2.imwrite(os.path.join(outdir, '.'.join(filename.split('.')[:-1])+'.jpg'), np.hstack((brokenf, completef, originf)))
+        
+        if n%10==0: print(n, file)
+        
diff --git a/face_model/face_gan.py b/face_model/face_gan.py
new file mode 100755
index 0000000000000000000000000000000000000000..163b3b3dfc54437e1fc150d8861a7097df2fb9b7
--- /dev/null
+++ b/face_model/face_gan.py
@@ -0,0 +1,57 @@
+'''
+@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+@author: yangxy (yangtao9009@gmail.com)
+'''
+import torch
+import os
+import cv2
+import glob
+import numpy as np
+from torch import nn
+import torch.nn.functional as F
+from torchvision import transforms, utils
+from gpen_model import FullGenerator
+
+class FaceGAN(object):
+    def __init__(self, base_dir='./', size=512, model=None, channel_multiplier=2, narrow=1, key=None, is_norm=True, device='cuda'):
+        self.mfile = os.path.join(base_dir, 'weights', model+'.pth')
+        self.n_mlp = 8
+        self.device = device
+        self.is_norm = is_norm
+        self.resolution = size
+        self.key = key
+        self.load_model(channel_multiplier, narrow)
+
+    def load_model(self, channel_multiplier=2, narrow=1):
+        self.model = FullGenerator(self.resolution, 512, self.n_mlp, channel_multiplier, narrow=narrow, device=self.device)
+        pretrained_dict = torch.load(self.mfile, map_location=torch.device('cpu'))
+        if self.key is not None: pretrained_dict = pretrained_dict[self.key]
+        self.model.load_state_dict(pretrained_dict)
+        self.model.to(self.device)
+        self.model.eval()
+
+    def process(self, img):
+        img = cv2.resize(img, (self.resolution, self.resolution))
+        img_t = self.img2tensor(img)
+
+        with torch.no_grad():
+            out, __ = self.model(img_t)
+
+        out = self.tensor2img(out)
+
+        return out
+
+    def img2tensor(self, img):
+        img_t = torch.from_numpy(img).to(self.device)/255.
+        if self.is_norm:
+            img_t = (img_t - 0.5) / 0.5
+        img_t = img_t.permute(2, 0, 1).unsqueeze(0).flip(1) # BGR->RGB
+        return img_t
+
+    def tensor2img(self, img_t, pmax=255.0, imtype=np.uint8):
+        if self.is_norm:
+            img_t = img_t * 0.5 + 0.5
+        img_t = img_t.squeeze(0).permute(1, 2, 0).flip(2) # RGB->BGR
+        img_np = np.clip(img_t.float().cpu().numpy(), 0, 1) * pmax
+
+        return img_np.astype(imtype)
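+
+
+if __name__ == '__main__':
+    # Hedged sanity check that needs no model weights: img2tensor's BGR->RGB
+    # flip and [-1, 1] normalization should be undone by tensor2img up to
+    # uint8 rounding. FaceGAN.__new__ is used only to skip weight loading.
+    gan = FaceGAN.__new__(FaceGAN)
+    gan.device, gan.is_norm = 'cpu', True
+    dummy = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)
+    restored = gan.tensor2img(gan.img2tensor(dummy))
+    assert np.abs(dummy.astype(int) - restored.astype(int)).max() <= 1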
diff --git a/face_model/gpen_model.py b/face_model/gpen_model.py
new file mode 100755
index 0000000000000000000000000000000000000000..b700c5369190102b4cceda3f023488f859ff86cf
--- /dev/null
+++ b/face_model/gpen_model.py
@@ -0,0 +1,747 @@
+'''
+@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+@author: yangxy (yangtao9009@gmail.com)
+'''
+import math
+import random
+import functools
+import operator
+import itertools
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.autograd import Function
+
+from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
+
+class PixelNorm(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, input):
+        return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
+
+
+def make_kernel(k):
+    k = torch.tensor(k, dtype=torch.float32)
+
+    if k.ndim == 1:
+        k = k[None, :] * k[:, None]
+
+    k /= k.sum()
+
+    return k
+
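+# For reference: make_kernel([1, 3, 3, 1]) is the separable 4x4 binomial blur
+# used below, i.e. outer([1, 3, 3, 1], [1, 3, 3, 1]) / 64.
+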
+
+class Upsample(nn.Module):
+    def __init__(self, kernel, factor=2, device='cpu'):
+        super().__init__()
+
+        self.factor = factor
+        kernel = make_kernel(kernel) * (factor ** 2)
+        self.register_buffer('kernel', kernel)
+
+        p = kernel.shape[0] - factor
+
+        pad0 = (p + 1) // 2 + factor - 1
+        pad1 = p // 2
+
+        self.pad = (pad0, pad1)
+        self.device = device
+
+    def forward(self, input):
+        out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad, device=self.device)
+
+        return out
+
+
+class Downsample(nn.Module):
+    def __init__(self, kernel, factor=2, device='cpu'):
+        super().__init__()
+
+        self.factor = factor
+        kernel = make_kernel(kernel)
+        self.register_buffer('kernel', kernel)
+
+        p = kernel.shape[0] - factor
+
+        pad0 = (p + 1) // 2
+        pad1 = p // 2
+
+        self.pad = (pad0, pad1)
+        self.device = device
+
+    def forward(self, input):
+        out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad, device=self.device)
+
+        return out
+
+
+class Blur(nn.Module):
+    def __init__(self, kernel, pad, upsample_factor=1, device='cpu'):
+        super().__init__()
+
+        kernel = make_kernel(kernel)
+
+        if upsample_factor > 1:
+            kernel = kernel * (upsample_factor ** 2)
+
+        self.register_buffer('kernel', kernel)
+
+        self.pad = pad
+        self.device = device
+
+    def forward(self, input):
+        out = upfirdn2d(input, self.kernel, pad=self.pad, device=self.device)
+
+        return out
+
+
+class EqualConv2d(nn.Module):
+    def __init__(
+        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
+    ):
+        super().__init__()
+
+        self.weight = nn.Parameter(
+            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
+        )
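+        # equalized learning rate: weights stay ~N(0, 1) and are rescaled by
+        # 1/sqrt(fan_in) on every forward pass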
+        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
+
+        self.stride = stride
+        self.padding = padding
+
+        if bias:
+            self.bias = nn.Parameter(torch.zeros(out_channel))
+
+        else:
+            self.bias = None
+
+    def forward(self, input):
+        out = F.conv2d(
+            input,
+            self.weight * self.scale,
+            bias=self.bias,
+            stride=self.stride,
+            padding=self.padding,
+        )
+
+        return out
+
+    def __repr__(self):
+        return (
+            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
+            f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
+        )
+
+
+class EqualLinear(nn.Module):
+    def __init__(
+        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None, device='cpu'
+    ):
+        super().__init__()
+
+        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
+
+        if bias:
+            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
+
+        else:
+            self.bias = None
+
+        self.activation = activation
+        self.device = device
+
+        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
+        self.lr_mul = lr_mul
+
+    def forward(self, input):
+        if self.activation:
+            out = F.linear(input, self.weight * self.scale)
+            out = fused_leaky_relu(out, self.bias * self.lr_mul, device=self.device)
+
+        else:
+            out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul)
+
+        return out
+
+    def __repr__(self):
+        return (
+            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
+        )
+
+
+class ScaledLeakyReLU(nn.Module):
+    def __init__(self, negative_slope=0.2):
+        super().__init__()
+
+        self.negative_slope = negative_slope
+
+    def forward(self, input):
+        out = F.leaky_relu(input, negative_slope=self.negative_slope)
+
+        return out * math.sqrt(2)
+
+
+class ModulatedConv2d(nn.Module):
+    def __init__(
+        self,
+        in_channel,
+        out_channel,
+        kernel_size,
+        style_dim,
+        demodulate=True,
+        upsample=False,
+        downsample=False,
+        blur_kernel=[1, 3, 3, 1],
+        device='cpu'
+    ):
+        super().__init__()
+
+        self.eps = 1e-8
+        self.kernel_size = kernel_size
+        self.in_channel = in_channel
+        self.out_channel = out_channel
+        self.upsample = upsample
+        self.downsample = downsample
+
+        if upsample:
+            factor = 2
+            p = (len(blur_kernel) - factor) - (kernel_size - 1)
+            pad0 = (p + 1) // 2 + factor - 1
+            pad1 = p // 2 + 1
+
+            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor, device=device)
+
+        if downsample:
+            factor = 2
+            p = (len(blur_kernel) - factor) + (kernel_size - 1)
+            pad0 = (p + 1) // 2
+            pad1 = p // 2
+
+            self.blur = Blur(blur_kernel, pad=(pad0, pad1), device=device)
+
+        fan_in = in_channel * kernel_size ** 2
+        self.scale = 1 / math.sqrt(fan_in)
+        self.padding = kernel_size // 2
+
+        self.weight = nn.Parameter(
+            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
+        )
+
+        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
+
+        self.demodulate = demodulate
+
+    def __repr__(self):
+        return (
+            f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
+            f'upsample={self.upsample}, downsample={self.downsample})'
+        )
+
+    def forward(self, input, style):
+        batch, in_channel, height, width = input.shape
+
+        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
+        weight = self.scale * self.weight * style
+
+        if self.demodulate:
+            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
+            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
+
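+        # fold the batch into the group dimension so one grouped convolution
+        # applies a different modulated kernel to every sample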
+        weight = weight.view(
+            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
+        )
+
+        if self.upsample:
+            input = input.view(1, batch * in_channel, height, width)
+            weight = weight.view(
+                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
+            )
+            weight = weight.transpose(1, 2).reshape(
+                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
+            )
+            out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
+            _, _, height, width = out.shape
+            out = out.view(batch, self.out_channel, height, width)
+            out = self.blur(out)
+
+        elif self.downsample:
+            input = self.blur(input)
+            _, _, height, width = input.shape
+            input = input.view(1, batch * in_channel, height, width)
+            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
+            _, _, height, width = out.shape
+            out = out.view(batch, self.out_channel, height, width)
+
+        else:
+            input = input.view(1, batch * in_channel, height, width)
+            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
+            _, _, height, width = out.shape
+            out = out.view(batch, self.out_channel, height, width)
+
+        return out
+
+
+class NoiseInjection(nn.Module):
+    def __init__(self, isconcat=True):
+        super().__init__()
+
+        self.isconcat = isconcat
+        self.weight = nn.Parameter(torch.zeros(1))
+
+    def forward(self, image, noise=None):
+        if noise is None:
+            batch, _, height, width = image.shape
+            noise = image.new_empty(batch, 1, height, width).normal_()
+
+        if self.isconcat:
+            return torch.cat((image, self.weight * noise), dim=1)
+        else:
+            return image + self.weight * noise
+
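+# Note: with isconcat=True (the GPEN default) the noise map is concatenated
+# along the channel axis instead of added, which is why downstream layers use
+# feat_multiplier = 2.
+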
+
+class ConstantInput(nn.Module):
+    def __init__(self, channel, size=4):
+        super().__init__()
+
+        self.input = nn.Parameter(torch.randn(1, channel, size, size))
+
+    def forward(self, input):
+        batch = input.shape[0]
+        out = self.input.repeat(batch, 1, 1, 1)
+
+        return out
+
+
+class StyledConv(nn.Module):
+    def __init__(
+        self,
+        in_channel,
+        out_channel,
+        kernel_size,
+        style_dim,
+        upsample=False,
+        blur_kernel=[1, 3, 3, 1],
+        demodulate=True,
+        isconcat=True,
+        device='cpu'
+    ):
+        super().__init__()
+
+        self.conv = ModulatedConv2d(
+            in_channel,
+            out_channel,
+            kernel_size,
+            style_dim,
+            upsample=upsample,
+            blur_kernel=blur_kernel,
+            demodulate=demodulate,
+            device=device
+        )
+
+        self.noise = NoiseInjection(isconcat)
+        #self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
+        #self.activate = ScaledLeakyReLU(0.2)
+        feat_multiplier = 2 if isconcat else 1
+        self.activate = FusedLeakyReLU(out_channel*feat_multiplier, device=device)
+
+    def forward(self, input, style, noise=None):
+        out = self.conv(input, style)
+        out = self.noise(out, noise=noise)
+        # out = out + self.bias
+        out = self.activate(out)
+
+        return out
+
+
+class ToRGB(nn.Module):
+    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1], device='cpu'):
+        super().__init__()
+
+        if upsample:
+            self.upsample = Upsample(blur_kernel, device=device)
+
+        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False, device=device)
+        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
+
+    def forward(self, input, style, skip=None):
+        out = self.conv(input, style)
+        out = out + self.bias
+
+        if skip is not None:
+            skip = self.upsample(skip)
+
+            out = out + skip
+
+        return out
+
+class Generator(nn.Module):
+    def __init__(
+        self,
+        size,
+        style_dim,
+        n_mlp,
+        channel_multiplier=2,
+        blur_kernel=[1, 3, 3, 1],
+        lr_mlp=0.01,
+        isconcat=True,
+        narrow=1,
+        device='cpu'
+    ):
+        super().__init__()
+
+        self.size = size
+        self.n_mlp = n_mlp
+        self.style_dim = style_dim
+        self.feat_multiplier = 2 if isconcat else 1
+
+        layers = [PixelNorm()]
+
+        for i in range(n_mlp):
+            layers.append(
+                EqualLinear(
+                    style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu', device=device
+                )
+            )
+
+        self.style = nn.Sequential(*layers)
+
+        self.channels = {
+            4: int(512 * narrow),
+            8: int(512 * narrow),
+            16: int(512 * narrow),
+            32: int(512 * narrow),
+            64: int(256 * channel_multiplier * narrow),
+            128: int(128 * channel_multiplier * narrow),
+            256: int(64 * channel_multiplier * narrow),
+            512: int(32 * channel_multiplier * narrow),
+            1024: int(16 * channel_multiplier * narrow)
+        }
+
+        self.input = ConstantInput(self.channels[4])
+        self.conv1 = StyledConv(
+            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel, isconcat=isconcat, device=device
+        )
+        self.to_rgb1 = ToRGB(self.channels[4]*self.feat_multiplier, style_dim, upsample=False, device=device)
+
+        self.log_size = int(math.log(size, 2))
+
+        self.convs = nn.ModuleList()
+        self.upsamples = nn.ModuleList()
+        self.to_rgbs = nn.ModuleList()
+
+        in_channel = self.channels[4]
+
+        for i in range(3, self.log_size + 1):
+            out_channel = self.channels[2 ** i]
+
+            self.convs.append(
+                StyledConv(
+                    in_channel*self.feat_multiplier,
+                    out_channel,
+                    3,
+                    style_dim,
+                    upsample=True,
+                    blur_kernel=blur_kernel,
+                    isconcat=isconcat,
+                    device=device
+                )
+            )
+
+            self.convs.append(
+                StyledConv(
+                    out_channel*self.feat_multiplier, out_channel, 3, style_dim, blur_kernel=blur_kernel, isconcat=isconcat, device=device
+                )
+            )
+
+            self.to_rgbs.append(ToRGB(out_channel*self.feat_multiplier, style_dim, device=device))
+
+            in_channel = out_channel
+
+        self.n_latent = self.log_size * 2 - 2
+
+    def make_noise(self):
+        device = self.input.input.device
+
+        noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
+
+        for i in range(3, self.log_size + 1):
+            for _ in range(2):
+                noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
+
+        return noises
+
+    def mean_latent(self, n_latent):
+        latent_in = torch.randn(
+            n_latent, self.style_dim, device=self.input.input.device
+        )
+        latent = self.style(latent_in).mean(0, keepdim=True)
+
+        return latent
+
+    def get_latent(self, input):
+        return self.style(input)
+
+    def forward(
+        self,
+        styles,
+        return_latents=False,
+        inject_index=None,
+        truncation=1,
+        truncation_latent=None,
+        input_is_latent=False,
+        noise=None,
+    ):
+        if not input_is_latent:
+            styles = [self.style(s) for s in styles]
+
+        if noise is None:
+            '''
+            noise = [None] * (2 * (self.log_size - 2) + 1)
+            '''
+            noise = []
+            batch = styles[0].shape[0]
+            for i in range(self.n_mlp + 1):
+                size = 2 ** (i+2)
+                noise.append(torch.randn(batch, self.channels[size], size, size, device=styles[0].device))
+            
+        if truncation < 1:
+            style_t = []
+
+            for style in styles:
+                style_t.append(
+                    truncation_latent + truncation * (style - truncation_latent)
+                )
+
+            styles = style_t
+
+        if len(styles) < 2:
+            inject_index = self.n_latent
+
+            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
+
+        else:
+            if inject_index is None:
+                inject_index = random.randint(1, self.n_latent - 1)
+
+            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
+            latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
+
+            latent = torch.cat([latent, latent2], 1)
+
+        out = self.input(latent)
+        out = self.conv1(out, latent[:, 0], noise=noise[0])
+
+        skip = self.to_rgb1(out, latent[:, 1])
+
+        i = 1
+        for conv1, conv2, noise1, noise2, to_rgb in zip(
+            self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
+        ):
+            out = conv1(out, latent[:, i], noise=noise1)
+            out = conv2(out, latent[:, i + 1], noise=noise2)
+            skip = to_rgb(out, latent[:, i + 2], skip)
+
+            i += 2
+
+        image = skip
+
+        if return_latents:
+            return image, latent
+
+        else:
+            return image, None
+
+class ConvLayer(nn.Sequential):
+    def __init__(
+        self,
+        in_channel,
+        out_channel,
+        kernel_size,
+        downsample=False,
+        blur_kernel=[1, 3, 3, 1],
+        bias=True,
+        activate=True,
+        device='cpu'
+    ):
+        layers = []
+
+        if downsample:
+            factor = 2
+            p = (len(blur_kernel) - factor) + (kernel_size - 1)
+            pad0 = (p + 1) // 2
+            pad1 = p // 2
+
+            layers.append(Blur(blur_kernel, pad=(pad0, pad1), device=device))
+
+            stride = 2
+            self.padding = 0
+
+        else:
+            stride = 1
+            self.padding = kernel_size // 2
+
+        layers.append(
+            EqualConv2d(
+                in_channel,
+                out_channel,
+                kernel_size,
+                padding=self.padding,
+                stride=stride,
+                bias=bias and not activate,
+            )
+        )
+
+        if activate:
+            if bias:
+                layers.append(FusedLeakyReLU(out_channel, device=device))
+
+            else:
+                layers.append(ScaledLeakyReLU(0.2))
+
+        super().__init__(*layers)
+
+
+class ResBlock(nn.Module):
+    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1], device='cpu'):
+        super().__init__()
+
+        self.conv1 = ConvLayer(in_channel, in_channel, 3, device=device)
+        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True, device=device)
+
+        self.skip = ConvLayer(
+            in_channel, out_channel, 1, downsample=True, activate=False, bias=False
+        )
+
+    def forward(self, input):
+        out = self.conv1(input)
+        out = self.conv2(out)
+
+        skip = self.skip(input)
+        out = (out + skip) / math.sqrt(2)
+
+        return out
+
+class FullGenerator(nn.Module):
+    def __init__(
+        self,
+        size,
+        style_dim,
+        n_mlp,
+        channel_multiplier=2,
+        blur_kernel=[1, 3, 3, 1],
+        lr_mlp=0.01,
+        isconcat=True,
+        narrow=1,
+        device='cpu'
+    ):
+        super().__init__()
+        channels = {
+            4: int(512 * narrow),
+            8: int(512 * narrow),
+            16: int(512 * narrow),
+            32: int(512 * narrow),
+            64: int(256 * channel_multiplier * narrow),
+            128: int(128 * channel_multiplier * narrow),
+            256: int(64 * channel_multiplier * narrow),
+            512: int(32 * channel_multiplier * narrow),
+            1024: int(16 * channel_multiplier * narrow)
+        }
+
+        self.log_size = int(math.log(size, 2))
+        self.generator = Generator(size, style_dim, n_mlp, channel_multiplier=channel_multiplier, blur_kernel=blur_kernel, lr_mlp=lr_mlp, isconcat=isconcat, narrow=narrow, device=device)
+        
+        conv = [ConvLayer(3, channels[size], 1, device=device)]
+        self.ecd0 = nn.Sequential(*conv)
+        in_channel = channels[size]
+
+        self.names = ['ecd%d'%i for i in range(self.log_size-1)]
+        for i in range(self.log_size, 2, -1):
+            out_channel = channels[2 ** (i - 1)]
+            #conv = [ResBlock(in_channel, out_channel, blur_kernel)]
+            conv = [ConvLayer(in_channel, out_channel, 3, downsample=True, device=device)] 
+            setattr(self, self.names[self.log_size-i+1], nn.Sequential(*conv))
+            in_channel = out_channel
+        self.final_linear = nn.Sequential(EqualLinear(channels[4] * 4 * 4, style_dim, activation='fused_lrelu', device=device))
+
+    def forward(self,
+        inputs,
+        return_latents=False,
+        inject_index=None,
+        truncation=1,
+        truncation_latent=None,
+        input_is_latent=False,
+    ):
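+        # encoder pass: keep each downsampling stage's feature map; these maps
+        # are later injected into the GAN-prior generator in place of random
+        # noise, conditioning it on the degraded input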
+        noise = []
+        for i in range(self.log_size-1):
+            ecd = getattr(self, self.names[i])
+            inputs = ecd(inputs)
+            noise.append(inputs)
+            #print(inputs.shape)
+        inputs = inputs.view(inputs.shape[0], -1)
+        outs = self.final_linear(inputs)
+        #print(outs.shape)
+        noise = list(itertools.chain.from_iterable(itertools.repeat(x, 2) for x in noise))[::-1]
+        outs = self.generator([outs], return_latents, inject_index, truncation, truncation_latent, input_is_latent, noise=noise[1:])
+        return outs
+
+class Discriminator(nn.Module):
+    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], narrow=1, device='cpu'):
+        super().__init__()
+
+        channels = {
+            4: int(512 * narrow),
+            8: int(512 * narrow),
+            16: int(512 * narrow),
+            32: int(512 * narrow),
+            64: int(256 * channel_multiplier * narrow),
+            128: int(128 * channel_multiplier * narrow),
+            256: int(64 * channel_multiplier * narrow),
+            512: int(32 * channel_multiplier * narrow),
+            1024: int(16 * channel_multiplier * narrow)
+        }
+
+        convs = [ConvLayer(3, channels[size], 1, device=device)]
+
+        log_size = int(math.log(size, 2))
+
+        in_channel = channels[size]
+
+        for i in range(log_size, 2, -1):
+            out_channel = channels[2 ** (i - 1)]
+
+            convs.append(ResBlock(in_channel, out_channel, blur_kernel, device=device))
+
+            in_channel = out_channel
+
+        self.convs = nn.Sequential(*convs)
+
+        self.stddev_group = 4
+        self.stddev_feat = 1
+
+        self.final_conv = ConvLayer(in_channel + 1, channels[4], 3, device=device)
+        self.final_linear = nn.Sequential(
+            EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu', device=device),
+            EqualLinear(channels[4], 1),
+        )
+
+    def forward(self, input):
+        out = self.convs(input)
+
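+        # minibatch standard deviation: append the per-group feature stddev as
+        # an extra channel so the discriminator can sense low sample diversity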
+        batch, channel, height, width = out.shape
+        group = min(batch, self.stddev_group)
+        stddev = out.view(
+            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
+        )
+        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
+        stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2)
+        stddev = stddev.repeat(group, 1, height, width)
+        out = torch.cat([out, stddev], 1)
+
+        out = self.final_conv(out)
+
+        out = out.view(batch, -1)
+        out = self.final_linear(out)
+        return out
diff --git a/face_model/op/__init__.py b/face_model/op/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..d0918d92285955855be89f00096b888ee5597ce3
--- /dev/null
+++ b/face_model/op/__init__.py
@@ -0,0 +1,2 @@
+from .fused_act import FusedLeakyReLU, fused_leaky_relu
+from .upfirdn2d import upfirdn2d
diff --git a/face_model/op/fused_act.py b/face_model/op/fused_act.py
new file mode 100755
index 0000000000000000000000000000000000000000..59db126ebcb59423cadd12baa830cbadce8b0292
--- /dev/null
+++ b/face_model/op/fused_act.py
@@ -0,0 +1,96 @@
+import os
+import platform
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+from torch.autograd import Function
+from torch.utils.cpp_extension import load, _import_module_from_library
+
+# if running GPEN without CUDA, please comment out lines 11-19
+if platform.system() == 'Linux' and torch.cuda.is_available():
+    module_path = os.path.dirname(__file__)
+    fused = load(
+        'fused',
+        sources=[
+            os.path.join(module_path, 'fused_bias_act.cpp'),
+            os.path.join(module_path, 'fused_bias_act_kernel.cu'),
+        ],
+    )
+
+
+#fused = _import_module_from_library('fused', '/tmp/torch_extensions/fused', True)
+
+
+class FusedLeakyReLUFunctionBackward(Function):
+    @staticmethod
+    def forward(ctx, grad_output, out, negative_slope, scale):
+        ctx.save_for_backward(out)
+        ctx.negative_slope = negative_slope
+        ctx.scale = scale
+
+        empty = grad_output.new_empty(0)
+
+        grad_input = fused.fused_bias_act(
+            grad_output, empty, out, 3, 1, negative_slope, scale
+        )
+
+        dim = [0]
+
+        if grad_input.ndim > 2:
+            dim += list(range(2, grad_input.ndim))
+
+        grad_bias = grad_input.sum(dim).detach()
+
+        return grad_input, grad_bias
+
+    @staticmethod
+    def backward(ctx, gradgrad_input, gradgrad_bias):
+        out, = ctx.saved_tensors
+        gradgrad_out = fused.fused_bias_act(
+            gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
+        )
+
+        return gradgrad_out, None, None, None
+
+
+class FusedLeakyReLUFunction(Function):
+    @staticmethod
+    def forward(ctx, input, bias, negative_slope, scale):
+        empty = input.new_empty(0)
+        out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
+        ctx.save_for_backward(out)
+        ctx.negative_slope = negative_slope
+        ctx.scale = scale
+
+        return out
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        out, = ctx.saved_tensors
+
+        grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
+            grad_output, out, ctx.negative_slope, ctx.scale
+        )
+
+        return grad_input, grad_bias, None, None
+
+
+class FusedLeakyReLU(nn.Module):
+    def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5, device='cpu'):
+        super().__init__()
+
+        self.bias = nn.Parameter(torch.zeros(channel))
+        self.negative_slope = negative_slope
+        self.scale = scale
+        self.device = device
+
+    def forward(self, input):
+        return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale, self.device)
+
+
+def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5, device='cpu'):
+    if platform.system() == 'Linux' and torch.cuda.is_available() and device != 'cpu':
+        return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
+    else:
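+        # CPU / non-CUDA fallback: broadcast the bias over batch and spatial
+        # dims, then apply a leaky ReLU scaled to match the fused CUDA op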
+        return scale * F.leaky_relu(input + bias.view((1, -1)+(1,)*(len(input.shape)-2)), negative_slope=negative_slope)
diff --git a/face_model/op/fused_bias_act.cpp b/face_model/op/fused_bias_act.cpp
new file mode 100755
index 0000000000000000000000000000000000000000..02be898f970bcc8ea297867fcaa4e71b24b3d949
--- /dev/null
+++ b/face_model/op/fused_bias_act.cpp
@@ -0,0 +1,21 @@
+#include <torch/extension.h>
+
+
+torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
+    int act, int grad, float alpha, float scale);
+
+#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
+#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
+#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
+
+torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
+    int act, int grad, float alpha, float scale) {
+    CHECK_CUDA(input);
+    CHECK_CUDA(bias);
+
+    return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+    m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
+}
\ No newline at end of file
diff --git a/face_model/op/fused_bias_act_kernel.cu b/face_model/op/fused_bias_act_kernel.cu
new file mode 100755
index 0000000000000000000000000000000000000000..c9fa56fea7ede7072dc8925cfb0148f136eb85b8
--- /dev/null
+++ b/face_model/op/fused_bias_act_kernel.cu
@@ -0,0 +1,99 @@
+// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
+//
+// This work is made available under the Nvidia Source Code License-NC.
+// To view a copy of this license, visit
+// https://nvlabs.github.io/stylegan2/license.html
+
+#include <torch/types.h>
+
+#include <ATen/ATen.h>
+#include <ATen/AccumulateType.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <ATen/cuda/CUDAApplyUtils.cuh>
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+
+template <typename scalar_t>
+static __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref,
+    int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) {
+    int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x;
+
+    scalar_t zero = 0.0;
+
+    for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) {
+        scalar_t x = p_x[xi];
+
+        if (use_bias) {
+            x += p_b[(xi / step_b) % size_b];
+        }
+
+        scalar_t ref = use_ref ? p_ref[xi] : zero;
+
+        scalar_t y;
+
+        switch (act * 10 + grad) {
+            default:
+            case 10: y = x; break;
+            case 11: y = x; break;
+            case 12: y = 0.0; break;
+
+            case 30: y = (x > 0.0) ? x : x * alpha; break;
+            case 31: y = (ref > 0.0) ? x : x * alpha; break;
+            case 32: y = 0.0; break;
+        }
+
+        out[xi] = y * scale;
+    }
+}
+
+
+torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
+    int act, int grad, float alpha, float scale) {
+    int curDevice = -1;
+    cudaGetDevice(&curDevice);
+    cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
+
+    auto x = input.contiguous();
+    auto b = bias.contiguous();
+    auto ref = refer.contiguous();
+
+    int use_bias = b.numel() ? 1 : 0;
+    int use_ref = ref.numel() ? 1 : 0;
+
+    int size_x = x.numel();
+    int size_b = b.numel();
+    int step_b = 1;
+
+    for (int i = 1 + 1; i < x.dim(); i++) {
+        step_b *= x.size(i);
+    }
+
+    int loop_x = 4;
+    int block_size = 4 * 32;
+    int grid_size = (size_x - 1) / (loop_x * block_size) + 1;
+
+    auto y = torch::empty_like(x);
+
+    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "fused_bias_act_kernel", [&] {
+        fused_bias_act_kernel<scalar_t><<<grid_size, block_size, 0, stream>>>(
+            y.data_ptr<scalar_t>(),
+            x.data_ptr<scalar_t>(),
+            b.data_ptr<scalar_t>(),
+            ref.data_ptr<scalar_t>(),
+            act,
+            grad,
+            alpha,
+            scale,
+            loop_x,
+            size_x,
+            step_b,
+            size_b,
+            use_bias,
+            use_ref
+        );
+    });
+
+    return y;
+}
\ No newline at end of file
diff --git a/face_model/op/upfirdn2d.cpp b/face_model/op/upfirdn2d.cpp
new file mode 100755
index 0000000000000000000000000000000000000000..d2e633dc896433c205e18bc3e455539192ff968e
--- /dev/null
+++ b/face_model/op/upfirdn2d.cpp
@@ -0,0 +1,23 @@
+#include <torch/extension.h>
+
+
+torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
+                            int up_x, int up_y, int down_x, int down_y,
+                            int pad_x0, int pad_x1, int pad_y0, int pad_y1);
+
+#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
+#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
+#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
+
+torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel,
+                        int up_x, int up_y, int down_x, int down_y,
+                        int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
+    CHECK_CUDA(input);
+    CHECK_CUDA(kernel);
+
+    return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1);
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+    m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)");
+}
\ No newline at end of file
diff --git a/face_model/op/upfirdn2d.py b/face_model/op/upfirdn2d.py
new file mode 100755
index 0000000000000000000000000000000000000000..2e3844749dea0a79fed49f161d9760ee6b4c07fd
--- /dev/null
+++ b/face_model/op/upfirdn2d.py
@@ -0,0 +1,194 @@
+import os
+import platform
+
+import torch
+import torch.nn.functional as F
+from torch.autograd import Function
+from torch.utils.cpp_extension import load, _import_module_from_library
+
+# if running GPEN without CUDA, please comment out lines 10-18
+if platform.system() == 'Linux' and torch.cuda.is_available():
+    module_path = os.path.dirname(__file__)
+    upfirdn2d_op = load(
+        'upfirdn2d',
+        sources=[
+            os.path.join(module_path, 'upfirdn2d.cpp'),
+            os.path.join(module_path, 'upfirdn2d_kernel.cu'),
+        ],
+    )
+
+
+#upfirdn2d_op = _import_module_from_library('upfirdn2d', '/tmp/torch_extensions/upfirdn2d', True)
+
+class UpFirDn2dBackward(Function):
+    @staticmethod
+    def forward(
+        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
+    ):
+
+        up_x, up_y = up
+        down_x, down_y = down
+        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
+
+        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
+
+        grad_input = upfirdn2d_op.upfirdn2d(
+            grad_output,
+            grad_kernel,
+            down_x,
+            down_y,
+            up_x,
+            up_y,
+            g_pad_x0,
+            g_pad_x1,
+            g_pad_y0,
+            g_pad_y1,
+        )
+        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
+
+        ctx.save_for_backward(kernel)
+
+        pad_x0, pad_x1, pad_y0, pad_y1 = pad
+
+        ctx.up_x = up_x
+        ctx.up_y = up_y
+        ctx.down_x = down_x
+        ctx.down_y = down_y
+        ctx.pad_x0 = pad_x0
+        ctx.pad_x1 = pad_x1
+        ctx.pad_y0 = pad_y0
+        ctx.pad_y1 = pad_y1
+        ctx.in_size = in_size
+        ctx.out_size = out_size
+
+        return grad_input
+
+    @staticmethod
+    def backward(ctx, gradgrad_input):
+        kernel, = ctx.saved_tensors
+
+        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
+
+        gradgrad_out = upfirdn2d_op.upfirdn2d(
+            gradgrad_input,
+            kernel,
+            ctx.up_x,
+            ctx.up_y,
+            ctx.down_x,
+            ctx.down_y,
+            ctx.pad_x0,
+            ctx.pad_x1,
+            ctx.pad_y0,
+            ctx.pad_y1,
+        )
+        # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
+        gradgrad_out = gradgrad_out.view(
+            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
+        )
+
+        return gradgrad_out, None, None, None, None, None, None, None, None
+
+
+class UpFirDn2d(Function):
+    @staticmethod
+    def forward(ctx, input, kernel, up, down, pad):
+        up_x, up_y = up
+        down_x, down_y = down
+        pad_x0, pad_x1, pad_y0, pad_y1 = pad
+
+        kernel_h, kernel_w = kernel.shape
+        batch, channel, in_h, in_w = input.shape
+        ctx.in_size = input.shape
+
+        input = input.reshape(-1, in_h, in_w, 1)
+
+        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
+
+        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
+        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
+        ctx.out_size = (out_h, out_w)
+
+        ctx.up = (up_x, up_y)
+        ctx.down = (down_x, down_y)
+        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
+
+        g_pad_x0 = kernel_w - pad_x0 - 1
+        g_pad_y0 = kernel_h - pad_y0 - 1
+        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
+        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
+
+        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
+
+        out = upfirdn2d_op.upfirdn2d(
+            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
+        )
+        # out = out.view(major, out_h, out_w, minor)
+        out = out.view(-1, channel, out_h, out_w)
+
+        return out
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        kernel, grad_kernel = ctx.saved_tensors
+
+        grad_input = UpFirDn2dBackward.apply(
+            grad_output,
+            kernel,
+            grad_kernel,
+            ctx.up,
+            ctx.down,
+            ctx.pad,
+            ctx.g_pad,
+            ctx.in_size,
+            ctx.out_size,
+        )
+
+        return grad_input, None, None, None, None
+
+
+def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0), device='cpu'):
+    if platform.system() == 'Linux' and torch.cuda.is_available() and device != 'cpu':
+        out = UpFirDn2d.apply(
+            input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
+        )
+    else:
+        out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1])
+
+    return out
+
+
+def upfirdn2d_native(
+    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
+):
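+    # pure-PyTorch fallback: upsample by zero insertion, pad, correlate with
+    # the flipped kernel via conv2d, then downsample by strided slicing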
+    input = input.permute(0, 2, 3, 1)
+    _, in_h, in_w, minor = input.shape
+    kernel_h, kernel_w = kernel.shape
+    out = input.view(-1, in_h, 1, in_w, 1, minor)
+    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
+    out = out.view(-1, in_h * up_y, in_w * up_x, minor)
+
+    out = F.pad(
+        out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
+    )
+    out = out[
+        :,
+        max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
+        max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
+        :,
+    ]
+
+    out = out.permute(0, 3, 1, 2)
+    out = out.reshape(
+        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
+    )
+    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
+    out = F.conv2d(out, w)
+    out = out.reshape(
+        -1,
+        minor,
+        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
+        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
+    )
+    # out = out.permute(0, 2, 3, 1)
+    return out[:, :, ::down_y, ::down_x]
+
diff --git a/face_model/op/upfirdn2d_kernel.cu b/face_model/op/upfirdn2d_kernel.cu
new file mode 100755
index 0000000000000000000000000000000000000000..2a710aa6adc3d43ac93136a1814e3c39970e1c7e
--- /dev/null
+++ b/face_model/op/upfirdn2d_kernel.cu
@@ -0,0 +1,272 @@
+// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
+//
+// This work is made available under the Nvidia Source Code License-NC.
+// To view a copy of this license, visit
+// https://nvlabs.github.io/stylegan2/license.html
+
+#include <torch/types.h>
+
+#include <ATen/ATen.h>
+#include <ATen/AccumulateType.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <ATen/cuda/CUDAApplyUtils.cuh>
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+
+static __host__ __device__ __forceinline__ int floor_div(int a, int b) {
+    int c = a / b;
+
+    if (c * b > a) {
+        c--;
+    }
+
+    return c;
+}
+
+
+struct UpFirDn2DKernelParams {
+    int up_x;
+    int up_y;
+    int down_x;
+    int down_y;
+    int pad_x0;
+    int pad_x1;
+    int pad_y0;
+    int pad_y1;
+
+    int major_dim;
+    int in_h;
+    int in_w;
+    int minor_dim;
+    int kernel_h;
+    int kernel_w;
+    int out_h;
+    int out_w;
+    int loop_major;
+    int loop_x;
+};
+
+
+template <typename scalar_t, int up_x, int up_y, int down_x, int down_y, int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>
+__global__ void upfirdn2d_kernel(scalar_t* out, const scalar_t* input, const scalar_t* kernel, const UpFirDn2DKernelParams p) {
+    const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;
+    const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;
+
+    __shared__ volatile float sk[kernel_h][kernel_w];
+    __shared__ volatile float sx[tile_in_h][tile_in_w];
+
+    int minor_idx = blockIdx.x;
+    int tile_out_y = minor_idx / p.minor_dim;
+    minor_idx -= tile_out_y * p.minor_dim;
+    tile_out_y *= tile_out_h;
+    int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;
+    int major_idx_base = blockIdx.z * p.loop_major;
+
+    if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h | major_idx_base >= p.major_dim) {
+        return;
+    }
+
+    for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w; tap_idx += blockDim.x) {
+        int ky = tap_idx / kernel_w;
+        int kx = tap_idx - ky * kernel_w;
+        scalar_t v = 0.0;
+
+        if (kx < p.kernel_w & ky < p.kernel_h) {
+            v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)];
+        }
+
+        sk[ky][kx] = v;
+    }
+
+    for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major & major_idx < p.major_dim; loop_major++, major_idx++) {
+        for (int loop_x = 0, tile_out_x = tile_out_x_base; loop_x < p.loop_x & tile_out_x < p.out_w; loop_x++, tile_out_x += tile_out_w) {
+            int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0;
+            int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0;
+            int tile_in_x = floor_div(tile_mid_x, up_x);
+            int tile_in_y = floor_div(tile_mid_y, up_y);
+
+            __syncthreads();
+
+            for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; in_idx += blockDim.x) {
+                int rel_in_y = in_idx / tile_in_w;
+                int rel_in_x = in_idx - rel_in_y * tile_in_w;
+                int in_x = rel_in_x + tile_in_x;
+                int in_y = rel_in_y + tile_in_y;
+
+                scalar_t v = 0.0;
+
+                if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) {
+                    v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx];
+                }
+
+                sx[rel_in_y][rel_in_x] = v;
+            }
+
+            __syncthreads();
+            for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; out_idx += blockDim.x) {
+                int rel_out_y = out_idx / tile_out_w;
+                int rel_out_x = out_idx - rel_out_y * tile_out_w;
+                int out_x = rel_out_x + tile_out_x;
+                int out_y = rel_out_y + tile_out_y;
+
+                int mid_x = tile_mid_x + rel_out_x * down_x;
+                int mid_y = tile_mid_y + rel_out_y * down_y;
+                int in_x = floor_div(mid_x, up_x);
+                int in_y = floor_div(mid_y, up_y);
+                int rel_in_x = in_x - tile_in_x;
+                int rel_in_y = in_y - tile_in_y;
+                int kernel_x = (in_x + 1) * up_x - mid_x - 1;
+                int kernel_y = (in_y + 1) * up_y - mid_y - 1;
+
+                scalar_t v = 0.0;
+
+                #pragma unroll
+                for (int y = 0; y < kernel_h / up_y; y++)
+                    #pragma unroll
+                    for (int x = 0; x < kernel_w / up_x; x++)
+                        v += sx[rel_in_y + y][rel_in_x + x] * sk[kernel_y + y * up_y][kernel_x + x * up_x];
+
+                if (out_x < p.out_w & out_y < p.out_h) {
+                    out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v;
+                }
+            }
+        }
+    }
+}
+
+
+torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
+    int up_x, int up_y, int down_x, int down_y,
+    int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
+    int curDevice = -1;
+    cudaGetDevice(&curDevice);
+    cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
+
+    UpFirDn2DKernelParams p;
+
+    auto x = input.contiguous();
+    auto k = kernel.contiguous();
+
+    p.major_dim = x.size(0);
+    p.in_h = x.size(1);
+    p.in_w = x.size(2);
+    p.minor_dim = x.size(3);
+    p.kernel_h = k.size(0);
+    p.kernel_w = k.size(1);
+    p.up_x = up_x;
+    p.up_y = up_y;
+    p.down_x = down_x;
+    p.down_y = down_y;
+    p.pad_x0 = pad_x0;
+    p.pad_x1 = pad_x1;
+    p.pad_y0 = pad_y0;
+    p.pad_y1 = pad_y1;
+
+    p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / p.down_y;
+    p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / p.down_x;
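+    // Note (added for clarity): these match the Python fallback in
+    // upfirdn2d_native, e.g. up=2, down=1, pad=(2,1), kernel_h=4, in_h=16
+    // gives out_h = (16*2 + 2 + 1 - 4 + 1) / 1 = 32.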
+
+    auto out = at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options());
+
+    int mode = -1;
+
+    int tile_out_h = -1;
+    int tile_out_w = -1;
+
+    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) {
+        mode = 1;
+        tile_out_h = 16;
+        tile_out_w = 64;
+    }
+
+    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 3 && p.kernel_w <= 3) {
+        mode = 2;
+        tile_out_h = 16;
+        tile_out_w = 64;
+    }
+
+    if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) {
+        mode = 3;
+        tile_out_h = 16;
+        tile_out_w = 64;
+    }
+
+    if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 2 && p.kernel_w <= 2) {
+        mode = 4;
+        tile_out_h = 16;
+        tile_out_w = 64;
+    }
+
+    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 4 && p.kernel_w <= 4) {
+        mode = 5;
+        tile_out_h = 8;
+        tile_out_w = 32;
+    }
+
+    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 2 && p.kernel_w <= 2) {
+        mode = 6;
+        tile_out_h = 8;
+        tile_out_w = 32;
+    }
+
+    dim3 block_size;
+    dim3 grid_size;
+
+    if (tile_out_h > 0 && tile_out_w > 0) {
+        p.loop_major = (p.major_dim - 1) / 16384 + 1;
+        p.loop_x = 1;
+        block_size = dim3(32 * 8, 1, 1);
+        grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,
+                         (p.out_w - 1) / (p.loop_x * tile_out_w) + 1,
+                         (p.major_dim - 1) / p.loop_major + 1);
+    }
+
+    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
+        switch (mode) {
+        case 1:
+            upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>(
+                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+            );
+
+            break;
+
+        case 2:
+            upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64><<<grid_size, block_size, 0, stream>>>(
+                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+            );
+
+            break;
+
+        case 3:
+            upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>(
+                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+            );
+
+            break;
+
+        case 4:
+            upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64><<<grid_size, block_size, 0, stream>>>(
+                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+            );
+
+            break;
+
+        case 5:
+            upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32><<<grid_size, block_size, 0, stream>>>(
+                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+            );
+
+            break;
+
+        case 6:
+            upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 2, 2, 8, 32><<<grid_size, block_size, 0, stream>>>(
+                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+            );
+
+            break;
+        }
+    });
+
+    return out;
+}
\ No newline at end of file
diff --git a/face_parse/blocks.py b/face_parse/blocks.py
new file mode 100755
index 0000000000000000000000000000000000000000..fc7facb9ea3aa57d4897750ea65735473200852c
--- /dev/null
+++ b/face_parse/blocks.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+import torch
+import torch.nn as nn
+from torch.nn.parameter import Parameter
+from torch.nn import functional as F
+import numpy as np
+
+class NormLayer(nn.Module):
+    """Normalization Layers.
+    ------------
+    # Arguments
+        - channels: input channels, for batch norm and instance norm.
+        - normalize_shape: input shape without batch size, for layer norm.
+    """
+    def __init__(self, channels, normalize_shape=None, norm_type='bn', ref_channels=None):
+        super(NormLayer, self).__init__()
+        norm_type = norm_type.lower()
+        self.norm_type = norm_type
+        if norm_type == 'bn':
+            self.norm = nn.BatchNorm2d(channels, affine=True)
+        elif norm_type == 'in':
+            self.norm = nn.InstanceNorm2d(channels, affine=False)
+        elif norm_type == 'gn':
+            self.norm = nn.GroupNorm(32, channels, affine=True)
+        elif norm_type == 'pixel':
+            self.norm = lambda x: F.normalize(x, p=2, dim=1)
+        elif norm_type == 'layer':
+            self.norm = nn.LayerNorm(normalize_shape)
+        elif norm_type == 'none':
+            self.norm = lambda x: x*1.0
+        else:
+            raise ValueError('Norm type {} not supported.'.format(norm_type))
+
+    def forward(self, x, ref=None):
+        if self.norm_type == 'spade':
+            return self.norm(x, ref)
+        else:
+            return self.norm(x)
+
+
+class ReluLayer(nn.Module):
+    """Relu Layer.
+    ------------
+    # Arguments
+        - relu type: type of relu layer, candidates are
+            - ReLU
+            - LeakyReLU: default relu slope 0.2
+            - PReLU
+            - SELU
+            - none: direct pass
+    """
+    def __init__(self, channels, relu_type='relu'):
+        super(ReluLayer, self).__init__()
+        relu_type = relu_type.lower()
+        if relu_type == 'relu':
+            self.func = nn.ReLU(True)
+        elif relu_type == 'leakyrelu':
+            self.func = nn.LeakyReLU(0.2, inplace=True)
+        elif relu_type == 'prelu':
+            self.func = nn.PReLU(channels)
+        elif relu_type == 'selu':
+            self.func = nn.SELU(True)
+        elif relu_type == 'none':
+            self.func = lambda x: x*1.0
+        else:
+            raise ValueError('Relu type {} not supported.'.format(relu_type))
+
+    def forward(self, x):
+        return self.func(x)
+
+
+class ConvLayer(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size=3, scale='none', norm_type='none', relu_type='none', use_pad=True, bias=True):
+        super(ConvLayer, self).__init__()
+        self.use_pad = use_pad
+        self.norm_type = norm_type
+        if norm_type in ['bn']:
+            bias = False
+        
+        stride = 2 if scale == 'down' else 1
+
+        self.scale_func = lambda x: x
+        if scale == 'up':
+            self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest')
+
+        self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.)/2))) 
+        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias)
+
+        self.relu = ReluLayer(out_channels, relu_type)
+        self.norm = NormLayer(out_channels, norm_type=norm_type)
+
+    def forward(self, x):
+        out = self.scale_func(x)
+        if self.use_pad:
+            out = self.reflection_pad(out)
+        out = self.conv2d(out)
+        out = self.norm(out)
+        out = self.relu(out)
+        return out
+
+
+class ResidualBlock(nn.Module):
+    """
+    Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html
+    """
+    def __init__(self, c_in, c_out, relu_type='prelu', norm_type='bn', scale='none'):
+        super(ResidualBlock, self).__init__()
+
+        if scale == 'none' and c_in == c_out:
+            self.shortcut_func = lambda x: x
+        else:
+            self.shortcut_func = ConvLayer(c_in, c_out, 3, scale)
+        
+        scale_config_dict = {'down': ['none', 'down'], 'up': ['up', 'none'], 'none': ['none', 'none']}
+        scale_conf = scale_config_dict[scale]
+
+        self.conv1 = ConvLayer(c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type) 
+        self.conv2 = ConvLayer(c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type='none')
+  
+    def forward(self, x):
+        identity = self.shortcut_func(x)
+
+        res = self.conv1(x)
+        res = self.conv2(res)
+        return identity + res
+        
+
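+# --- Hedged usage sketch (illustration only, not part of the original file) ---
+# ResidualBlock keeps spatial size for scale='none', halves it for 'down'
+# and doubles it for 'up'; the quick shape check below uses random input.
+if __name__ == '__main__':
+    x = torch.randn(4, 64, 32, 32)
+    down = ResidualBlock(64, 128, scale='down')
+    up = ResidualBlock(128, 64, scale='up')
+    print(down(x).shape)      # torch.Size([4, 128, 16, 16])
+    print(up(down(x)).shape)  # torch.Size([4, 64, 32, 32])
+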
diff --git a/face_parse/face_parsing.py b/face_parse/face_parsing.py
new file mode 100755
index 0000000000000000000000000000000000000000..87234ef48b69575320fd184de447350a54910359
--- /dev/null
+++ b/face_parse/face_parsing.py
@@ -0,0 +1,78 @@
+'''
+@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+@author: yangxy (yangtao9009@gmail.com)
+'''
+import os
+import cv2
+import torch
+import numpy as np
+from parse_model import ParseNet
+import torch.nn.functional as F
+
+class FaceParse(object):
+    def __init__(self, base_dir='./', model='ParseNet-latest', device='cuda'):
+        self.mfile = os.path.join(base_dir, 'weights', model+'.pth')
+        self.size = 512
+        self.device = device
+
+        '''
+        0: 'background' 1: 'skin'   2: 'nose'
+        3: 'eye_g'  4: 'l_eye'  5: 'r_eye'
+        6: 'l_brow' 7: 'r_brow' 8: 'l_ear'
+        9: 'r_ear'  10: 'mouth' 11: 'u_lip'
+        12: 'l_lip' 13: 'hair'  14: 'hat'
+        15: 'ear_r' 16: 'neck_l'    17: 'neck'
+        18: 'cloth'
+        '''
+        #self.MASK_COLORMAP = [[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204], [0, 255, 255], [255, 204, 204], [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204], [255, 51, 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]]
+        self.MASK_COLORMAP = [0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 0, 0, 0]
+        self.load_model()
+
+    def load_model(self):
+        self.faceparse = ParseNet(self.size, self.size, 32, 64, 19, norm_type='bn', relu_type='LeakyReLU', ch_range=[32, 256])
+        self.faceparse.load_state_dict(torch.load(self.mfile, map_location=torch.device('cpu')))
+        self.faceparse.to(self.device)
+        self.faceparse.eval()
+
+    def process(self, im):
+        im = cv2.resize(im, (self.size, self.size))
+        imt = self.img2tensor(im)
+        pred_mask, sr_img_tensor = self.faceparse(imt)
+        mask = self.tensor2mask(pred_mask)
+
+        return mask
+
+    def process_tensor(self, imt):
+        imt = F.interpolate(imt.flip(1)*2-1, (self.size, self.size))
+        pred_mask, sr_img_tensor = self.faceparse(imt)
+
+        mask = pred_mask.argmax(dim=1)
+        for idx, color in enumerate(self.MASK_COLORMAP):
+            mask = torch.where(mask==idx, color, mask)
+        #mask = mask.repeat(3, 1, 1).unsqueeze(0) #.cpu().float().numpy()
+        mask = mask.unsqueeze(0)
+
+        return mask
+
+    def img2tensor(self, img):
+        img = img[..., ::-1]
+        img = img / 255. * 2 - 1
+        img_tensor = torch.from_numpy(img.transpose(2, 0, 1)).unsqueeze(0).to(self.device)
+        return img_tensor.float()
+
+    def tensor2mask(self, tensor):
+        if len(tensor.shape) < 4:
+            tensor = tensor.unsqueeze(0)
+        if tensor.shape[1] > 1:
+            tensor = tensor.argmax(dim=1) 
+
+        tensor = tensor.squeeze(1).data.cpu().numpy()
+        color_maps = []
+        for t in tensor:
+            #tmp_img = np.zeros(tensor.shape[1:] + (3,))
+            tmp_img = np.zeros(tensor.shape[1:])
+            for idx, color in enumerate(self.MASK_COLORMAP):
+                tmp_img[t == idx] = color
+            color_maps.append(tmp_img.astype(np.uint8))
+        return color_maps
\ No newline at end of file
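+# --- Hedged usage sketch (comments only; assumes weights/ParseNet-latest.pth exists) ---
+#     fp = FaceParse(base_dir='./', device='cuda')
+#     face = cv2.imread('some_face.png')   # hypothetical aligned BGR face crop
+#     mask = fp.process(face)              # list with one 512x512 uint8 parsing mask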
diff --git a/face_parse/mask.png b/face_parse/mask.png
new file mode 100755
index 0000000000000000000000000000000000000000..729f36dfb23b90381fe819fe24326153ba46828e
Binary files /dev/null and b/face_parse/mask.png differ
diff --git a/face_parse/parse_model.py b/face_parse/parse_model.py
new file mode 100755
index 0000000000000000000000000000000000000000..b0717eedb97bab83331312c13cfd48a1e268b862
--- /dev/null
+++ b/face_parse/parse_model.py
@@ -0,0 +1,77 @@
+'''
+@Created by chaofengc (chaofenghust@gmail.com)
+
+@Modified by yangxy (yangtao9009@gmail.com)
+'''
+
+from blocks import *
+import torch
+from torch import nn
+import numpy as np
+
+def define_P(in_size=512, out_size=512, min_feat_size=32, relu_type='LeakyReLU', isTrain=False, weight_path=None):
+    net = ParseNet(in_size, out_size, min_feat_size, 64, 19, norm_type='bn', relu_type=relu_type, ch_range=[32, 256])
+    if not isTrain:
+        net.eval()  
+    if weight_path is not None:
+        net.load_state_dict(torch.load(weight_path))
+    return net
+
+
+class ParseNet(nn.Module):
+    def __init__(self,
+                in_size=128,
+                out_size=128,
+                min_feat_size=32,
+                base_ch=64,
+                parsing_ch=19,
+                res_depth=10,
+                relu_type='prelu',
+                norm_type='bn',
+                ch_range=[32, 512],
+                ):
+        super().__init__()
+        self.res_depth = res_depth
+        act_args = {'norm_type': norm_type, 'relu_type': relu_type}
+        min_ch, max_ch = ch_range
+
+        ch_clip = lambda x: max(min_ch, min(x, max_ch))
+        min_feat_size = min(in_size, min_feat_size)
+
+        down_steps = int(np.log2(in_size//min_feat_size))
+        up_steps = int(np.log2(out_size//min_feat_size))
+
+        # =============== define encoder-body-decoder ==================== 
+        self.encoder = []
+        self.encoder.append(ConvLayer(3, base_ch, 3, 1))
+        head_ch = base_ch
+        for i in range(down_steps):
+            cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2)
+            self.encoder.append(ResidualBlock(cin, cout, scale='down', **act_args))
+            head_ch = head_ch * 2
+
+        self.body = []
+        for i in range(res_depth):
+            self.body.append(ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args))
+
+        self.decoder = []
+        for i in range(up_steps):
+            cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2)
+            self.decoder.append(ResidualBlock(cin, cout, scale='up', **act_args))
+            head_ch = head_ch // 2
+
+        self.encoder = nn.Sequential(*self.encoder)
+        self.body = nn.Sequential(*self.body)
+        self.decoder = nn.Sequential(*self.decoder)
+        self.out_img_conv = ConvLayer(ch_clip(head_ch), 3)
+        self.out_mask_conv = ConvLayer(ch_clip(head_ch), parsing_ch)
+
+    def forward(self, x):
+        feat = self.encoder(x)
+        x = feat + self.body(feat)
+        x = self.decoder(x)
+        out_img = self.out_img_conv(x) 
+        out_mask = self.out_mask_conv(x)
+        return out_mask, out_img
+
+
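+# --- Hedged usage sketch (illustration only, random weights) ---
+# ParseNet maps a 512x512 face crop to a 19-channel parsing map plus a
+# reconstructed image; the shapes below assume the default define_P() settings.
+if __name__ == '__main__':
+    net = define_P()
+    with torch.no_grad():
+        mask, img = net(torch.randn(1, 3, 512, 512))
+    print(mask.shape, img.shape)  # [1, 19, 512, 512], [1, 3, 512, 512]
+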
diff --git a/face_parse/test.png b/face_parse/test.png
new file mode 100755
index 0000000000000000000000000000000000000000..f2bea14ce322ad51da09a640c0fcd3617ef3105a
Binary files /dev/null and b/face_parse/test.png differ
diff --git a/loss/helpers.py b/loss/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..b51fdf97141407fcc1c9d249a086ddbfd042469f
--- /dev/null
+++ b/loss/helpers.py
@@ -0,0 +1,119 @@
+from collections import namedtuple
+import torch
+from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module
+
+"""
+ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
+"""
+
+
+class Flatten(Module):
+	def forward(self, input):
+		return input.view(input.size(0), -1)
+
+
+def l2_norm(input, axis=1):
+	norm = torch.norm(input, 2, axis, True)
+	output = torch.div(input, norm)
+	return output
+
+
+class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
+	""" A named tuple describing a ResNet block. """
+
+
+def get_block(in_channel, depth, num_units, stride=2):
+	return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
+
+
+def get_blocks(num_layers):
+	if num_layers == 50:
+		blocks = [
+			get_block(in_channel=64, depth=64, num_units=3),
+			get_block(in_channel=64, depth=128, num_units=4),
+			get_block(in_channel=128, depth=256, num_units=14),
+			get_block(in_channel=256, depth=512, num_units=3)
+		]
+	elif num_layers == 100:
+		blocks = [
+			get_block(in_channel=64, depth=64, num_units=3),
+			get_block(in_channel=64, depth=128, num_units=13),
+			get_block(in_channel=128, depth=256, num_units=30),
+			get_block(in_channel=256, depth=512, num_units=3)
+		]
+	elif num_layers == 152:
+		blocks = [
+			get_block(in_channel=64, depth=64, num_units=3),
+			get_block(in_channel=64, depth=128, num_units=8),
+			get_block(in_channel=128, depth=256, num_units=36),
+			get_block(in_channel=256, depth=512, num_units=3)
+		]
+	else:
+		raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
+	return blocks
+
+
+class SEModule(Module):
+	def __init__(self, channels, reduction):
+		super(SEModule, self).__init__()
+		self.avg_pool = AdaptiveAvgPool2d(1)
+		self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
+		self.relu = ReLU(inplace=True)
+		self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
+		self.sigmoid = Sigmoid()
+
+	def forward(self, x):
+		module_input = x
+		x = self.avg_pool(x)
+		x = self.fc1(x)
+		x = self.relu(x)
+		x = self.fc2(x)
+		x = self.sigmoid(x)
+		return module_input * x
+
+
+class bottleneck_IR(Module):
+	def __init__(self, in_channel, depth, stride):
+		super(bottleneck_IR, self).__init__()
+		if in_channel == depth:
+			self.shortcut_layer = MaxPool2d(1, stride)
+		else:
+			self.shortcut_layer = Sequential(
+				Conv2d(in_channel, depth, (1, 1), stride, bias=False),
+				BatchNorm2d(depth)
+			)
+		self.res_layer = Sequential(
+			BatchNorm2d(in_channel),
+			Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
+			Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
+		)
+
+	def forward(self, x):
+		shortcut = self.shortcut_layer(x)
+		res = self.res_layer(x)
+		return res + shortcut
+
+
+class bottleneck_IR_SE(Module):
+	def __init__(self, in_channel, depth, stride):
+		super(bottleneck_IR_SE, self).__init__()
+		if in_channel == depth:
+			self.shortcut_layer = MaxPool2d(1, stride)
+		else:
+			self.shortcut_layer = Sequential(
+				Conv2d(in_channel, depth, (1, 1), stride, bias=False),
+				BatchNorm2d(depth)
+			)
+		self.res_layer = Sequential(
+			BatchNorm2d(in_channel),
+			Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
+			PReLU(depth),
+			Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
+			BatchNorm2d(depth),
+			SEModule(depth, 16)
+		)
+
+	def forward(self, x):
+		shortcut = self.shortcut_layer(x)
+		res = self.res_layer(x)
+		return res + shortcut
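+
+
+# --- Hedged usage sketch (illustration only) ---
+# get_blocks(50) returns the four ResNet-50 stages as lists of Bottleneck
+# namedtuples; the counts below are just a sanity check.
+if __name__ == '__main__':
+	stages = get_blocks(50)
+	print([len(s) for s in stages])       # [3, 4, 14, 3]
+	print(sum(len(s) for s in stages))    # 24 bottleneck units in total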
diff --git a/loss/id_loss.py b/loss/id_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..c26e2bfa764844237b55a0791e5b365438ef7fac
--- /dev/null
+++ b/loss/id_loss.py
@@ -0,0 +1,50 @@
+import os
+import torch
+from torch import nn
+from model_irse import Backbone
+
+class IDLoss(nn.Module):
+    def __init__(self, base_dir='./', device='cuda', ckpt_dict=None):
+        super(IDLoss, self).__init__()
+        print('Loading ResNet ArcFace')
+        self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se').to(device)
+        if ckpt_dict is None:
+            self.facenet.load_state_dict(torch.load(os.path.join(base_dir, 'weights', 'model_ir_se50.pth'), map_location=torch.device('cpu')))
+        else:
+            self.facenet.load_state_dict(ckpt_dict)
+        self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112))
+        self.facenet.eval()
+
+    def extract_feats(self, x):
+        _, _, h, w = x.shape
+        assert h==w
+        ss = h//256
+        x = x[:, :, 35*ss:-33*ss, 32*ss:-36*ss]  # Crop interesting region
+        x = self.face_pool(x)
+        x_feats = self.facenet(x)
+        return x_feats
+
+    def forward(self, y_hat, y, x):
+        n_samples = x.shape[0]
+        x_feats = self.extract_feats(x)
+        y_feats = self.extract_feats(y)  # features of the ground-truth target
+        y_hat_feats = self.extract_feats(y_hat)
+        y_feats = y_feats.detach()
+        loss = 0
+        sim_improvement = 0
+        id_logs = []
+        count = 0
+        for i in range(n_samples):
+            diff_target = y_hat_feats[i].dot(y_feats[i])
+            diff_input = y_hat_feats[i].dot(x_feats[i])
+            diff_views = y_feats[i].dot(x_feats[i])
+            id_logs.append({'diff_target': float(diff_target),
+                            'diff_input': float(diff_input),
+                            'diff_views': float(diff_views)})
+            loss += 1 - diff_target
+            id_diff = float(diff_target) - float(diff_views)
+            sim_improvement += id_diff
+            count += 1
+
+        return loss / count, sim_improvement / count, id_logs
+
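+# --- Hedged usage sketch (illustration only; uses dummy unit-norm features
+# instead of the ArcFace backbone, which needs weights/model_ir_se50.pth) ---
+# Backbone.forward ends with l2_norm, so the dot products above are cosine
+# similarities and the identity term is effectively mean(1 - cos_sim).
+if __name__ == '__main__':
+    f_hat = torch.nn.functional.normalize(torch.randn(4, 512), dim=1)
+    f_gt = torch.nn.functional.normalize(torch.randn(4, 512), dim=1)
+    print(float((1 - (f_hat * f_gt).sum(dim=1)).mean()))  # ~1.0 for unrelated embeddings
+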
diff --git a/loss/model_irse.py b/loss/model_irse.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3bd6f79dfdcc3f2bd32f8667d29acf1f0d8dbf8
--- /dev/null
+++ b/loss/model_irse.py
@@ -0,0 +1,85 @@
+from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
+#from models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
+from helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
+
+"""
+Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
+"""
+
+
+class Backbone(Module):
+	def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
+		super(Backbone, self).__init__()
+		assert input_size in [112, 224], "input_size should be 112 or 224"
+		assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
+		assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
+		blocks = get_blocks(num_layers)
+		if mode == 'ir':
+			unit_module = bottleneck_IR
+		elif mode == 'ir_se':
+			unit_module = bottleneck_IR_SE
+		self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
+									  BatchNorm2d(64),
+									  PReLU(64))
+		if input_size == 112:
+			self.output_layer = Sequential(BatchNorm2d(512),
+			                               Dropout(drop_ratio),
+			                               Flatten(),
+			                               Linear(512 * 7 * 7, 512),
+			                               BatchNorm1d(512, affine=affine))
+		else:
+			self.output_layer = Sequential(BatchNorm2d(512),
+			                               Dropout(drop_ratio),
+			                               Flatten(),
+			                               Linear(512 * 14 * 14, 512),
+			                               BatchNorm1d(512, affine=affine))
+
+		modules = []
+		for block in blocks:
+			for bottleneck in block:
+				modules.append(unit_module(bottleneck.in_channel,
+										   bottleneck.depth,
+										   bottleneck.stride))
+		self.body = Sequential(*modules)
+
+	def forward(self, x):
+		x = self.input_layer(x)
+		x = self.body(x)
+		x = self.output_layer(x)
+		return l2_norm(x)
+
+
+def IR_50(input_size):
+	"""Constructs a ir-50 model."""
+	model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False)
+	return model
+
+
+def IR_101(input_size):
+	"""Constructs a ir-101 model."""
+	model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False)
+	return model
+
+
+def IR_152(input_size):
+	"""Constructs a ir-152 model."""
+	model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False)
+	return model
+
+
+def IR_SE_50(input_size):
+	"""Constructs a ir_se-50 model."""
+	model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False)
+	return model
+
+
+def IR_SE_101(input_size):
+	"""Constructs a ir_se-101 model."""
+	model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False)
+	return model
+
+
+def IR_SE_152(input_size):
+	"""Constructs a ir_se-152 model."""
+	model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False)
+	return model
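+
+
+# --- Hedged usage sketch (illustration only) ---
+# With input_size=112 the backbone flattens a 512x7x7 feature map into a
+# single l2-normalized 512-d embedding.
+if __name__ == '__main__':
+	import torch
+	net = IR_SE_50(112).eval()
+	with torch.no_grad():
+		emb = net(torch.randn(2, 3, 112, 112))
+	print(emb.shape, emb.norm(dim=1))  # torch.Size([2, 512]), norms close to 1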
diff --git a/lpips/__init__.py b/lpips/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..62b9a079ea694d139d31bab1263ce4fc318059fb
--- /dev/null
+++ b/lpips/__init__.py
@@ -0,0 +1,178 @@
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import torch
+# from torch.autograd import Variable
+
+from lpips.trainer import *
+from lpips.lpips import *
+
+# class PerceptualLoss(torch.nn.Module):
+#     def __init__(self, model='lpips', net='alex', spatial=False, use_gpu=False, gpu_ids=[0], version='0.1'): # VGG using our perceptually-learned weights (LPIPS metric)
+#     # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss
+#         super(PerceptualLoss, self).__init__()
+#         print('Setting up Perceptual loss...')
+#         self.use_gpu = use_gpu
+#         self.spatial = spatial
+#         self.gpu_ids = gpu_ids
+#         self.model = dist_model.DistModel()
+#         self.model.initialize(model=model, net=net, use_gpu=use_gpu, spatial=self.spatial, gpu_ids=gpu_ids, version=version)
+#         print('...[%s] initialized'%self.model.name())
+#         print('...Done')
+
+#     def forward(self, pred, target, normalize=False):
+#         """
+#         Pred and target are Variables.
+#         If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1]
+#         If normalize is False, assumes the images are already between [-1,+1]
+
+#         Inputs pred and target are Nx3xHxW
+#         Output pytorch Variable N long
+#         """
+
+#         if normalize:
+#             target = 2 * target  - 1
+#             pred = 2 * pred  - 1
+
+#         return self.model.forward(target, pred)
+
+def normalize_tensor(in_feat,eps=1e-10):
+    norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1,keepdim=True))
+    return in_feat/(norm_factor+eps)
+
+def l2(p0, p1, range=255.):
+    return .5*np.mean((p0 / range - p1 / range)**2)
+
+def psnr(p0, p1, peak=255.):
+    return 10*np.log10(peak**2/np.mean((1.*p0-1.*p1)**2))
+
+def dssim(p0, p1, range=255.):
+    try:
+        from skimage.measure import compare_ssim  # older scikit-image
+    except ImportError:
+        from skimage.metrics import structural_similarity as compare_ssim
+    return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2.
+
+def rgb2lab(in_img,mean_cent=False):
+    from skimage import color
+    img_lab = color.rgb2lab(in_img)
+    if(mean_cent):
+        img_lab[:,:,0] = img_lab[:,:,0]-50
+    return img_lab
+
+def tensor2np(tensor_obj):
+    # change dimension of a tensor object into a numpy array
+    return tensor_obj[0].cpu().float().numpy().transpose((1,2,0))
+
+def np2tensor(np_obj):
+    # change dimension of np array into tensor array
+    return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
+
+def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False):
+    # image tensor to lab tensor
+    from skimage import color
+
+    img = tensor2im(image_tensor)
+    img_lab = color.rgb2lab(img)
+    if(mc_only):
+        img_lab[:,:,0] = img_lab[:,:,0]-50
+    if(to_norm and not mc_only):
+        img_lab[:,:,0] = img_lab[:,:,0]-50
+        img_lab = img_lab/100.
+
+    return np2tensor(img_lab)
+
+def tensorlab2tensor(lab_tensor,return_inbnd=False):
+    from skimage import color
+    import warnings
+    warnings.filterwarnings("ignore")
+
+    lab = tensor2np(lab_tensor)*100.
+    lab[:,:,0] = lab[:,:,0]+50
+
+    rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1)
+    if(return_inbnd):
+        # convert back to lab, see if we match
+        lab_back = color.rgb2lab(rgb_back.astype('uint8'))
+        mask = 1.*np.isclose(lab_back,lab,atol=2.)
+        mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis])
+        return (im2tensor(rgb_back),mask)
+    else:
+        return im2tensor(rgb_back)
+
+def load_image(path):
+    if(path[-3:] == 'dng'):
+        import rawpy
+        with rawpy.imread(path) as raw:
+            img = raw.postprocess()
+    elif(path[-3:]=='bmp' or path[-3:]=='jpg' or path[-3:]=='png'):
+        import cv2
+        return cv2.imread(path)[:,:,::-1]
+    else:
+        import matplotlib.pyplot as plt  # lazy import, mirrors the rawpy/cv2 branches
+        img = (255*plt.imread(path)[:,:,:3]).astype('uint8')
+
+    return img
+
+def rgb2lab(input):
+    from skimage import color
+    return color.rgb2lab(input / 255.)
+
+def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):
+    image_numpy = image_tensor[0].cpu().float().numpy()
+    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
+    return image_numpy.astype(imtype)
+
+def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
+    return torch.Tensor((image / factor - cent)
+                        [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
+
+def tensor2vec(vector_tensor):
+    return vector_tensor.data.cpu().numpy()[:, :, 0, 0]
+
+
+def voc_ap(rec, prec, use_07_metric=False):
+    """ ap = voc_ap(rec, prec, [use_07_metric])
+    Compute VOC AP given precision and recall.
+    If use_07_metric is true, uses the
+    VOC 07 11 point method (default:False).
+    """
+    if use_07_metric:
+        # 11 point metric
+        ap = 0.
+        for t in np.arange(0., 1.1, 0.1):
+            if np.sum(rec >= t) == 0:
+                p = 0
+            else:
+                p = np.max(prec[rec >= t])
+            ap = ap + p / 11.
+    else:
+        # correct AP calculation
+        # first append sentinel values at the end
+        mrec = np.concatenate(([0.], rec, [1.]))
+        mpre = np.concatenate(([0.], prec, [0.]))
+
+        # compute the precision envelope
+        for i in range(mpre.size - 1, 0, -1):
+            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
+
+        # to calculate area under PR curve, look for points
+        # where X axis (recall) changes value
+        i = np.where(mrec[1:] != mrec[:-1])[0]
+
+        # and sum (\Delta recall) * prec
+        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
+    return ap
+
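+# --- Hedged usage sketch (illustration only; run from the repo root) ---
+# voc_ap integrates a precision/recall curve; for the toy curve below the
+# interpolated AP is the area under the precision envelope.
+if __name__ == '__main__':
+    rec = np.array([0.1, 0.4, 0.7, 1.0])
+    prec = np.array([1.0, 0.8, 0.6, 0.5])
+    print(voc_ap(rec, prec))                      # ~0.67
+    print(voc_ap(rec, prec, use_07_metric=True))  # ~0.70 with the 11-point rule
+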
diff --git a/lpips/lpips.py b/lpips/lpips.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b979c082fdc45092e4669c52179339e16fa9d1f
--- /dev/null
+++ b/lpips/lpips.py
@@ -0,0 +1,219 @@
+
+from __future__ import absolute_import
+
+import torch
+import torch.nn as nn
+import torch.nn.init as init
+from torch.autograd import Variable
+import numpy as np
+from . import pretrained_networks as pn
+import torch.nn
+
+import lpips
+
+def spatial_average(in_tens, keepdim=True):
+    return in_tens.mean([2,3],keepdim=keepdim)
+
+def upsample(in_tens, out_HW=(64,64)): # assumes scale factor is same for H and W
+    in_H, in_W = in_tens.shape[2], in_tens.shape[3]
+    return nn.Upsample(size=out_HW, mode='bilinear', align_corners=False)(in_tens)
+
+# Learned perceptual metric
+class LPIPS(nn.Module):
+    def __init__(self, pretrained=True, net='alex', version='0.1', lpips=True, spatial=False, 
+        pnet_rand=False, pnet_tune=False, use_dropout=True, model_path=None, eval_mode=True, verbose=True):
+        # lpips - [True] means with linear calibration on top of base network
+        # pretrained - [True] means load linear weights
+
+        super(LPIPS, self).__init__()
+        if(verbose):
+            print('Setting up [%s] perceptual loss: trunk [%s], v[%s], spatial [%s]'%
+                ('LPIPS' if lpips else 'baseline', net, version, 'on' if spatial else 'off'))
+
+        self.pnet_type = net
+        self.pnet_tune = pnet_tune
+        self.pnet_rand = pnet_rand
+        self.spatial = spatial
+        self.lpips = lpips # false means baseline of just averaging all layers
+        self.version = version
+        self.scaling_layer = ScalingLayer()
+
+        if(self.pnet_type in ['vgg','vgg16']):
+            net_type = pn.vgg16
+            self.chns = [64,128,256,512,512]
+        elif(self.pnet_type=='alex'):
+            net_type = pn.alexnet
+            self.chns = [64,192,384,256,256]
+        elif(self.pnet_type=='squeeze'):
+            net_type = pn.squeezenet
+            self.chns = [64,128,256,384,384,512,512]
+        self.L = len(self.chns)
+
+        self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)
+
+        if(lpips):
+            self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
+            self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
+            self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
+            self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
+            self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
+            self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4]
+            if(self.pnet_type=='squeeze'): # 7 layers for squeezenet
+                self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)
+                self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)
+                self.lins+=[self.lin5,self.lin6]
+            self.lins = nn.ModuleList(self.lins)
+
+            if(pretrained):
+                if(model_path is None):
+                    import inspect
+                    import os
+                    model_path = os.path.abspath(os.path.join(inspect.getfile(self.__init__), '..', 'weights/v%s/%s.pth'%(version,net)))
+
+                if(verbose):
+                    print('Loading model from: %s'%model_path)
+                self.load_state_dict(torch.load(model_path, map_location='cpu'), strict=False)          
+
+        if(eval_mode):
+            self.eval()
+
+    def forward(self, in0, in1, retPerLayer=False, normalize=False):
+        if normalize: # turn on this flag if input is [0,1] so it can be adjusted to [-1, +1]
+            in0 = 2 * in0  - 1
+            in1 = 2 * in1  - 1
+
+        # v0.0 - original release had a bug, where input was not scaled
+        in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version=='0.1' else (in0, in1)
+        outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input)
+        feats0, feats1, diffs = {}, {}, {}
+
+        for kk in range(self.L):
+            feats0[kk], feats1[kk] = lpips.normalize_tensor(outs0[kk]), lpips.normalize_tensor(outs1[kk])
+            diffs[kk] = (feats0[kk]-feats1[kk])**2
+
+        if(self.lpips):
+            if(self.spatial):
+                res = [upsample(self.lins[kk](diffs[kk]), out_HW=in0.shape[2:]) for kk in range(self.L)]
+            else:
+                res = [spatial_average(self.lins[kk](diffs[kk]), keepdim=True) for kk in range(self.L)]
+        else:
+            if(self.spatial):
+                res = [upsample(diffs[kk].sum(dim=1,keepdim=True), out_HW=in0.shape[2:]) for kk in range(self.L)]
+            else:
+                res = [spatial_average(diffs[kk].sum(dim=1,keepdim=True), keepdim=True) for kk in range(self.L)]
+
+        val = res[0]
+        for l in range(1,self.L):
+            val += res[l]
+
+        # a = spatial_average(self.lins[kk](diffs[kk]), keepdim=True)
+        # b = torch.max(self.lins[kk](feats0[kk]**2))
+        # for kk in range(self.L):
+        #     a += spatial_average(self.lins[kk](diffs[kk]), keepdim=True)
+        #     b = torch.max(b,torch.max(self.lins[kk](feats0[kk]**2)))
+        # a = a/self.L
+        # from IPython import embed
+        # embed()
+        # return 10*torch.log10(b/a)
+        
+        if(retPerLayer):
+            return (val, res)
+        else:
+            return val
+
+
+class ScalingLayer(nn.Module):
+    def __init__(self):
+        super(ScalingLayer, self).__init__()
+        self.register_buffer('shift', torch.Tensor([-.030,-.088,-.188])[None,:,None,None])
+        self.register_buffer('scale', torch.Tensor([.458,.448,.450])[None,:,None,None])
+
+    def forward(self, inp):
+        return (inp - self.shift) / self.scale
+
+
+class NetLinLayer(nn.Module):
+    ''' A single linear layer which does a 1x1 conv '''
+    def __init__(self, chn_in, chn_out=1, use_dropout=False):
+        super(NetLinLayer, self).__init__()
+
+        layers = [nn.Dropout(),] if(use_dropout) else []
+        layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),]
+        self.model = nn.Sequential(*layers)
+
+    def forward(self, x):
+        return self.model(x)
+
+class Dist2LogitLayer(nn.Module):
+    ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) '''
+    def __init__(self, chn_mid=32, use_sigmoid=True):
+        super(Dist2LogitLayer, self).__init__()
+
+        layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True),]
+        layers += [nn.LeakyReLU(0.2,True),]
+        layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True),]
+        layers += [nn.LeakyReLU(0.2,True),]
+        layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True),]
+        if(use_sigmoid):
+            layers += [nn.Sigmoid(),]
+        self.model = nn.Sequential(*layers)
+
+    def forward(self,d0,d1,eps=0.1):
+        return self.model.forward(torch.cat((d0,d1,d0-d1,d0/(d1+eps),d1/(d0+eps)),dim=1))
+
+class BCERankingLoss(nn.Module):
+    def __init__(self, chn_mid=32):
+        super(BCERankingLoss, self).__init__()
+        self.net = Dist2LogitLayer(chn_mid=chn_mid)
+        # self.parameters = list(self.net.parameters())
+        self.loss = torch.nn.BCELoss()
+
+    def forward(self, d0, d1, judge):
+        per = (judge+1.)/2.
+        self.logit = self.net.forward(d0,d1)
+        return self.loss(self.logit, per)
+
+# L2, DSSIM metrics
+class FakeNet(nn.Module):
+    def __init__(self, use_gpu=True, colorspace='Lab'):
+        super(FakeNet, self).__init__()
+        self.use_gpu = use_gpu
+        self.colorspace = colorspace
+
+class L2(FakeNet):
+    def forward(self, in0, in1, retPerLayer=None):
+        assert(in0.size()[0]==1) # currently only supports batchSize 1
+
+        if(self.colorspace=='RGB'):
+            (N,C,X,Y) = in0.size()
+            value = torch.mean(torch.mean(torch.mean((in0-in1)**2,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N)
+            return value
+        elif(self.colorspace=='Lab'):
+            value = lpips.l2(lpips.tensor2np(lpips.tensor2tensorlab(in0.data,to_norm=False)), 
+                lpips.tensor2np(lpips.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')
+            ret_var = Variable( torch.Tensor((value,) ) )
+            if(self.use_gpu):
+                ret_var = ret_var.cuda()
+            return ret_var
+
+class DSSIM(FakeNet):
+
+    def forward(self, in0, in1, retPerLayer=None):
+        assert(in0.size()[0]==1) # currently only supports batchSize 1
+
+        if(self.colorspace=='RGB'):
+            value = lpips.dssim(1.*lpips.tensor2im(in0.data), 1.*lpips.tensor2im(in1.data), range=255.).astype('float')
+        elif(self.colorspace=='Lab'):
+            value = lpips.dssim(lpips.tensor2np(lpips.tensor2tensorlab(in0.data,to_norm=False)), 
+                lpips.tensor2np(lpips.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')
+        ret_var = Variable( torch.Tensor((value,) ) )
+        if(self.use_gpu):
+            ret_var = ret_var.cuda()
+        return ret_var
+
+def print_network(net):
+    num_params = 0
+    for param in net.parameters():
+        num_params += param.numel()
+    print('Network',net)
+    print('Total number of parameters: %d' % num_params)
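+
+
+# --- Hedged usage sketch (illustration only; run as `python -m lpips.lpips` from
+# the repo root; pretrained/pnet_rand are flipped here to avoid weight downloads) ---
+if __name__ == '__main__':
+    metric = LPIPS(net='alex', lpips=False, pretrained=False, pnet_rand=True)
+    img0 = torch.rand(1, 3, 64, 64) * 2 - 1   # inputs are expected in [-1, 1]
+    img1 = torch.rand(1, 3, 64, 64) * 2 - 1
+    print(metric(img0, img1).shape)           # torch.Size([1, 1, 1, 1])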
diff --git a/lpips/pretrained_networks.py b/lpips/pretrained_networks.py
new file mode 100644
index 0000000000000000000000000000000000000000..a70ebbeab1618da4fe2538833f049dc569f1eea1
--- /dev/null
+++ b/lpips/pretrained_networks.py
@@ -0,0 +1,180 @@
+from collections import namedtuple
+import torch
+from torchvision import models as tv
+
+class squeezenet(torch.nn.Module):
+    def __init__(self, requires_grad=False, pretrained=True):
+        super(squeezenet, self).__init__()
+        pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features
+        self.slice1 = torch.nn.Sequential()
+        self.slice2 = torch.nn.Sequential()
+        self.slice3 = torch.nn.Sequential()
+        self.slice4 = torch.nn.Sequential()
+        self.slice5 = torch.nn.Sequential()
+        self.slice6 = torch.nn.Sequential()
+        self.slice7 = torch.nn.Sequential()
+        self.N_slices = 7
+        for x in range(2):
+            self.slice1.add_module(str(x), pretrained_features[x])
+        for x in range(2,5):
+            self.slice2.add_module(str(x), pretrained_features[x])
+        for x in range(5, 8):
+            self.slice3.add_module(str(x), pretrained_features[x])
+        for x in range(8, 10):
+            self.slice4.add_module(str(x), pretrained_features[x])
+        for x in range(10, 11):
+            self.slice5.add_module(str(x), pretrained_features[x])
+        for x in range(11, 12):
+            self.slice6.add_module(str(x), pretrained_features[x])
+        for x in range(12, 13):
+            self.slice7.add_module(str(x), pretrained_features[x])
+        if not requires_grad:
+            for param in self.parameters():
+                param.requires_grad = False
+
+    def forward(self, X):
+        h = self.slice1(X)
+        h_relu1 = h
+        h = self.slice2(h)
+        h_relu2 = h
+        h = self.slice3(h)
+        h_relu3 = h
+        h = self.slice4(h)
+        h_relu4 = h
+        h = self.slice5(h)
+        h_relu5 = h
+        h = self.slice6(h)
+        h_relu6 = h
+        h = self.slice7(h)
+        h_relu7 = h
+        vgg_outputs = namedtuple("SqueezeOutputs", ['relu1','relu2','relu3','relu4','relu5','relu6','relu7'])
+        out = vgg_outputs(h_relu1,h_relu2,h_relu3,h_relu4,h_relu5,h_relu6,h_relu7)
+
+        return out
+
+
+class alexnet(torch.nn.Module):
+    def __init__(self, requires_grad=False, pretrained=True):
+        super(alexnet, self).__init__()
+        alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features
+        self.slice1 = torch.nn.Sequential()
+        self.slice2 = torch.nn.Sequential()
+        self.slice3 = torch.nn.Sequential()
+        self.slice4 = torch.nn.Sequential()
+        self.slice5 = torch.nn.Sequential()
+        self.N_slices = 5
+        for x in range(2):
+            self.slice1.add_module(str(x), alexnet_pretrained_features[x])
+        for x in range(2, 5):
+            self.slice2.add_module(str(x), alexnet_pretrained_features[x])
+        for x in range(5, 8):
+            self.slice3.add_module(str(x), alexnet_pretrained_features[x])
+        for x in range(8, 10):
+            self.slice4.add_module(str(x), alexnet_pretrained_features[x])
+        for x in range(10, 12):
+            self.slice5.add_module(str(x), alexnet_pretrained_features[x])
+        if not requires_grad:
+            for param in self.parameters():
+                param.requires_grad = False
+
+    def forward(self, X):
+        h = self.slice1(X)
+        h_relu1 = h
+        h = self.slice2(h)
+        h_relu2 = h
+        h = self.slice3(h)
+        h_relu3 = h
+        h = self.slice4(h)
+        h_relu4 = h
+        h = self.slice5(h)
+        h_relu5 = h
+        alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
+        out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
+
+        return out
+
+class vgg16(torch.nn.Module):
+    def __init__(self, requires_grad=False, pretrained=True):
+        super(vgg16, self).__init__()
+        vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features
+        self.slice1 = torch.nn.Sequential()
+        self.slice2 = torch.nn.Sequential()
+        self.slice3 = torch.nn.Sequential()
+        self.slice4 = torch.nn.Sequential()
+        self.slice5 = torch.nn.Sequential()
+        self.N_slices = 5
+        for x in range(4):
+            self.slice1.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(4, 9):
+            self.slice2.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(9, 16):
+            self.slice3.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(16, 23):
+            self.slice4.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(23, 30):
+            self.slice5.add_module(str(x), vgg_pretrained_features[x])
+        if not requires_grad:
+            for param in self.parameters():
+                param.requires_grad = False
+
+    def forward(self, X):
+        h = self.slice1(X)
+        h_relu1_2 = h
+        h = self.slice2(h)
+        h_relu2_2 = h
+        h = self.slice3(h)
+        h_relu3_3 = h
+        h = self.slice4(h)
+        h_relu4_3 = h
+        h = self.slice5(h)
+        h_relu5_3 = h
+        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
+        out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
+
+        return out
+
+
+
+class resnet(torch.nn.Module):
+    def __init__(self, requires_grad=False, pretrained=True, num=18):
+        super(resnet, self).__init__()
+        if(num==18):
+            self.net = tv.resnet18(pretrained=pretrained)
+        elif(num==34):
+            self.net = tv.resnet34(pretrained=pretrained)
+        elif(num==50):
+            self.net = tv.resnet50(pretrained=pretrained)
+        elif(num==101):
+            self.net = tv.resnet101(pretrained=pretrained)
+        elif(num==152):
+            self.net = tv.resnet152(pretrained=pretrained)
+        self.N_slices = 5
+
+        self.conv1 = self.net.conv1
+        self.bn1 = self.net.bn1
+        self.relu = self.net.relu
+        self.maxpool = self.net.maxpool
+        self.layer1 = self.net.layer1
+        self.layer2 = self.net.layer2
+        self.layer3 = self.net.layer3
+        self.layer4 = self.net.layer4
+
+    def forward(self, X):
+        h = self.conv1(X)
+        h = self.bn1(h)
+        h = self.relu(h)
+        h_relu1 = h
+        h = self.maxpool(h)
+        h = self.layer1(h)
+        h_conv2 = h
+        h = self.layer2(h)
+        h_conv3 = h
+        h = self.layer3(h)
+        h_conv4 = h
+        h = self.layer4(h)
+        h_conv5 = h
+
+        outputs = namedtuple("Outputs", ['relu1','conv2','conv3','conv4','conv5'])
+        out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
+
+        return out
diff --git a/lpips/trainer.py b/lpips/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..52b6112cdc79db7a429ec52e60fcefdb756f776b
--- /dev/null
+++ b/lpips/trainer.py
@@ -0,0 +1,280 @@
+
+from __future__ import absolute_import
+
+import numpy as np
+import torch
+from torch import nn
+from collections import OrderedDict
+from torch.autograd import Variable
+from scipy.ndimage import zoom
+from tqdm import tqdm
+import lpips
+import os
+
+
+class Trainer():
+    def name(self):
+        return self.model_name
+
+    def initialize(self, model='lpips', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None,
+            use_gpu=True, printNet=False, spatial=False, 
+            is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=[0]):
+        '''
+        INPUTS
+            model - ['lpips'] for linearly calibrated network
+                    ['baseline'] for off-the-shelf network
+                    ['L2'] for L2 distance in Lab colorspace
+                    ['SSIM'] for ssim in RGB colorspace
+            net - ['squeeze','alex','vgg']
+            model_path - if None, will look in weights/[NET_NAME].pth
+            colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
+            use_gpu - bool - whether or not to use a GPU
+            printNet - bool - whether or not to print network architecture out
+            spatial - bool - whether to output an array containing varying distances across spatial dimensions
+            is_train - bool - [True] for training mode
+            lr - float - initial learning rate
+            beta1 - float - initial momentum term for adam
+            version - 0.1 for latest, 0.0 was original (with a bug)
+            gpu_ids - int array - [0] by default, gpus to use
+        '''
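+        # Hedged usage sketch (comment only, added for clarity). A typical
+        # evaluation setup would look roughly like:
+        #     trainer = lpips.Trainer()
+        #     trainer.initialize(model='lpips', net='alex', use_gpu=torch.cuda.is_available())
+        #     d = trainer.forward(img0, img1)   # img0/img1 are Nx3xHxW in [-1, 1]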
+        self.use_gpu = use_gpu
+        self.gpu_ids = gpu_ids
+        self.model = model
+        self.net = net
+        self.is_train = is_train
+        self.spatial = spatial
+        self.model_name = '%s [%s]'%(model,net)
+
+        if(self.model == 'lpips'): # pretrained net + linear layer
+            self.net = lpips.LPIPS(pretrained=not is_train, net=net, version=version, lpips=True, spatial=spatial, 
+                pnet_rand=pnet_rand, pnet_tune=pnet_tune, 
+                use_dropout=True, model_path=model_path, eval_mode=False)
+        elif(self.model=='baseline'): # pretrained network
+            self.net = lpips.LPIPS(pnet_rand=pnet_rand, net=net, lpips=False)
+        elif(self.model in ['L2','l2']):
+            self.net = lpips.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing
+            self.model_name = 'L2'
+        elif(self.model in ['DSSIM','dssim','SSIM','ssim']):
+            self.net = lpips.DSSIM(use_gpu=use_gpu,colorspace=colorspace)
+            self.model_name = 'SSIM'
+        else:
+            raise ValueError("Model [%s] not recognized." % self.model)
+
+        self.parameters = list(self.net.parameters())
+
+        if self.is_train: # training mode
+            # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
+            self.rankLoss = lpips.BCERankingLoss()
+            self.parameters += list(self.rankLoss.net.parameters())
+            self.lr = lr
+            self.old_lr = lr
+            self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
+        else: # test mode
+            self.net.eval()
+
+        if(use_gpu):
+            self.net.to(gpu_ids[0])
+            self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
+            if(self.is_train):
+                self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0
+
+        if(printNet):
+            print('---------- Networks initialized -------------')
+            lpips.print_network(self.net)
+            print('-----------------------------------------------')
+
+    def forward(self, in0, in1, retPerLayer=False):
+        ''' Function computes the distance between image patches in0 and in1
+        INPUTS
+            in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
+        OUTPUT
+            computed distances between in0 and in1
+        '''
+
+        return self.net.forward(in0, in1, retPerLayer=retPerLayer)
+
+    # ***** TRAINING FUNCTIONS *****
+    def optimize_parameters(self):
+        self.forward_train()
+        self.optimizer_net.zero_grad()
+        self.backward_train()
+        self.optimizer_net.step()
+        self.clamp_weights()
+
+    def clamp_weights(self):
+        for module in self.net.modules():
+            if(hasattr(module, 'weight') and hasattr(module, 'kernel_size') and module.kernel_size==(1,1)):
+                module.weight.data = torch.clamp(module.weight.data,min=0)
+
+    def set_input(self, data):
+        self.input_ref = data['ref']
+        self.input_p0 = data['p0']
+        self.input_p1 = data['p1']
+        self.input_judge = data['judge']
+
+        if(self.use_gpu):
+            self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
+            self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
+            self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
+            self.input_judge = self.input_judge.to(device=self.gpu_ids[0])
+
+        self.var_ref = Variable(self.input_ref,requires_grad=True)
+        self.var_p0 = Variable(self.input_p0,requires_grad=True)
+        self.var_p1 = Variable(self.input_p1,requires_grad=True)
+
+    def forward_train(self): # run forward pass
+        self.d0 = self.forward(self.var_ref, self.var_p0)
+        self.d1 = self.forward(self.var_ref, self.var_p1)
+        self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)
+
+        self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())
+
+        self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)
+
+        return self.loss_total
+
+    def backward_train(self):
+        torch.mean(self.loss_total).backward()
+
+    def compute_accuracy(self,d0,d1,judge):
+        ''' d0, d1 are Variables, judge is a Tensor '''
+        d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()
+        judge_per = judge.cpu().numpy().flatten()
+        return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)
+
+    def get_current_errors(self):
+        retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
+                            ('acc_r', self.acc_r)])
+
+        for key in retDict.keys():
+            retDict[key] = np.mean(retDict[key])
+
+        return retDict
+
+    def get_current_visuals(self):
+        zoom_factor = 256/self.var_ref.data.size()[2]
+
+        ref_img = lpips.tensor2im(self.var_ref.data)
+        p0_img = lpips.tensor2im(self.var_p0.data)
+        p1_img = lpips.tensor2im(self.var_p1.data)
+
+        ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)
+        p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)
+        p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)
+
+        return OrderedDict([('ref', ref_img_vis),
+                            ('p0', p0_img_vis),
+                            ('p1', p1_img_vis)])
+
+    def save(self, path, label):
+        if(self.use_gpu):
+            self.save_network(self.net.module, path, '', label)
+        else:
+            self.save_network(self.net, path, '', label)
+        self.save_network(self.rankLoss.net, path, 'rank', label)
+
+    # helper saving function that can be used by subclasses
+    def save_network(self, network, path, network_label, epoch_label):
+        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
+        save_path = os.path.join(path, save_filename)
+        torch.save(network.state_dict(), save_path)
+
+    # helper loading function that can be used by subclasses
+    def load_network(self, network, network_label, epoch_label):
+        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
+        save_path = os.path.join(self.save_dir, save_filename)
+        print('Loading network from %s'%save_path)
+        network.load_state_dict(torch.load(save_path))
+
+    def update_learning_rate(self,nepoch_decay):
+        lrd = self.lr / nepoch_decay
+        lr = self.old_lr - lrd
+
+        for param_group in self.optimizer_net.param_groups:
+            param_group['lr'] = lr
+
+        print('update lr decay: %f -> %f' % (self.old_lr, lr))
+        self.old_lr = lr
+
+
+    def get_image_paths(self):
+        return self.image_paths
+
+    def save_done(self, flag=False):
+        np.save(os.path.join(self.save_dir, 'done_flag'),flag)
+        np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i')
+
+
+def score_2afc_dataset(data_loader, func, name=''):
+    ''' Function computes Two Alternative Forced Choice (2AFC) score using
+        distance function 'func' in dataset 'data_loader'
+    INPUTS
+        data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
+        func - callable distance function - calling d=func(in0,in1) should take 2
+            pytorch tensors with shape Nx3xXxY, and return numpy array of length N
+    OUTPUTS
+        [0] - 2AFC score in [0,1], fraction of the time func agrees with human evaluators
+        [1] - dictionary with the following elements
+            d0s,d1s - N arrays containing distances between the reference patch and each perturbed patch
+            gts - N array in [0,1], preferred patch selected by human evaluators
+                (closer to "0" for left patch p0, "1" for right patch p1;
+                e.g. "0.6" means 60% of people preferred the right patch, 40% the left)
+            scores - N array in [0,1], fraction of the time the function agreed with humans
+    CONSTS
+        N - number of test triplets in data_loader
+    '''
+
+    d0s = []
+    d1s = []
+    gts = []
+
+    for data in tqdm(data_loader.load_data(), desc=name):
+        d0s+=func(data['ref'],data['p0']).data.cpu().numpy().flatten().tolist()
+        d1s+=func(data['ref'],data['p1']).data.cpu().numpy().flatten().tolist()
+        gts+=data['judge'].cpu().numpy().flatten().tolist()
+
+    d0s = np.array(d0s)
+    d1s = np.array(d1s)
+    gts = np.array(gts)
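+    # credit func when it ranks the pair the same way the humans did; exact ties count as 0.5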
+    scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5
+
+    return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))
+
+def score_jnd_dataset(data_loader, func, name=''):
+    ''' Function computes JND score using distance function 'func' in dataset 'data_loader'
+    INPUTS
+        data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
+        func - callable distance function - calling d=func(in0,in1) should take 2
+            pytorch tensors with shape Nx3xXxY, and return a pytorch tensor of length N
+    OUTPUTS
+        [0] - JND score in [0,1], mAP score (area under precision-recall curve)
+        [1] - dictionary with following elements
+            ds - N array containing distances between the two patches shown to the human evaluators
+            sames - N array containing fraction of people who thought the two patches were identical
+    CONSTS
+        N - number of test triplets in data_loader
+    '''
+
+    ds = []
+    gts = []
+
+    for data in tqdm(data_loader.load_data(), desc=name):
+        ds+=func(data['p0'],data['p1']).data.cpu().numpy().tolist()
+        gts+=data['same'].cpu().numpy().flatten().tolist()
+
+    sames = np.array(gts)
+    ds = np.array(ds)
+
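+    # sweep a threshold over the sorted distances: pairs judged "same" by humans are positives,
+    # so the area under the resulting precision-recall curve (mAP) is the JND score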
+    sorted_inds = np.argsort(ds)
+    ds_sorted = ds[sorted_inds]
+    sames_sorted = sames[sorted_inds]
+
+    TPs = np.cumsum(sames_sorted)
+    FPs = np.cumsum(1-sames_sorted)
+    FNs = np.sum(sames_sorted)-TPs
+
+    precs = TPs/(TPs+FPs)
+    recs = TPs/(TPs+FNs)
+    score = lpips.voc_ap(recs,precs)
+
+    return(score, dict(ds=ds,sames=sames))
diff --git a/lpips/weights/v0.0/alex.pth b/lpips/weights/v0.0/alex.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6246af3a4abd8adec5f4f755a23f0c26a87b1ee6
--- /dev/null
+++ b/lpips/weights/v0.0/alex.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18720f55913d0af89042f13faa7e536a6ce1444a0914e6db9461355ece1e8cd5
+size 5455
diff --git a/lpips/weights/v0.0/squeeze.pth b/lpips/weights/v0.0/squeeze.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e82f07b8954ebcda0e7e825655fdffa2668d3f55
--- /dev/null
+++ b/lpips/weights/v0.0/squeeze.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c27abd3a0145541baa50990817df58d3759c3f8154949f42af3b59b4e042d0bf
+size 10057
diff --git a/lpips/weights/v0.0/vgg.pth b/lpips/weights/v0.0/vgg.pth
new file mode 100644
index 0000000000000000000000000000000000000000..715f20ad4bc778ff235258ca7f591b9bce0051de
--- /dev/null
+++ b/lpips/weights/v0.0/vgg.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9e4236260c3dd988fc79d2a48d645d885afcbb21f9fd595e6744cf7419b582c
+size 6735
diff --git a/lpips/weights/v0.1/alex.pth b/lpips/weights/v0.1/alex.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fa4067abc5d4da16a7204fd94776506e4868030e
--- /dev/null
+++ b/lpips/weights/v0.1/alex.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df73285e35b22355a2df87cdb6b70b343713b667eddbda73e1977e0c860835c0
+size 6009
diff --git a/lpips/weights/v0.1/squeeze.pth b/lpips/weights/v0.1/squeeze.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f892a84a130828b1c9e2e8156e84fc5a962c665d
--- /dev/null
+++ b/lpips/weights/v0.1/squeeze.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a5350f23600cb79923ce65bb07cbf57dca461329894153e05a1346bd531cf76
+size 10811
diff --git a/lpips/weights/v0.1/vgg.pth b/lpips/weights/v0.1/vgg.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f57dcf5cc764d61c8a460365847fb2137ff0a62d
--- /dev/null
+++ b/lpips/weights/v0.1/vgg.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a78928a0af1e5f0fcb1f3b9e8f8c3a2a5a3de244d830ad5c1feddc79b8432868
+size 7289
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c3c00b777bda3a42cf0f53f65cd1e4c94e93e181
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,9 @@
+ninja
+torch
+torchvision
+opencv-python
+numpy
+scikit-image
+scipy
+pillow
+tqdm
\ No newline at end of file
diff --git a/segmentation2face.py b/segmentation2face.py
new file mode 100755
index 0000000000000000000000000000000000000000..68668fd6ab991460e484ab42fdeb2ff99ec9c05c
--- /dev/null
+++ b/segmentation2face.py
@@ -0,0 +1,47 @@
+'''
+@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+@author: yangxy (yangtao9009@gmail.com)
+'''
+import os
+import cv2
+import glob
+import time
+import numpy as np
+from PIL import Image
+import __init_paths
+from face_model.face_gan import FaceGAN
+
+class Segmentation2Face(object):
+    def __init__(self, base_dir='./', size=1024, model=None, channel_multiplier=2, narrow=1, is_norm=True):
+        self.facegan = FaceGAN(base_dir, size, model, channel_multiplier, narrow, is_norm)
+
+    # make sure the face image is well aligned. Please refer to face_enhancement.py
+    def process(self, segf):
+        # from segmentations to faces
+        out = self.facegan.process(segf)
+
+        return out
+        
+
+if __name__=='__main__':
+    model = {'name':'GPEN-Seg2face-512', 'size':512}
+    
+    indir = 'examples/segs'
+    outdir = 'examples/outs-seg2face'
+    os.makedirs(outdir, exist_ok=True)
+
+    seg2face = Segmentation2Face(size=model['size'], model=model['name'], channel_multiplier=2, is_norm=False)
+
+    files = sorted(glob.glob(os.path.join(indir, '*.*g')))
+    for n, file in enumerate(files[:]):
+        filename = os.path.basename(file)
+        
+        segf = cv2.imread(file, cv2.IMREAD_COLOR)
+
+        realf = seg2face.process(segf)
+        
+        segf = cv2.resize(segf, (realf.shape[1], realf.shape[0]))  # cv2.resize expects (width, height)
+        cv2.imwrite(os.path.join(outdir, '.'.join(filename.split('.')[:-1])+'.jpg'), np.hstack((segf, realf)))
+        
+        if n%10==0: print(n, file)
+        
diff --git a/sr_model/arch_util.py b/sr_model/arch_util.py
new file mode 100755
index 0000000000000000000000000000000000000000..ce5b9d92f418d3f8b5b8887a24491f65660b33f9
--- /dev/null
+++ b/sr_model/arch_util.py
@@ -0,0 +1,125 @@
+import math
+import torch
+from torch import nn as nn
+from torch.nn import functional as F
+from torch.nn import init as init
+from torch.nn.modules.batchnorm import _BatchNorm
+
+@torch.no_grad()
+def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
+    """Initialize network weights.
+
+    Args:
+        module_list (list[nn.Module] | nn.Module): Modules to be initialized.
+        scale (float): Scale initialized weights, especially for residual
+            blocks. Default: 1.
+        bias_fill (float): The value to fill bias. Default: 0
+        kwargs (dict): Other arguments for initialization function.
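+
+    Example (illustrative usage):
+        >>> conv = nn.Conv2d(3, 64, 3, 1, 1)
+        >>> default_init_weights(conv, scale=0.1)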
+    """
+    if not isinstance(module_list, list):
+        module_list = [module_list]
+    for module in module_list:
+        for m in module.modules():
+            if isinstance(m, nn.Conv2d):
+                init.kaiming_normal_(m.weight, **kwargs)
+                m.weight.data *= scale
+                if m.bias is not None:
+                    m.bias.data.fill_(bias_fill)
+            elif isinstance(m, nn.Linear):
+                init.kaiming_normal_(m.weight, **kwargs)
+                m.weight.data *= scale
+                if m.bias is not None:
+                    m.bias.data.fill_(bias_fill)
+            elif isinstance(m, _BatchNorm):
+                init.constant_(m.weight, 1)
+                if m.bias is not None:
+                    m.bias.data.fill_(bias_fill)
+
+
+def make_layer(basic_block, num_basic_block, **kwarg):
+    """Make layers by stacking the same blocks.
+
+    Args:
+        basic_block (nn.module): nn.module class for basic block.
+        num_basic_block (int): number of blocks.
+
+    Returns:
+        nn.Sequential: Stacked blocks in nn.Sequential.
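+
+    Example (illustrative usage):
+        >>> body = make_layer(ResidualBlockNoBN, 4, num_feat=64)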
+    """
+    layers = []
+    for _ in range(num_basic_block):
+        layers.append(basic_block(**kwarg))
+    return nn.Sequential(*layers)
+
+
+class ResidualBlockNoBN(nn.Module):
+    """Residual block without BN.
+
+    It has a style of:
+        ---Conv-ReLU-Conv-+-
+         |________________|
+
+    Args:
+        num_feat (int): Channel number of intermediate features.
+            Default: 64.
+        res_scale (float): Residual scale. Default: 1.
+        pytorch_init (bool): If set to True, use pytorch default init,
+            otherwise, use default_init_weights. Default: False.
+    """
+
+    def __init__(self, num_feat=64, res_scale=1, pytorch_init=False):
+        super(ResidualBlockNoBN, self).__init__()
+        self.res_scale = res_scale
+        self.conv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
+        self.conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
+        self.relu = nn.ReLU(inplace=True)
+
+        if not pytorch_init:
+            default_init_weights([self.conv1, self.conv2], 0.1)
+
+    def forward(self, x):
+        identity = x
+        out = self.conv2(self.relu(self.conv1(x)))
+        return identity + out * self.res_scale
+
+
+class Upsample(nn.Sequential):
+    """Upsample module.
+
+    Args:
+        scale (int): Scale factor. Supported scales: 2^n and 3.
+        num_feat (int): Channel number of intermediate features.
+    """
+
+    def __init__(self, scale, num_feat):
+        m = []
+        if (scale & (scale - 1)) == 0:  # scale = 2^n
+            for _ in range(int(math.log(scale, 2))):
+                m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
+                m.append(nn.PixelShuffle(2))
+        elif scale == 3:
+            m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
+            m.append(nn.PixelShuffle(3))
+        else:
+            raise ValueError(f'scale {scale} is not supported. '
+                             'Supported scales: 2^n and 3.')
+        super(Upsample, self).__init__(*m)
+
+# TODO: may write a cpp file
+def pixel_unshuffle(x, scale):
+    """ Pixel unshuffle.
+
+    Args:
+        x (Tensor): Input feature with shape (b, c, hh, hw).
+        scale (int): Downsample ratio.
+
+    Returns:
+        Tensor: the pixel unshuffled feature.
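+
+    Example (illustrative usage):
+        >>> x = torch.randn(1, 3, 8, 8)
+        >>> pixel_unshuffle(x, scale=2).shape
+        torch.Size([1, 12, 4, 4])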
+    """
+    b, c, hh, hw = x.size()
+    out_channel = c * (scale**2)
+    assert hh % scale == 0 and hw % scale == 0
+    h = hh // scale
+    w = hw // scale
+    x_view = x.view(b, c, h, scale, w, scale)
+    return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
\ No newline at end of file
diff --git a/sr_model/real_esrnet.py b/sr_model/real_esrnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdf86641e6526b15250a6faa004a277d0c5f827b
--- /dev/null
+++ b/sr_model/real_esrnet.py
@@ -0,0 +1,58 @@
+import os
+import torch
+import numpy as np
+from rrdbnet_arch import RRDBNet
+from torch.nn import functional as F
+
+class RealESRNet(object):
+    def __init__(self, base_dir='./', model=None, scale=2, device='cuda'):
+        self.base_dir = base_dir
+        self.scale = scale
+        self.device = device
+        self.load_srmodel(base_dir, model)
+
+    def load_srmodel(self, base_dir, model):
+        self.srmodel = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=32, num_block=23, num_grow_ch=32, scale=self.scale)
+        if model is None:
+            loadnet = torch.load(os.path.join(self.base_dir, 'weights', 'rrdb_realesrnet_psnr.pth'))
+        else:
+            loadnet = torch.load(os.path.join(self.base_dir, 'weights', model+'.pth'))
+        #print(loadnet['params_ema'].keys)
+        self.srmodel.load_state_dict(loadnet['params_ema'], strict=True)
+        self.srmodel.eval()
+        self.srmodel = self.srmodel.to(self.device)
+
+    def process(self, img):
+        img = img.astype(np.float32) / 255.
+        img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
+        img = img.unsqueeze(0).to(self.device)
+
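+        # pad H and W up to a multiple of the pixel-unshuffle factor used inside RRDBNet
+        # (2 for the x2 model, 4 for the x1 model); the padding is cropped from the output below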
+        if self.scale == 2:
+            mod_scale = 2
+        elif self.scale == 1:
+            mod_scale = 4
+        else:
+            mod_scale = None
+        if mod_scale is not None:
+            h_pad, w_pad = 0, 0
+            _, _, h, w = img.size()
+            if (h % mod_scale != 0):
+                h_pad = (mod_scale - h % mod_scale)
+            if (w % mod_scale != 0):
+                w_pad = (mod_scale - w % mod_scale)
+            img = F.pad(img, (0, w_pad, 0, h_pad), 'reflect')
+
+        try:
+            with torch.no_grad():
+                output = self.srmodel(img)
+            # remove extra pad
+            if mod_scale is not None:
+                _, _, h, w = output.size()
+                output = output[:, :, 0:h - h_pad, 0:w - w_pad]
+            output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
+            output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))
+            output = (output * 255.0).round().astype(np.uint8)
+
+            return output
+        except Exception:  # typically CUDA out of memory
+            return None
\ No newline at end of file
diff --git a/sr_model/rrdbnet_arch.py b/sr_model/rrdbnet_arch.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e1f04c5aee5bcdcd2ddae5471843ff057d863b4
--- /dev/null
+++ b/sr_model/rrdbnet_arch.py
@@ -0,0 +1,116 @@
+import torch
+from torch import nn as nn
+from torch.nn import functional as F
+
+from arch_util import default_init_weights, make_layer, pixel_unshuffle
+
+
+class ResidualDenseBlock(nn.Module):
+    """Residual Dense Block.
+
+    Used in RRDB block in ESRGAN.
+
+    Args:
+        num_feat (int): Channel number of intermediate features.
+        num_grow_ch (int): Channels for each growth.
+    """
+
+    def __init__(self, num_feat=64, num_grow_ch=32):
+        super(ResidualDenseBlock, self).__init__()
+        self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
+        self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
+        self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1)
+        self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1)
+        self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)
+
+        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
+
+        # initialization
+        default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)
+
+    def forward(self, x):
+        x1 = self.lrelu(self.conv1(x))
+        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
+        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
+        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
+        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
+        # Empirically, we use 0.2 to scale the residual for better performance
+        return x5 * 0.2 + x
+
+
+class RRDB(nn.Module):
+    """Residual in Residual Dense Block.
+
+    Used in RRDB-Net in ESRGAN.
+
+    Args:
+        num_feat (int): Channel number of intermediate features.
+        num_grow_ch (int): Channels for each growth.
+    """
+
+    def __init__(self, num_feat, num_grow_ch=32):
+        super(RRDB, self).__init__()
+        self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
+        self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
+        self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)
+
+    def forward(self, x):
+        out = self.rdb1(x)
+        out = self.rdb2(out)
+        out = self.rdb3(out)
+        # Empirically, we use 0.2 to scale the residual for better performance
+        return out * 0.2 + x
+
+class RRDBNet(nn.Module):
+    """Networks consisting of Residual in Residual Dense Block, which is used
+    in ESRGAN.
+
+    ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.
+
+    We extend ESRGAN for scale x2 and scale x1.
+    Note: this is one option for handling scale 1 and scale 2 in RRDBNet.
+    We first employ pixel-unshuffle (an inverse operation of pixel-shuffle) to reduce the spatial size
+    and enlarge the channel size before feeding inputs into the main ESRGAN architecture.
+
+    Args:
+        num_in_ch (int): Channel number of inputs.
+        num_out_ch (int): Channel number of outputs.
+        num_feat (int): Channel number of intermediate features.
+            Default: 64
+        num_block (int): Block number in the trunk network. Default: 23.
+        num_grow_ch (int): Channels for each growth. Default: 32.
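+
+    Example (illustrative, matching the x2 setting used in sr_model/real_esrnet.py):
+        >>> net = RRDBNet(num_in_ch=3, num_out_ch=3, scale=2, num_feat=32, num_block=23, num_grow_ch=32)
+        >>> out = net(torch.randn(1, 3, 64, 64))  # out has shape (1, 3, 128, 128)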
+    """
+
+    def __init__(self, num_in_ch, num_out_ch, scale=4, num_feat=64, num_block=23, num_grow_ch=32):
+        super(RRDBNet, self).__init__()
+        self.scale = scale
+        if scale == 2:
+            num_in_ch = num_in_ch * 4
+        elif scale == 1:
+            num_in_ch = num_in_ch * 16
+        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
+        self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
+        self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+        # upsample
+        self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+        self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
+
+        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
+
+    def forward(self, x):
+        if self.scale == 2:
+            feat = pixel_unshuffle(x, scale=2)
+        elif self.scale == 1:
+            feat = pixel_unshuffle(x, scale=4)
+        else:
+            feat = x
+        feat = self.conv_first(feat)
+        body_feat = self.conv_body(self.body(feat))
+        feat = feat + body_feat
+        # upsample
+        feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest')))
+        feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest')))
+        out = self.conv_last(self.lrelu(self.conv_hr(feat)))
+        return out
diff --git a/train_simple.py b/train_simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ce3b194ac2275eb08b82990007d4f288032ef40
--- /dev/null
+++ b/train_simple.py
@@ -0,0 +1,414 @@
+'''
+This is a simplified training script for GPEN. It achieves performance comparable to that reported in the paper.
+
+@Created by rosinality
+
+@Modified by yangxy (yangtao9009@gmail.com)
+'''
+import argparse
+import math
+import random
+import os
+import cv2
+import glob
+from tqdm import tqdm
+
+import torch
+from torch import nn, autograd, optim
+from torch.nn import functional as F
+from torch.utils import data
+import torch.distributed as dist
+from torchvision import transforms, utils
+
+import __init_paths
+from data_loader.dataset_face import FaceDataset
+from face_model.gpen_model import FullGenerator, Discriminator
+
+from loss.id_loss import IDLoss
+from distributed import (
+    get_rank,
+    synchronize,
+    reduce_loss_dict,
+    reduce_sum,
+    get_world_size,
+)
+
+import lpips
+
+
+def data_sampler(dataset, shuffle, distributed):
+    if distributed:
+        return data.distributed.DistributedSampler(dataset, shuffle=shuffle)
+
+    if shuffle:
+        return data.RandomSampler(dataset)
+
+    else:
+        return data.SequentialSampler(dataset)
+
+
+def requires_grad(model, flag=True):
+    for p in model.parameters():
+        p.requires_grad = flag
+
+
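+# Exponential moving average of the generator parameters (used for g_ema), as in StyleGAN2.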
+def accumulate(model1, model2, decay=0.999):
+    par1 = dict(model1.named_parameters())
+    par2 = dict(model2.named_parameters())
+
+    for k in par1.keys():
+        par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)
+
+
+def sample_data(loader):
+    while True:
+        for batch in loader:
+            yield batch
+
+
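+# Non-saturating logistic GAN loss for the discriminator (softplus form used in StyleGAN2).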
+def d_logistic_loss(real_pred, fake_pred):
+    real_loss = F.softplus(-real_pred)
+    fake_loss = F.softplus(fake_pred)
+
+    return real_loss.mean() + fake_loss.mean()
+
+
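+# R1 regularization: gradient penalty on the discriminator, applied to real images only.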
+def d_r1_loss(real_pred, real_img):
+    grad_real, = autograd.grad(
+        outputs=real_pred.sum(), inputs=real_img, create_graph=True
+    )
+    grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean()
+
+    return grad_penalty
+
+
+def g_nonsaturating_loss(fake_pred, loss_funcs=None, fake_img=None, real_img=None, input_img=None):
+    smooth_l1_loss, id_loss = loss_funcs
+    
+    loss = F.softplus(-fake_pred).mean()
+    loss_l1 = smooth_l1_loss(fake_img, real_img)
+    loss_id, __, __ = id_loss(fake_img, real_img, input_img)
+    loss += 1.0*loss_l1 + 1.0*loss_id
+
+    return loss
+
+
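+# Path length regularization (StyleGAN2): encourages a fixed-magnitude image change
+# for a fixed-magnitude latent step, estimated with a random noise projection.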
+def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
+    noise = torch.randn_like(fake_img) / math.sqrt(
+        fake_img.shape[2] * fake_img.shape[3]
+    )
+    grad, = autograd.grad(
+        outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
+    )
+    path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
+
+    path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
+
+    path_penalty = (path_lengths - path_mean).pow(2).mean()
+
+    return path_penalty, path_mean.detach(), path_lengths
+
+def validation(model, lpips_func, args, device):
+    lq_files = sorted(glob.glob(os.path.join(args.val_dir, 'lq', '*.*')))
+    hq_files = sorted(glob.glob(os.path.join(args.val_dir, 'hq', '*.*')))
+
+    assert len(lq_files) == len(hq_files)
+
+    dist_sum = 0
+    model.eval()
+    for lq_f, hq_f in zip(lq_files, hq_files):
+        img_lq = cv2.imread(lq_f, cv2.IMREAD_COLOR)
+        img_t = torch.from_numpy(img_lq).to(device).permute(2, 0, 1).unsqueeze(0)
+        img_t = (img_t/255.-0.5)/0.5
+        img_t = F.interpolate(img_t, (args.size, args.size))
+        img_t = torch.flip(img_t, [1])
+        
+        with torch.no_grad():
+            img_out, __ = model(img_t)
+        
+            img_hq = lpips.im2tensor(lpips.load_image(hq_f)).to(device)
+            img_hq = F.interpolate(img_hq, (args.size, args.size))
+            dist_sum += lpips_func.forward(img_out, img_hq)
+    
+    return dist_sum.data/len(lq_files)
+
+
+def train(args, loader, generator, discriminator, losses, g_optim, d_optim, g_ema, lpips_func, device):
+    loader = sample_data(loader)
+
+    pbar = range(0, args.iter)
+
+    if get_rank() == 0:
+        pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)
+
+    mean_path_length = 0
+
+    d_loss_val = 0
+    r1_loss = torch.tensor(0.0, device=device)
+    g_loss_val = 0
+    path_loss = torch.tensor(0.0, device=device)
+    path_lengths = torch.tensor(0.0, device=device)
+    mean_path_length_avg = 0
+    loss_dict = {}
+
+    if args.distributed:
+        g_module = generator.module
+        d_module = discriminator.module
+
+    else:
+        g_module = generator
+        d_module = discriminator
+ 
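+    # EMA decay for g_ema: 0.5 ** (32/10000) ~ 0.9978, i.e. a half-life of roughly 10k images at a nominal batch size of 32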
+    accum = 0.5 ** (32 / (10 * 1000))
+
+    for idx in pbar:
+        i = idx + args.start_iter
+
+        if i > args.iter:
+            print('Done!')
+
+            break
+
+        degraded_img, real_img = next(loader)
+        degraded_img = degraded_img.to(device)
+        real_img = real_img.to(device)
+
+        requires_grad(generator, False)
+        requires_grad(discriminator, True)
+
+        fake_img, _ = generator(degraded_img)
+        fake_pred = discriminator(fake_img)
+
+        real_pred = discriminator(real_img)
+        d_loss = d_logistic_loss(real_pred, fake_pred)
+
+        loss_dict['d'] = d_loss
+        loss_dict['real_score'] = real_pred.mean()
+        loss_dict['fake_score'] = fake_pred.mean()
+
+        discriminator.zero_grad()
+        d_loss.backward()
+        d_optim.step()
+
+        d_regularize = i % args.d_reg_every == 0
+
+        if d_regularize:
+            real_img.requires_grad = True
+            real_pred = discriminator(real_img)
+            r1_loss = d_r1_loss(real_pred, real_img)
+
+            discriminator.zero_grad()
+            (args.r1 / 2 * r1_loss * args.d_reg_every + 0 * real_pred[0]).backward()
+
+            d_optim.step()
+
+        loss_dict['r1'] = r1_loss
+
+        requires_grad(generator, True)
+        requires_grad(discriminator, False)
+
+        fake_img, _ = generator(degraded_img)
+        fake_pred = discriminator(fake_img)
+        g_loss = g_nonsaturating_loss(fake_pred, losses, fake_img, real_img, degraded_img)
+
+        loss_dict['g'] = g_loss
+
+        generator.zero_grad()
+        g_loss.backward()
+        g_optim.step()
+
+        g_regularize = i % args.g_reg_every == 0
+
+        if g_regularize:
+            path_batch_size = max(1, args.batch // args.path_batch_shrink)
+
+            fake_img, latents = generator(degraded_img, return_latents=True)
+
+            path_loss, mean_path_length, path_lengths = g_path_regularize(
+                fake_img, latents, mean_path_length
+            )
+
+            generator.zero_grad()
+            weighted_path_loss = args.path_regularize * args.g_reg_every * path_loss
+
+            if args.path_batch_shrink:
+                weighted_path_loss += 0 * fake_img[0, 0, 0, 0]
+
+            weighted_path_loss.backward()
+
+            g_optim.step()
+
+            mean_path_length_avg = (
+                reduce_sum(mean_path_length).item() / get_world_size()
+            )
+
+        loss_dict['path'] = path_loss
+        loss_dict['path_length'] = path_lengths.mean()
+
+        accumulate(g_ema, g_module, accum)
+
+        loss_reduced = reduce_loss_dict(loss_dict)
+
+        d_loss_val = loss_reduced['d'].mean().item()
+        g_loss_val = loss_reduced['g'].mean().item()
+        r1_val = loss_reduced['r1'].mean().item()
+        path_loss_val = loss_reduced['path'].mean().item()
+        real_score_val = loss_reduced['real_score'].mean().item()
+        fake_score_val = loss_reduced['fake_score'].mean().item()
+        path_length_val = loss_reduced['path_length'].mean().item()
+
+        if get_rank() == 0:
+            pbar.set_description(
+                (
+                    f'd: {d_loss_val:.4f}; g: {g_loss_val:.4f}; r1: {r1_val:.4f}; '
+                )
+            )
+            
+            if i % args.save_freq == 0:
+                with torch.no_grad():
+                    g_ema.eval()
+                    sample, _ = g_ema(degraded_img)
+                    sample = torch.cat((degraded_img, sample, real_img), 0) 
+                    utils.save_image(
+                        sample,
+                        f'{args.sample}/{str(i).zfill(6)}.png',
+                        nrow=args.batch,
+                        normalize=True,
+                        range=(-1, 1),
+                    )
+
+                lpips_value = validation(g_ema, lpips_func, args, device)
+                print(f'{i}/{args.iter}: lpips: {lpips_value.cpu().numpy()[0][0][0][0]}')
+
+            if i and i % args.save_freq == 0:
+                torch.save(
+                    {
+                        'g': g_module.state_dict(),
+                        'd': d_module.state_dict(),
+                        'g_ema': g_ema.state_dict(),
+                        'g_optim': g_optim.state_dict(),
+                        'd_optim': d_optim.state_dict(),
+                    },
+                    f'{args.ckpt}/{str(i).zfill(6)}.pth',
+                )
+
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('--path', type=str, required=True)
+    parser.add_argument('--base_dir', type=str, default='./')
+    parser.add_argument('--iter', type=int, default=4000000)
+    parser.add_argument('--batch', type=int, default=4)
+    parser.add_argument('--size', type=int, default=256)
+    parser.add_argument('--channel_multiplier', type=int, default=2)
+    parser.add_argument('--narrow', type=float, default=1.0)
+    parser.add_argument('--r1', type=float, default=10)
+    parser.add_argument('--path_regularize', type=float, default=2)
+    parser.add_argument('--path_batch_shrink', type=int, default=2)
+    parser.add_argument('--d_reg_every', type=int, default=16)
+    parser.add_argument('--g_reg_every', type=int, default=4)
+    parser.add_argument('--save_freq', type=int, default=10000)
+    parser.add_argument('--lr', type=float, default=0.002)
+    parser.add_argument('--local_rank', type=int, default=0)
+    parser.add_argument('--ckpt', type=str, default='ckpts')
+    parser.add_argument('--pretrain', type=str, default=None)
+    parser.add_argument('--sample', type=str, default='sample')
+    parser.add_argument('--val_dir', type=str, default='val')
+
+    args = parser.parse_args()
+
+    os.makedirs(args.ckpt, exist_ok=True)
+    os.makedirs(args.sample, exist_ok=True)
+
+    device = 'cuda'
+
+    n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
+    args.distributed = n_gpu > 1
+
+    if args.distributed:
+        torch.cuda.set_device(args.local_rank)
+        torch.distributed.init_process_group(backend='nccl', init_method='env://')
+        synchronize()
+
+    args.latent = 512
+    args.n_mlp = 8
+
+    args.start_iter = 0
+
+    generator = FullGenerator(
+        args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier, narrow=args.narrow, device=device
+    ).to(device)
+    discriminator = Discriminator(
+        args.size, channel_multiplier=args.channel_multiplier, narrow=args.narrow, device=device
+    ).to(device)
+    g_ema = FullGenerator(
+        args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier, narrow=args.narrow, device=device
+    ).to(device)
+    g_ema.eval()
+    accumulate(g_ema, generator, 0)
+
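+    # Lazy regularization: R1 and path penalties are applied only every *_reg_every steps,
+    # so learning rates and Adam betas are rescaled to keep the effective optimizer settings equivalent.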
+    g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)
+    d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)
+    
+    g_optim = optim.Adam(
+        generator.parameters(),
+        lr=args.lr * g_reg_ratio,
+        betas=(0 ** g_reg_ratio, 0.99 ** g_reg_ratio),
+    )
+
+    d_optim = optim.Adam(
+        discriminator.parameters(),
+        lr=args.lr * d_reg_ratio,
+        betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),
+    )
+
+    if args.pretrain is not None:
+        print('load model:', args.pretrain)
+        
+        ckpt = torch.load(args.pretrain)
+
+        generator.load_state_dict(ckpt['g'])
+        discriminator.load_state_dict(ckpt['d'])
+        g_ema.load_state_dict(ckpt['g_ema'])
+            
+        g_optim.load_state_dict(ckpt['g_optim'])
+        d_optim.load_state_dict(ckpt['d_optim'])
+    
+    smooth_l1_loss = torch.nn.SmoothL1Loss().to(device)
+    id_loss = IDLoss(args.base_dir, device, ckpt_dict=None)
+    lpips_func = lpips.LPIPS(net='alex',version='0.1').to(device)
+    
+    if args.distributed:
+        generator = nn.parallel.DistributedDataParallel(
+            generator,
+            device_ids=[args.local_rank],
+            output_device=args.local_rank,
+            broadcast_buffers=False,
+        )
+
+        discriminator = nn.parallel.DistributedDataParallel(
+            discriminator,
+            device_ids=[args.local_rank],
+            output_device=args.local_rank,
+            broadcast_buffers=False,
+        )
+
+        id_loss = nn.parallel.DistributedDataParallel(
+            id_loss,
+            device_ids=[args.local_rank],
+            output_device=args.local_rank,
+            broadcast_buffers=False,
+        )
+
+    dataset = FaceDataset(args.path, args.size)
+    loader = data.DataLoader(
+        dataset,
+        batch_size=args.batch,
+        sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),
+        drop_last=True,
+    )
+
+    train(args, loader, generator, discriminator, [smooth_l1_loss, id_loss], g_optim, d_optim, g_ema, lpips_func, device)
+   
diff --git a/weights/README.md b/weights/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..91210d235c5edfbbdfdb4e52e06b537da9e31e0e
--- /dev/null
+++ b/weights/README.md
@@ -0,0 +1,5 @@
+## Pre-trained Model
+
+Download the RetinaFace model and our pre-trained models, then put them here.
+
+[RetinaFace-R50](https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/RetinaFace-R50.pth) | [ParseNet-latest](https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/ParseNet-latest.pth) | [GPEN-BFR-512](https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-BFR-512.pth) | [GPEN-BFR-512-D](https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-BFR-512-D.pth) | [GPEN-BFR-256](https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-BFR-256.pth) | [GPEN-BFR-256-D](https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-BFR-256-D.pth) | [GPEN-Colorization-1024](https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-Colorization-1024.pth) | [GPEN-Inpainting-1024](https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-Inpainting-1024.pth) | [GPEN-Seg2face-512](https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-Seg2face-512.pth) | [rrdb_realesrnet_psnr](https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/rrdb_realesrnet_psnr.pth)