# Copyright (c) Facebook, Inc. and its affiliates.

import math
from typing import Any, List

import torch
from torch import nn
from torch.nn import functional as F

from detectron2.config import CfgNode
from detectron2.structures import Instances

from .. import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType
from .chart import DensePoseChartLoss
from .registry import DENSEPOSE_LOSS_REGISTRY
from .utils import BilinearInterpolationHelper, LossDict


@DENSEPOSE_LOSS_REGISTRY.register()
class DensePoseChartWithConfidenceLoss(DensePoseChartLoss):
""" """
def __init__(self, cfg: CfgNode):
super().__init__(cfg)
self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg)
if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO:
self.uv_loss_with_confidences = IIDIsotropicGaussianUVLoss(
self.confidence_model_cfg.uv_confidence.epsilon
)
elif self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO:
self.uv_loss_with_confidences = IndepAnisotropicGaussianUVLoss(
self.confidence_model_cfg.uv_confidence.epsilon
)
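
    # A minimal sketch of the config switches that control the branch above
    # (assumed key names from the DensePose config extension; illustrative
    # only, not authoritative):
    #
    #   cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.ENABLED = True
    #   cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE = "iid_iso"  # or "indep_aniso"
    #   cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON = 0.01    # sigma lower bound
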
def produce_fake_densepose_losses_uv(self, densepose_predictor_outputs: Any) -> LossDict:
"""
Overrides fake losses for fine segmentation and U/V coordinates to
include computation graphs for additional confidence parameters.
These are used when no suitable ground truth data was found in a batch.
The loss has a value 0 and is primarily used to construct the computation graph,
so that `DistributedDataParallel` has similar graphs on all GPUs and can
perform reduction properly.
Args:
densepose_predictor_outputs: DensePose predictor outputs, an object
of a dataclass that is assumed to have the following attributes:
* fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
* u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
* v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
Return:
dict: str -> tensor: dict of losses with the following entries:
* `loss_densepose_U`: has value 0
* `loss_densepose_V`: has value 0
* `loss_densepose_I`: has value 0
"""
conf_type = self.confidence_model_cfg.uv_confidence.type
if self.confidence_model_cfg.uv_confidence.enabled:
loss_uv = (
densepose_predictor_outputs.u.sum() + densepose_predictor_outputs.v.sum()
) * 0
if conf_type == DensePoseUVConfidenceType.IID_ISO:
loss_uv += densepose_predictor_outputs.sigma_2.sum() * 0
elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO:
loss_uv += (
densepose_predictor_outputs.sigma_2.sum()
+ densepose_predictor_outputs.kappa_u.sum()
+ densepose_predictor_outputs.kappa_v.sum()
) * 0
return {"loss_densepose_UV": loss_uv}
else:
return super().produce_fake_densepose_losses_uv(densepose_predictor_outputs)
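
    # Note on the `* 0` pattern above: summing the predictor outputs and
    # multiplying by zero yields a zero-valued loss that still has the outputs
    # in its computation graph. A minimal illustration (hypothetical, not part
    # of the original module):
    #
    #   x = torch.zeros(3, requires_grad=True)
    #   (x.sum() * 0).backward()   # succeeds and fills x.grad with zeros
    #
    # This keeps gradient reduction consistent across DistributedDataParallel
    # replicas even when a batch contains no usable ground truth.
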
def produce_densepose_losses_uv(
self,
proposals_with_gt: List[Instances],
densepose_predictor_outputs: Any,
packed_annotations: Any,
interpolator: BilinearInterpolationHelper,
j_valid_fg: torch.Tensor,
    ) -> LossDict:
        """
        Computes the confidence-aware UV loss on valid foreground points when
        confidence estimation is enabled; otherwise falls back to the plain
        chart UV loss from the base class.
        """
conf_type = self.confidence_model_cfg.uv_confidence.type
if self.confidence_model_cfg.uv_confidence.enabled:
            # ground truth values and estimates at annotated foreground points
            u_gt = packed_annotations.u_gt[j_valid_fg]
u_est = interpolator.extract_at_points(densepose_predictor_outputs.u)[j_valid_fg]
v_gt = packed_annotations.v_gt[j_valid_fg]
v_est = interpolator.extract_at_points(densepose_predictor_outputs.v)[j_valid_fg]
sigma_2_est = interpolator.extract_at_points(densepose_predictor_outputs.sigma_2)[
j_valid_fg
]
if conf_type == DensePoseUVConfidenceType.IID_ISO:
return {
"loss_densepose_UV": (
self.uv_loss_with_confidences(u_est, v_est, sigma_2_est, u_gt, v_gt)
* self.w_points
)
}
            elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO:
kappa_u_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_u)[
j_valid_fg
]
kappa_v_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_v)[
j_valid_fg
]
return {
"loss_densepose_UV": (
self.uv_loss_with_confidences(
u_est, v_est, sigma_2_est, kappa_u_est, kappa_v_est, u_gt, v_gt
)
* self.w_points
)
}
return super().produce_densepose_losses_uv(
proposals_with_gt,
densepose_predictor_outputs,
packed_annotations,
interpolator,
j_valid_fg,
)
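
# A hypothetical lookup sketch, assuming the usual detectron2 `Registry` API,
# showing how the loss class registered above can be retrieved by name:
#
#   loss_cls = DENSEPOSE_LOSS_REGISTRY.get("DensePoseChartWithConfidenceLoss")
#   loss = loss_cls(cfg)
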
class IIDIsotropicGaussianUVLoss(nn.Module):
"""
Loss for the case of iid residuals with isotropic covariance:
$Sigma_i = sigma_i^2 I$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: float):
super(IIDIsotropicGaussianUVLoss, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)

    def forward(
self,
u: torch.Tensor,
v: torch.Tensor,
sigma_u: torch.Tensor,
target_u: torch.Tensor,
target_v: torch.Tensor,
):
# compute $\sigma_i^2$
# use sigma_lower_bound to avoid degenerate solution for variance
# (sigma -> 0)
sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
# compute \|delta_i\|^2
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2
# the total loss from the formula above:
loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta / sigma2)
return loss.sum()
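
# A minimal usage sketch for the isotropic loss (hypothetical shapes; note
# that the sigma input is a raw activation that `forward` passes through
# softplus before use):
#
#   loss_fn = IIDIsotropicGaussianUVLoss(sigma_lower_bound=0.01)
#   n = 100
#   u_est, v_est = torch.rand(n), torch.rand(n)
#   sigma_raw = torch.randn(n)
#   loss = loss_fn(u_est, v_est, sigma_raw, torch.rand(n), torch.rand(n))
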
class IndepAnisotropicGaussianUVLoss(nn.Module):
"""
Loss for the case of independent residuals with anisotropic covariances:
$Sigma_i = sigma_i^2 I + r_i r_i^T$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi)
+ log sigma_i^2 (sigma_i^2 + ||r_i||^2)
+ ||delta_i||^2 / sigma_i^2
- <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: float):
super(IndepAnisotropicGaussianUVLoss, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)

    def forward(
self,
u: torch.Tensor,
v: torch.Tensor,
sigma_u: torch.Tensor,
kappa_u_est: torch.Tensor,
kappa_v_est: torch.Tensor,
target_u: torch.Tensor,
target_v: torch.Tensor,
):
# compute $\sigma_i^2$
sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
# compute \|r_i\|^2
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
r_sqnorm2 = kappa_u_est**2 + kappa_v_est**2
delta_u = u - target_u
delta_v = v - target_v
# compute \|delta_i\|^2
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
delta_sqnorm = delta_u**2 + delta_v**2
delta_u_r_u = delta_u * kappa_u_est
delta_v_r_v = delta_v * kappa_v_est
# compute the scalar product <delta_i, r_i>
delta_r = delta_u_r_u + delta_v_r_v
# compute squared scalar product <delta_i, r_i>^2
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
delta_r_sqnorm = delta_r**2
denom2 = sigma2 * (sigma2 + r_sqnorm2)
loss = 0.5 * (
self.log2pi + torch.log(denom2) + delta_sqnorm / sigma2 - delta_r_sqnorm / denom2
)
return loss.sum()
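

if __name__ == "__main__":
    # Minimal smoke test (illustrative only; not part of the original module):
    # both confidence losses should return finite scalars on random inputs.
    n = 100
    u, v = torch.rand(n), torch.rand(n)
    u_gt, v_gt = torch.rand(n), torch.rand(n)
    sigma_raw = torch.randn(n)
    kappa_u, kappa_v = torch.randn(n), torch.randn(n)
    iid_loss = IIDIsotropicGaussianUVLoss(0.01)(u, v, sigma_raw, u_gt, v_gt)
    aniso_loss = IndepAnisotropicGaussianUVLoss(0.01)(
        u, v, sigma_raw, kappa_u, kappa_v, u_gt, v_gt
    )
    print(f"iid_iso: {iid_loss.item():.4f}  indep_aniso: {aniso_loss.item():.4f}")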