import torch
import torch.nn as nn
import torchvision.models as tvm

# Device type string for torch.autocast: "cuda" when available, else "cpu".
device = "cuda" if torch.cuda.is_available() else "cpu"


class ResNet50(nn.Module):
    def __init__(
        self,
        pretrained=False,
        high_res=False,
        weights=None,
        dilation=None,
        freeze_bn=True,
        anti_aliased=False,
        early_exit=False,
        amp=False,
    ) -> None:
        super().__init__()
        if dilation is None:
            dilation = [False, False, False]
        if anti_aliased:
            # No anti-aliased backbone is wired up; fail loudly instead of
            # leaving self.net undefined and crashing later in forward().
            raise NotImplementedError("anti_aliased ResNet50 is not supported")
        if weights is not None:
            self.net = tvm.resnet50(
                weights=weights, replace_stride_with_dilation=dilation
            )
        else:
            self.net = tvm.resnet50(
                pretrained=pretrained, replace_stride_with_dilation=dilation
            )
        self.high_res = high_res
        self.freeze_bn = freeze_bn
        self.early_exit = early_exit
        self.amp = amp
        # Prefer bfloat16 under autocast where supported (wider dynamic range
        # than float16); fall back to float16 on older GPUs, float32 on CPU.
        if torch.cuda.is_available():
            if torch.cuda.is_bf16_supported():
                self.amp_dtype = torch.bfloat16
            else:
                self.amp_dtype = torch.float16
        else:
            self.amp_dtype = torch.float32

    def forward(self, x, **kwargs):
        with torch.autocast(device, enabled=self.amp, dtype=self.amp_dtype):
            net = self.net
            # Feature pyramid keyed by stride relative to the input.
            feats = {1: x}
            x = net.conv1(x)
            x = net.bn1(x)
            x = net.relu(x)
            feats[2] = x
            x = net.maxpool(x)
            x = net.layer1(x)
            feats[4] = x
            x = net.layer2(x)
            feats[8] = x
            if self.early_exit:
                return feats
            x = net.layer3(x)
            feats[16] = x
            x = net.layer4(x)
            feats[32] = x
            return feats

    def train(self, mode=True):
        super().train(mode)
        if self.freeze_bn:
            # Keep BatchNorm layers in eval mode so that their running
            # statistics stay frozen during fine-tuning.
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
        return self
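

# Hedged usage sketch (not part of the original module; the 224x224 input is
# an illustrative assumption). ResNet50 returns a dict of feature maps keyed
# by stride; with early_exit=True only strides 1-8 are produced.
def _example_resnet50_usage():
    encoder = ResNet50(pretrained=False, amp=False).eval()
    x = torch.randn(2, 3, 224, 224)
    feats = encoder(x)
    assert sorted(feats) == [1, 2, 4, 8, 16, 32]
    assert feats[4].shape == (2, 256, 56, 56)  # layer1: 256 channels, stride 4
    assert feats[32].shape == (2, 2048, 7, 7)  # layer4: 2048 channels, stride 32
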

class VGG19(nn.Module):
    def __init__(self, pretrained=False, amp=False) -> None:
        super().__init__()
        # Keep the VGG19-BN feature extractor up to (and including) the
        # fourth maxpool, i.e. everything needed for strides 1 through 8.
        self.layers = nn.ModuleList(tvm.vgg19_bn(pretrained=pretrained).features[:40])
        self.amp = amp
        # Same autocast dtype selection as in ResNet50.
        if torch.cuda.is_available():
            if torch.cuda.is_bf16_supported():
                self.amp_dtype = torch.bfloat16
            else:
                self.amp_dtype = torch.float16
        else:
            self.amp_dtype = torch.float32

    def forward(self, x, **kwargs):
        with torch.autocast(device, enabled=self.amp, dtype=self.amp_dtype):
            feats = {}
            scale = 1
            for layer in self.layers:
                if isinstance(layer, nn.MaxPool2d):
                    # Record the map entering each maxpool, keyed by stride.
                    feats[scale] = x
                    scale = scale * 2
                x = layer(x)
            return feats
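

# Hedged usage sketch (illustrative 224x224 input): VGG19 records the
# activation entering each of its first four maxpools, keyed by stride.
def _example_vgg19_usage():
    encoder = VGG19(pretrained=False, amp=False).eval()
    x = torch.randn(2, 3, 224, 224)
    feats = encoder(x)
    assert sorted(feats) == [1, 2, 4, 8]
    assert feats[1].shape == (2, 64, 224, 224)  # after the first conv block
    assert feats[8].shape == (2, 512, 28, 28)  # entering the fourth maxpool
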

class CNNandDinov2(nn.Module):
    def __init__(self, cnn_kwargs=None, amp=False, use_vgg=False, dinov2_weights=None):
        super().__init__()
        if dinov2_weights is None:
            dinov2_weights = torch.hub.load_state_dict_from_url(
                "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth",
                map_location="cpu",
            )
        from .transformer import vit_large

        # ViT-L/14 configuration matching the released DINOv2 checkpoint.
        vit_kwargs = dict(
            img_size=518,
            patch_size=14,
            init_values=1.0,
            ffn_layer="mlp",
            block_chunks=0,
        )
        dinov2_vitl14 = vit_large(**vit_kwargs).eval()
        dinov2_vitl14.load_state_dict(dinov2_weights)
        cnn_kwargs = cnn_kwargs if cnn_kwargs is not None else {}
        if not use_vgg:
            self.cnn = ResNet50(**cnn_kwargs)
        else:
            self.cnn = VGG19(**cnn_kwargs)
        self.amp = amp
        if torch.cuda.is_available():
            if torch.cuda.is_bf16_supported():
                self.amp_dtype = torch.bfloat16
            else:
                self.amp_dtype = torch.float16
        else:
            self.amp_dtype = torch.float32
        if self.amp:
            dinov2_vitl14 = dinov2_vitl14.to(self.amp_dtype)
        # Kept in a plain list (not a submodule) so DDP does not register the
        # frozen DINOv2 parameters.
        self.dinov2_vitl14 = [dinov2_vitl14]

    def train(self, mode: bool = True):
        # Only the CNN is trainable; the DINOv2 encoder above is hidden from
        # train()/eval() recursion and stays in eval mode.
        self.cnn.train(mode)
        return self

    def forward(self, x, upsample=False):
        B, C, H, W = x.shape
        feature_pyramid = self.cnn(x)
        if not upsample:
            with torch.no_grad():
                # Lazily move the frozen DINOv2 encoder to the input device.
                if self.dinov2_vitl14[0].device != x.device:
                    self.dinov2_vitl14[0] = (
                        self.dinov2_vitl14[0].to(x.device).to(self.amp_dtype)
                    )
                dinov2_features_16 = self.dinov2_vitl14[0].forward_features(
                    x.to(self.amp_dtype)
                )
                # Reshape the (B, N, 1024) patch tokens into a spatial map of
                # size (H/14, W/14) and use it as the coarse pyramid level.
                features_16 = (
                    dinov2_features_16["x_norm_patchtokens"]
                    .permute(0, 2, 1)
                    .reshape(B, 1024, H // 14, W // 14)
                )
                del dinov2_features_16
                feature_pyramid[16] = features_16
        return feature_pyramid
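

# Hedged usage sketch (downloads the DINOv2 ViT-L/14 checkpoint on first call;
# the 518x518 input is an illustrative size divisible by the patch size 14).
def _example_cnn_and_dinov2_usage():
    encoder = CNNandDinov2(cnn_kwargs=dict(early_exit=True), amp=False)
    x = torch.randn(1, 3, 518, 518)
    pyramid = encoder(x)
    # The CNN supplies strides 1-8; DINOv2 contributes the coarse level under
    # key 16 as a (B, 1024, H/14, W/14) map.
    assert pyramid[16].shape == (1, 1024, 37, 37)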