import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint

from .ViT_helper import to_2tuple, to_ntuple, DropPath


class Mlp(nn.Module):
    """MLP as implemented in timm."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        drops = to_2tuple(drop)

        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.drop1 = nn.Dropout(drops[0])
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop2 = nn.Dropout(drops[1])

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop1(x)
        x = self.fc2(x)
        x = self.drop2(x)
        return x


class Attention(nn.Module):
    """Self-attention as implemented in timm."""

    def __init__(self, d_model, nhead=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        assert d_model % nhead == 0, 'd_model needs to be divisible by nhead'
        self.nhead = nhead
        self.scale = (d_model // nhead) ** -0.5

        self.to_qkv = nn.Linear(d_model, d_model * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(d_model, d_model)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.size()
        qkv = self.to_qkv(x).reshape(B, N, 3, self.nhead, C // self.nhead).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)

        attn = (q @ k.transpose(-1, -2)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        return x


class Attention_Cross(nn.Module):
    """Cross-attention for a decoder layer; sometimes called "inter attention"."""

    def __init__(self, d_model, nhead=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        assert d_model % nhead == 0, 'd_model needs to be divisible by nhead'
        self.nhead = nhead
        self.scale = (d_model // nhead) ** -0.5

        self.to_q = nn.Linear(d_model, d_model, bias=qkv_bias)
        self.to_kv = nn.Linear(d_model, d_model * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(d_model, d_model)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, y):
        """
        Args:
            x: output of the previous decoder layer
            y: memory of the encoder layer
        """
        B, Nx, C = x.size()
        _, Ny, _ = y.size()
        q = self.to_q(x).reshape(B, Nx, self.nhead, C // self.nhead).permute(0, 2, 1, 3)
        kv = self.to_kv(y).reshape(B, Ny, 2, self.nhead, C // self.nhead).permute(2, 0, 3, 1, 4)
        k, v = kv.unbind(0)

        attn = (q @ k.transpose(-1, -2)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, Nx, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        return x
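

# Shape sketch: queries come from x (Nx tokens), keys/values from the encoder
# memory y (Ny tokens), so the attention map is (B, nhead, Nx, Ny).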


def split_int(num):
    """Split an integer into two (nearly) equal halves.

    Args:
        num (int): The input integer

    Returns:
        num_1 (int)
        num_2 (int)
    """
    if num % 2 == 0:
        num_1 = num_2 = num // 2
    else:
        num_1 = num // 2
        num_2 = num_1 + 1
    return num_1, num_2
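

# e.g. split_int(7) -> (3, 4), split_int(8) -> (4, 4)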


def unpad2D(input, pad):
    """Crop the input tensor according to pad (inverse operation of padding).

    Args:
        input (Tensor): (B, C, H, W)
        pad (Tuple of int): (left, right, top, bottom)

    Returns:
        output (Tensor): (B, C, new_H, new_W)
    """
    pad_W_left, pad_W_right, pad_H_top, pad_H_bottom = pad
    if pad_H_top == 0 and pad_H_bottom == 0 and not (pad_W_left == 0 and pad_W_right == 0):
        output = input[:, :, :, pad_W_left:-pad_W_right]
    elif pad_W_left == 0 and pad_W_right == 0 and not (pad_H_top == 0 and pad_H_bottom == 0):
        output = input[:, :, pad_H_top:-pad_H_bottom, :]
    elif pad_H_top == 0 and pad_H_bottom == 0 and pad_W_left == 0 and pad_W_right == 0:
        output = input
    else:
        output = input[:, :, pad_H_top:-pad_H_bottom, pad_W_left:-pad_W_right]
    return output


def seq_padding(x, dividable_size, input_resolution, pad_mode='constant'):
    """Padding for sequential data.

    Args:
        x (Tensor): (B, L, C)
        dividable_size (Tuple | int): dividable size
        input_resolution (Tuple): resolution of x

    Returns:
        x (Tensor): (B, new_L, C)
        output_resolution (Tuple): new resolution of x
        pad (Tuple of int): (left, right, top, bottom)
    """
    H, W = input_resolution
    B, L, C = x.shape
    assert L == H * W, 'Input of wrong size.'
    dividable_size = to_2tuple(dividable_size)
    x = x.permute(0, 2, 1).reshape(B, C, H, W)

    rema_H, rema_W = H % dividable_size[0], W % dividable_size[1]
    pad_H, pad_W = dividable_size[0] - rema_H, dividable_size[1] - rema_W

    pad_H_top, pad_H_bottom = split_int(pad_H) if rema_H != 0 else (0, 0)
    pad_W_left, pad_W_right = split_int(pad_W) if rema_W != 0 else (0, 0)

    x = F.pad(x, (pad_W_left, pad_W_right, pad_H_top, pad_H_bottom), pad_mode, 0)

    padded_H, padded_W = x.shape[-2:]
    x = x.reshape(B, C, -1).permute(0, 2, 1)
    return x, (padded_H, padded_W), (pad_W_left, pad_W_right, pad_H_top, pad_H_bottom)


def seq_unpad(x, input_resolution, pad):
    """Unpadding for sequential data.

    Args:
        x (Tensor): (B, L, C)
        input_resolution (Tuple): resolution of x
        pad (Tuple of int): (left, right, top, bottom)

    Returns:
        x (Tensor): (B, new_L, C)
        output_resolution (Tuple): new resolution of x
    """
    padded_H, padded_W = input_resolution
    B, L, C = x.shape
    assert L == padded_H * padded_W, 'Input of wrong size.'
    x = x.permute(0, 2, 1).reshape(B, C, padded_H, padded_W)

    x = unpad2D(x, pad=pad)

    H, W = x.shape[-2:]
    x = x.reshape(B, C, -1).permute(0, 2, 1)
    return x, (H, W)
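

# A minimal round-trip sketch (shapes are illustrative assumptions):
#   x = torch.randn(1, 10 * 13, 96)                     # (B, L, C) with H=10, W=13
#   x, (Hp, Wp), pad = seq_padding(x, 8, (10, 13))      # pad H and W up to multiples of 8
#   x, (H, W) = seq_unpad(x, (Hp, Wp), pad)             # back to the original 10 x 13 grid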


def window_partition(x, window_size):
    """Slightly modified to support arbitrary window_size / resolution combinations
    (non-divisible extents are cropped).

    Args:
        x: (B, H, W, C)
        window_size (tuple[int] | int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    window_size = to_2tuple(window_size)
    B, H, W, C = x.shape
    n_win_H = H // window_size[0]
    n_win_W = W // window_size[1]
    if not (H % window_size[0] == 0 and W % window_size[1] == 0):
        x = x[:, :n_win_H * window_size[0], :n_win_W * window_size[1], :]
    x = x.view(B, n_win_H, window_size[0], n_win_W, window_size[1], C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
    return windows


def window_reverse(windows, window_size, H, W):
    """Slightly modified to support arbitrary window_size / resolution combinations.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (tuple[int] | int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H', W', C) where H' and W' are H and W rounded down to multiples
            of the window size
    """
    window_size = to_2tuple(window_size)
    n_win_H = H // window_size[0]
    n_win_W = W // window_size[1]
    B = windows.shape[0] // (n_win_H * n_win_W)
    x = windows.view(B, n_win_H, n_win_W, window_size[0], window_size[1], -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, n_win_H * window_size[0], n_win_W * window_size[1], -1)
    return x
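

# Note: window_partition / window_reverse only round-trip exactly when H and W
# are multiples of the window size; otherwise the remainder rows/columns are
# cropped. Callers in this file pad first via seq_padding when needed.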


def seq_crop(x, dividable_size, input_resolution):
    """Crop sequential data so its resolution is divisible by dividable_size.

    Args:
        x (Tensor): (B, L, C)
        dividable_size (Tuple | int): dividable size
        input_resolution (Tuple): resolution of x

    Returns:
        x (Tensor): (B, new_L, C)
        output_resolution (Tuple): new resolution of x
    """
    H, W = input_resolution
    B, L, C = x.shape
    assert L == H * W, 'Input of wrong size.'
    dividable_size = to_2tuple(dividable_size)
    x = x.reshape(B, H, W, C)

    rema_H, rema_W = H % dividable_size[0], W % dividable_size[1]
    new_H, new_W = H - rema_H, W - rema_W
    if rema_H != 0 or rema_W != 0:
        x = x[:, :new_H, :new_W, :]

    x = x.reshape(B, -1, C)
    return x, (new_H, new_W)
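

# e.g. seq_crop(torch.randn(1, 10 * 13, 96), 8, (10, 13)) keeps the top-left
# 8 x 8 grid and returns a (1, 64, 96) tensor with resolution (8, 8).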


class PatchEmbed_Kai(nn.Module):
    """2D Image to Patch Embedding."""

    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None, flatten=True):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.in_chans = in_chans
        self.flatten = flatten

        self.proj = nn.Conv2d(self.in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        assert C == self.in_chans, 'Input image must have the same number of channels as configured at initialization.'
        x = self.proj(x)
        H, W = x.shape[2], x.shape[3]
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)
        x = self.norm(x)
        return x, (H, W)
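

# Minimal usage sketch (illustrative assumptions):
#   embed = PatchEmbed_Kai(patch_size=4, in_chans=3, embed_dim=96)
#   tokens, (H, W) = embed(torch.randn(1, 3, 224, 224))  # tokens: (1, 56 * 56, 96)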


class PatchMerging_Kai(nn.Module):
    """Patch Merging Layer.

    Args:
        input_resolution (tuple[int] | int): Resolution of input feature.
        d_model (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, d_model, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = to_2tuple(input_resolution)
        self.d_model = d_model
        self.reduction = nn.Linear(4 * d_model, 2 * d_model, bias=False)
        self.norm = norm_layer(4 * d_model)

    def forward(self, x):
        """
        Args:
            x (Tuple): (Tensor, arbitrary_input, (H, W)) with arbitrary_input (bool);
                if arbitrary_input is False, (H, W) is not required.
            B, H*W, C -> B, H/2*W/2, 4*C
        """
        arbitrary_input = x[1]
        if arbitrary_input:
            H, W = x[2]
        else:
            H, W = self.input_resolution

        x = x[0]
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"

        x = x.view(B, H, W, C)
        if H % 2 != 0:
            x = x[:, 0:-1, :, :]
        if W % 2 != 0:
            x = x[:, :, 0:-1, :]

        x0 = x[:, 0::2, 0::2, :]
        x1 = x[:, 1::2, 0::2, :]
        x2 = x[:, 0::2, 1::2, :]
        x3 = x[:, 1::2, 1::2, :]
        x = torch.cat([x0, x1, x2, x3], -1)
        H, W = x.shape[1], x.shape[2]
        x = x.view(B, -1, 4 * C)

        x = self.norm(x)
        x = self.reduction(x)

        return x, arbitrary_input, (H, W)
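

# PatchMerging_Kai gathers each 2x2 neighborhood into the channel dimension
# ((B, H*W, C) -> (B, H/2 * W/2, 4C)) and then projects 4C -> 2C, as in Swin,
# except that an odd H or W is first cropped by one row/column.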


class WindowAttention_Kai(nn.Module):
    def __init__(self, d_model, window_size, nhead, qkv_bias=True, attn_drop=0., proj_drop=0.):
        super().__init__()
        assert d_model % nhead == 0, 'd_model needs to be divisible by nhead'
        self.window_size = to_2tuple(window_size)
        self.nhead = nhead
        self.scale = (d_model // nhead) ** -0.5

        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), nhead)
        )
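
        # Precompute a pairwise relative-position index: for every pair of
        # positions inside a window, map their (dh, dw) offset to a row of the
        # bias table above (the same construction as in Swin Transformer).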
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))
        coords_flatten = torch.flatten(coords, 1)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        relative_coords[:, :, 0] += self.window_size[0] - 1
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(d_model, d_model * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(d_model, d_model)
        self.proj_drop = nn.Dropout(proj_drop)

        nn.init.trunc_normal_(self.relative_position_bias_table, std=.02)

    def forward(self, x, shape):
        H, W = shape
        Bi, Ni, Ci = x.size()
        assert Ni == H * W, "Inputs with wrong size."
        x = x.reshape(Bi, H, W, Ci)

        x = window_partition(x, self.window_size)
        x = x.reshape(-1, self.window_size[0] * self.window_size[1], Ci)

        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.nhead, C // self.nhead).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)

        attn = (q @ k.transpose(-2, -1)) * self.scale

        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], self.nhead)
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        attn = attn + relative_position_bias.unsqueeze(0)

        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        x = window_reverse(x, self.window_size, H, W)
        x = x.reshape(Bi, Ni, Ci)
        return x
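

# Note: this forward assumes H and W are already multiples of window_size
# (otherwise window_partition crops and the final reshape back to (Bi, Ni, Ci)
# would fail); StripAttentionBlock pads beforehand when handling arbitrary
# resolutions.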


class StripAttention(nn.Module):
    def __init__(self, d_model, nhead=8, strip_width=7, is_vertical=False, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.d_model = d_model
        self.strip_width = strip_width
        self.is_vertical = is_vertical

        self.attn = Attention(
            d_model=d_model,
            nhead=nhead,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=proj_drop,
        )

    def forward(self, x, shape):
        H, W = shape
        B, N, C = x.size()
        assert N == H * W, "Inputs with wrong size."
        x = x.reshape(B, H, W, C)

        if self.is_vertical:
            x = window_partition(x, (H, self.strip_width))
            x = x.reshape(-1, H * self.strip_width, C)
        else:
            x = window_partition(x, (self.strip_width, W))
            x = x.reshape(-1, W * self.strip_width, C)

        wins = self.attn(x)

        if self.is_vertical:
            x = window_reverse(wins, (H, self.strip_width), H, W)
        else:
            x = window_reverse(wins, (self.strip_width, W), H, W)

        x = x.reshape(B, N, C)
        return x
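

# A vertical strip is a full-height window of width strip_width; a horizontal
# strip is a full-width window of height strip_width. Each strip is flattened
# into a token sequence and passed through plain self-attention.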


class StripAttentionBlock(nn.Module):
    def __init__(self, d_model, input_resolution, nhead=8, strip_width=7,
                 mlp_ratio=4, qkv_bias=False, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.d_model = d_model
        self.input_resolution = to_2tuple(input_resolution)
        self.strip_width = strip_width

        self.norm1 = norm_layer(d_model)

        self.attn1 = StripAttention(
            d_model=d_model,
            nhead=nhead,
            strip_width=strip_width,
            is_vertical=False,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop
        )
        self.attn2 = StripAttention(
            d_model=d_model,
            nhead=nhead,
            strip_width=strip_width,
            is_vertical=True,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop
        )
        self.attn3 = WindowAttention_Kai(
            d_model=d_model,
            window_size=(strip_width * 2, strip_width * 2),
            nhead=nhead,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop
        )

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(d_model)
        mlp_hidden_dim = int(d_model * mlp_ratio)
        self.mlp = Mlp(d_model, hidden_features=mlp_hidden_dim, out_features=d_model, act_layer=act_layer, drop=drop)

    def forward(self, x):
        arbitrary_input = x[1]
        if arbitrary_input:
            H, W = x[2]
            x, (H, W), pad = seq_padding(x[0], dividable_size=self.strip_width * 2, input_resolution=(H, W),
                                         pad_mode='constant')
        else:
            H, W = self.input_resolution
            x = x[0]

        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)

        x1 = self.attn1(x, shape=(H, W))
        x2 = self.attn2(x, shape=(H, W))
        x3 = self.attn3(x, shape=(H, W))
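
        # Parameter-free fusion: treat x as a query and {x, x1, x2, x3} as
        # keys/values, so each token picks its own softmax-weighted mixture of
        # the identity path and the three attention branches.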
        q_x = x.unsqueeze(dim=2)
        k_x = torch.stack([x, x1, x2, x3], dim=2)
        attn_x = (q_x @ k_x.transpose(-1, -2)).softmax(dim=-1)
        x = attn_x @ k_x
        x = x.squeeze(dim=2)
        x = shortcut + self.drop_path(x)

        x = x + self.drop_path(self.mlp(self.norm2(x)))

        if arbitrary_input:
            x, (H, W) = seq_unpad(x, (H, W), pad)

        return (x, arbitrary_input, (H, W))


class BasicLayer_SA(nn.Module):
    def __init__(self, d_model, input_resolution, depth, nhead, strip_width,
                 mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
        super().__init__()
        self.d_model = d_model
        self.input_resolution = to_2tuple(input_resolution)
        self.depth = depth
        self.strip_width = list(to_ntuple(self.depth)(strip_width))
        self.use_checkpoint = use_checkpoint

        self.blocks = nn.ModuleList([
            StripAttentionBlock(
                d_model=d_model,
                input_resolution=self.input_resolution,
                nhead=nhead,
                strip_width=self.strip_width[i],
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                drop=drop,
                attn_drop=attn_drop,
                drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                norm_layer=norm_layer
            )
            for i in range(self.depth)
        ])

        if downsample is not None:
            self.downsample = downsample(self.input_resolution, d_model=d_model, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x):
        for blk in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)

        if self.downsample is not None:
            x = self.downsample(x)
        return x


class HeTransformerEncoder(nn.Module):
    def __init__(self, img_size=224, patch_size=4, in_chans=3,
                 embed_dim=96, depths=[2, 2, 6, 2], nhead=[3, 6, 12, 24],
                 strip_width=7, mlp_ratio=4., qkv_bias=True,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False):
        super().__init__()

        self.img_size = to_2tuple(img_size)
        self.patch_size = to_2tuple(patch_size)
        self.num_layers = len(depths)
        self.strip_width = list(to_ntuple(self.num_layers)(strip_width))
        self.embed_dim = embed_dim
        self.ape = ape
        self.printed_modes = set()
        self.patch_norm = patch_norm
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        self.patch_embed = PatchEmbed_Kai(
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None
        )
        self.patches_resolution = (self.img_size[0] // self.patch_size[0], self.img_size[1] // self.patch_size[1])
        self.num_patches = self.patches_resolution[0] * self.patches_resolution[1]

        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, self.num_patches, embed_dim))
            nn.init.trunc_normal_(self.absolute_pos_embed, std=.02)
        self.pos_drop = nn.Dropout(drop_rate)
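
        # Stochastic depth: per-block drop-path rates increase linearly from 0
        # to drop_path_rate across all blocks (the usual Swin-style schedule).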
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]

        self.layers = nn.ModuleList()
        for i in range(self.num_layers):
            layer = BasicLayer_SA(
                d_model=int(self.embed_dim * 2 ** i),
                input_resolution=(self.patches_resolution[0] // (2 ** i),
                                  self.patches_resolution[1] // (2 ** i)),
                depth=depths[i],
                nhead=nhead[i],
                strip_width=self.strip_width[i],
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])],
                norm_layer=norm_layer,
                downsample=PatchMerging_Kai if (i < self.num_layers - 1) else None,
                use_checkpoint=use_checkpoint
            )
            self.layers.append(layer)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    def add_z(self, x, alpha, mode):
        if mode not in self.printed_modes:
            if mode == 1:
                print("Using Hadamard product (element-wise multiplication)")
            elif mode == 2:
                print("Using addition")
            elif mode == 3:
                print("Using joint embedding (concatenation along the batch dimension)")
            else:
                raise ValueError("Invalid mode. Please choose 1, 2, or 3")

            self.printed_modes.add(mode)

        size = x[0].size()
        alpha_exp = alpha.expand(size)

        if mode == 1:
            x_concat_alpha = x[0].to(self.device) * alpha_exp.to(self.device)
        elif mode == 2:
            x_concat_alpha = x[0].to(self.device) + alpha_exp.to(self.device)
        elif mode == 3:
            x_concat_alpha = torch.cat((x[0].to(self.device), alpha_exp.to(self.device)))
        else:
            raise ValueError("Invalid mode. Please choose 1, 2, or 3")

        return x_concat_alpha.to(self.device)
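
    # Note on add_z: mode 3 concatenates along dim 0 and therefore doubles the
    # batch size, while modes 1 and 2 keep the shape. The hard-coded
    # self.device also assumes the module itself lives on that device.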

    def forward_features(self, x, alpha, mode):
        x, arbitrary_input = x[0], x[1]
        x, (H, W) = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)

        x = (x, arbitrary_input, (H, W))
        for layer in self.layers:
            x = layer(x)
            x = (self.add_z(x, alpha, mode), x[1], x[2])

        return x

    def forward(self, x, arbitrary_input=False, alpha=None, mode=2):
        if arbitrary_input:
            H, W = x.shape[2], x.shape[3]
            x = (x, arbitrary_input, (H, W))
        else:
            x = (x, arbitrary_input)

        x = self.forward_features(x, alpha, mode)

        return x
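

# Minimal end-to-end sketch (illustrative assumptions; alpha is broadcast to
# the feature shape inside add_z, and arbitrary_input=True lets the blocks pad
# the 7 x 7 final stage up to the 14 x 14 attention window):
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model = HeTransformerEncoder(img_size=224, patch_size=4, embed_dim=96).to(device)
#   x = torch.randn(1, 3, 224, 224, device=device)
#   feats, _, (H, W) = model(x, arbitrary_input=True, alpha=torch.ones(1), mode=2)
#   # feats: (1, 7 * 7, 768) after three PatchMerging_Kai downsamples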