"""
DenseNet in pytorch
see the details in papaer
[1] Gao Huang, Zhuang Liu, Laurens van der Maaten, Kilian Q. Weinberger.
Densely Connected Convolutional Networks
https://arxiv.org/abs/1608.06993v5
"""
import math

import torch
import torch.nn as nn

class Bottleneck(nn.Module):
    """
    Bottleneck layer -- the building block of a Dense Block.

    Here growth_rate equals out_channels, i.e. the number of channels each
    layer contributes to the running feature map. A 1x1 convolution first
    reduces the channel count to 4 * growth_rate, then a 3x3 convolution
    brings it down to growth_rate.
    """
    # By convention the 1x1 convolution outputs 4x the growth rate.
    expansion = 4

    def __init__(self, in_channels, growth_rate):
        super(Bottleneck, self).__init__()
        zip_channels = self.expansion * growth_rate
        self.features = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(True),
            nn.Conv2d(in_channels, zip_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(zip_channels),
            nn.ReLU(True),
            nn.Conv2d(zip_channels, growth_rate, kernel_size=3, padding=1, bias=False)
        )

    def forward(self, x):
        out = self.features(x)
        # Concatenate the new growth_rate channels onto the input, so the
        # output carries in_channels + growth_rate channels.
        out = torch.cat([out, x], 1)
        return out
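
# A minimal shape check for Bottleneck (an illustrative helper, not part of
# the original file): one layer grows the channel dimension by exactly
# growth_rate while leaving the spatial size untouched.
def _check_bottleneck_growth():
    block = Bottleneck(in_channels=24, growth_rate=12)
    y = block(torch.randn(1, 24, 32, 32))
    assert y.shape == (1, 36, 32, 32)  # 24 input channels + 12 new ones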

class Transition(nn.Module):
    """
    Transition layer that changes the feature dimensions: BN, ReLU,
    1x1 convolution (Conv) and 2x2 average pooling.
    The 1x1 convolution first reduces the number of channels, then the
    2x2 average pooling shrinks the feature map.
    """
    # The 1x1 convolution compresses the model by reducing the channel
    # dimension, while the average pooling lowers the spatial resolution.
    def __init__(self, in_channels, out_channels):
        super(Transition, self).__init__()
        self.features = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(True),
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            nn.AvgPool2d(2)
        )

    def forward(self, x):
        out = self.features(x)
        return out
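
# A minimal shape check for Transition (an illustrative helper, not part of
# the original file): with reduction = 0.5 a transition halves both the
# channel count and the spatial resolution.
def _check_transition():
    trans = Transition(in_channels=96, out_channels=48)
    y = trans(torch.randn(1, 96, 32, 32))
    assert y.shape == (1, 48, 16, 16)  # channels 96 -> 48, spatial 32 -> 16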

class DenseNet(nn.Module):
    """
    DenseNet.

    The paper uses growth_rate = 12 and a compression factor theta
    (reduction) of 0.5; weights are initialized with kaiming_normal().
    num_blocks gives the number of Bottleneck layers in each dense stage.

    Like ResNet, DenseNet is a six-stage network (one conv stage, four dense
    stages and an average-pooling stage), followed by an FC layer.
    The first stage raises the channel count from 3 to 2 * growth_rate:

    (3, 32, 32) -> [Conv2d] -> (24, 32, 32) -> [layer1] -> (48, 16, 16) -> [layer2]
    -> (96, 8, 8) -> [layer3] -> (192, 4, 4) -> [layer4] -> (384, 4, 4) -> [AvgPool]
    -> (384, 1, 1) -> [Linear] -> (10)
    """

    def __init__(self, num_blocks, growth_rate=12, reduction=0.5, num_classes=10, init_weights=True):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        self.reduction = reduction

        num_channels = 2 * growth_rate

        self.features = nn.Conv2d(3, num_channels, kernel_size=3, padding=1, bias=False)
        self.layer1, num_channels = self._make_dense_layer(num_channels, num_blocks[0])
        self.layer2, num_channels = self._make_dense_layer(num_channels, num_blocks[1])
        self.layer3, num_channels = self._make_dense_layer(num_channels, num_blocks[2])
        self.layer4, num_channels = self._make_dense_layer(num_channels, num_blocks[3], transition=False)
        self.avg_pool = nn.Sequential(
            nn.BatchNorm2d(num_channels),
            nn.ReLU(True),
            nn.AvgPool2d(4),
        )
        self.classifier = nn.Linear(num_channels, num_classes)

        if init_weights:
            self._initialize_weights()

    def _make_dense_layer(self, in_channels, nblock, transition=True):
        layers = []
        # Stack nblock Bottleneck layers; each adds growth_rate channels.
        for i in range(nblock):
            layers += [Bottleneck(in_channels, self.growth_rate)]
            in_channels += self.growth_rate
        out_channels = in_channels
        if transition:
            # Compress the channel count by the reduction factor and halve
            # the spatial size.
            out_channels = int(math.floor(in_channels * self.reduction))
            layers += [Transition(in_channels, out_channels)]
        return nn.Sequential(*layers), out_channels
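
    # Worked example (densenet_cifar, growth_rate=12): layer1 starts at 24
    # channels and stacks 6 bottlenecks, giving 24 + 6 * 12 = 96 channels;
    # the transition with reduction=0.5 then compresses that to 48.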

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        out = self.features(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out
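
# A minimal sketch (not part of the original file) that replays the channel
# bookkeeping from the class docstring: each dense stage adds
# nblock * growth_rate channels, and every transition scales the count by
# the reduction factor.
def _trace_channels(num_blocks=(6, 12, 24, 16), growth_rate=12, reduction=0.5):
    channels = 2 * growth_rate  # 24 after the stem convolution
    for i, nblock in enumerate(num_blocks):
        channels += nblock * growth_rate
        if i < len(num_blocks) - 1:  # the last stage has no transition
            channels = int(math.floor(channels * reduction))
    return channels  # 384 for the densenet_cifar defaults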

def DenseNet121():
    return DenseNet([6, 12, 24, 16], growth_rate=32)

def DenseNet169():
    return DenseNet([6, 12, 32, 32], growth_rate=32)

def DenseNet201():
    return DenseNet([6, 12, 48, 32], growth_rate=32)

def DenseNet161():
    return DenseNet([6, 12, 36, 24], growth_rate=48)

def densenet_cifar():
    return DenseNet([6, 12, 24, 16], growth_rate=12)
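
# The block configurations above follow Table 1 of the paper: DenseNet-121,
# -169 and -201 use growth rate k = 32, DenseNet-161 uses k = 48, and
# densenet_cifar keeps the small k = 12 setting the paper uses for CIFAR.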

def test():
    net = densenet_cifar()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y.size())

    # torchinfo is only needed for the summary, so import it lazily.
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (1, 3, 32, 32))
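
# Standard entry-point guard (not present in the original file) so the smoke
# test only runs when this module is executed directly.
if __name__ == '__main__':
    test()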