RRFRRF committed
Commit bdbd148 • Parent(s): 438467e

add some cv models and update readme.md
This view is limited to 50 files because the commit contains too many changes.
- Image/AlexNet/code/model.py +81 -0
- Image/AlexNet/code/train.py +41 -0
- {Graph → Image/AlexNet/dataset}/.gitkeep +0 -0
- Image/{.gitkeep → AlexNet/model/.gitkeep} +0 -0
- Image/DenseNet/code/model.py +152 -0
- Image/DenseNet/code/train.py +29 -0
- Image/DenseNet/dataset/.gitkeep +0 -0
- Image/DenseNet/model/.gitkeep +0 -0
- Image/EfficientNet/code/model.py +267 -0
- Image/EfficientNet/code/train.py +29 -0
- Image/EfficientNet/dataset/.gitkeep +0 -0
- Image/EfficientNet/model/.gitkeep +0 -0
- Image/GoogLeNet/code/model.py +159 -0
- Image/GoogLeNet/code/train.py +29 -0
- Image/GoogLeNet/dataset/.gitkeep +0 -0
- Image/GoogLeNet/model/.gitkeep +0 -0
- Image/LeNet5/code/model.py +175 -0
- Image/LeNet5/code/train.py +29 -0
- Image/LeNet5/dataset/.gitkeep +0 -0
- Image/LeNet5/model/.gitkeep +0 -0
- Image/MobileNetv1/code/model.py +163 -0
- Image/MobileNetv1/code/train.py +29 -0
- Image/MobileNetv1/dataset/.gitkeep +0 -0
- Image/MobileNetv1/model/.gitkeep +0 -0
- Image/MobileNetv2/code/model.py +176 -0
- Image/MobileNetv2/code/train.py +29 -0
- Image/MobileNetv2/dataset/.gitkeep +0 -0
- Image/MobileNetv2/model/.gitkeep +0 -0
- Image/MobileNetv3/code/model.py +252 -0
- Image/MobileNetv3/code/train.py +29 -0
- Image/MobileNetv3/dataset/.gitkeep +0 -0
- Image/MobileNetv3/model/.gitkeep +0 -0
- Image/ResNet/code/model.py +259 -0
- Image/ResNet/code/train.py +29 -0
- Image/ResNet/dataset/.gitkeep +0 -0
- Image/ResNet/model/.gitkeep +0 -0
- Image/SENet/code/model.py +251 -0
- Image/SENet/code/train.py +29 -0
- Image/SENet/dataset/.gitkeep +0 -0
- Image/SENet/model/.gitkeep +0 -0
- Image/ShuffleNet/code/model.py +263 -0
- Image/ShuffleNet/code/train.py +29 -0
- Image/ShuffleNet/dataset/.gitkeep +0 -0
- Image/ShuffleNet/model/.gitkeep +0 -0
- Image/ShuffleNetv2/code/model.py +345 -0
- Image/ShuffleNetv2/code/train.py +29 -0
- Image/ShuffleNetv2/dataset/.gitkeep +0 -0
- Image/ShuffleNetv2/model/.gitkeep +0 -0
- Image/SwinTransformer/code/model.py +230 -0
- Image/SwinTransformer/code/train.py +43 -0
Image/AlexNet/code/model.py
ADDED
@@ -0,0 +1,81 @@
'''
AlexNet in PyTorch
'''

import torch
import torch.nn as nn

class AlexNet(nn.Module):  # AlexNet for CIFAR-10-sized inputs
    '''
    AlexNet model
    '''
    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        # Five convolutional stages; input is 32 x 32 x 3
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=1),   # (32-3+2)/1+1 = 32
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)                                # (32-2)/2+1 = 16
        )
        self.conv2 = nn.Sequential(                                                         # input 16 x 16 x 6
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=3, stride=1, padding=1),  # (16-3+2)/1+1 = 16
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)                                # (16-2)/2+1 = 8
        )
        self.conv3 = nn.Sequential(                                                         # input 8 x 8 x 16
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1), # (8-3+2)/1+1 = 8
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)                                # (8-2)/2+1 = 4
        )
        self.conv4 = nn.Sequential(                                                         # input 4 x 4 x 32
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1), # (4-3+2)/1+1 = 4
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)                                # (4-2)/2+1 = 2
        )
        self.conv5 = nn.Sequential(                                                         # input 2 x 2 x 64
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),# (2-3+2)/1+1 = 2
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)                                # (2-2)/2+1 = 1
        )  # the last conv stage outputs 1 x 1 x 128
        # Fully connected layers
        self.dense = nn.Sequential(
            nn.Linear(128, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, num_classes)
        )

        # Initialize weights
        self._initialize_weights()

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = x.view(x.size()[0], -1)
        x = self.dense(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

def test():
    net = AlexNet()
    x = torch.randn(2, 3, 32, 32)
    y = net(x)
    print(y.size())
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (3, 32, 32))
Image/AlexNet/code/train.py
ADDED
@@ -0,0 +1,41 @@
import sys
import os
import argparse
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import AlexNet

def parse_args():
    parser = argparse.ArgumentParser(description='Train the AlexNet model')
    parser.add_argument('--gpu', type=int, default=0, help='GPU device index (0,1,2,3)')
    parser.add_argument('--batch-size', type=int, default=128, help='batch size')
    parser.add_argument('--epochs', type=int, default=200, help='number of training epochs')
    parser.add_argument('--lr', type=float, default=0.1, help='learning rate')
    return parser.parse_args()

def main():
    # Parse command-line arguments
    args = parse_args()

    # Build the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size)

    # Create the model
    model = AlexNet()

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=args.epochs,
        lr=args.lr,
        device=f'cuda:{args.gpu}',
        save_dir='../model',
        model_name='alexnet'
    )

if __name__ == '__main__':
    main()
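All of the train scripts in this commit import get_cifar10_dataloaders and train_model from a shared utils package that does not appear in this truncated diff view. Below is a minimal sketch of what those helpers could look like, with the signatures inferred from the call sites above; the augmentation, optimizer, and checkpointing choices are assumptions, not the committed code.

# utils/dataset_utils.py and utils/train_utils.py -- hypothetical sketch,
# reconstructed only from how the train scripts call these helpers.
import os
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

def get_cifar10_dataloaders(batch_size=128):
    # Standard CIFAR-10 pipeline (the augmentation choices here are assumptions)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    transform_test = transforms.Compose([transforms.ToTensor()])
    trainset = torchvision.datasets.CIFAR10('../dataset', train=True, download=True, transform=transform_train)
    testset = torchvision.datasets.CIFAR10('../dataset', train=False, download=True, transform=transform_test)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
    return trainloader, testloader

def train_model(model, trainloader, testloader, epochs, lr, device, save_dir, model_name):
    # SGD + best-checkpoint loop; the real utils module may differ in details
    model = model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
    best_acc = 0.0
    for epoch in range(epochs):
        model.train()
        for inputs, targets in trainloader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            loss = criterion(model(inputs), targets)
            loss.backward()
            optimizer.step()
        # Evaluate on the test set and keep the best checkpoint
        model.eval()
        correct = total = 0
        with torch.no_grad():
            for inputs, targets in testloader:
                inputs, targets = inputs.to(device), targets.to(device)
                preds = model(inputs).argmax(dim=1)
                correct += (preds == targets).sum().item()
                total += targets.size(0)
        acc = 100.0 * correct / total
        if acc > best_acc:
            best_acc = acc
            os.makedirs(save_dir, exist_ok=True)
            torch.save(model.state_dict(), os.path.join(save_dir, f'{model_name}_best.pth'))
        print(f'epoch {epoch + 1}/{epochs}: test acc {acc:.2f}% (best {best_acc:.2f}%)')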
{Graph → Image/AlexNet/dataset}/.gitkeep
RENAMED
File without changes
Image/{.gitkeep → AlexNet/model/.gitkeep}
RENAMED
File without changes
Image/DenseNet/code/model.py
ADDED
@@ -0,0 +1,152 @@
"""
DenseNet in PyTorch

See the details in the paper:
[1] Gao Huang, Zhuang Liu, Laurens van der Maaten, Kilian Q. Weinberger.
    Densely Connected Convolutional Networks
    https://arxiv.org/abs/1608.06993v5
"""
import torch
import torch.nn as nn
import math

class Bottleneck(nn.Module):
    """
    Bottleneck layer of a dense block.
    Here growth_rate is the number of output channels each block contributes.
    A 1x1 conv first maps the input to 4 * growth_rate channels, then a 3x3 conv
    reduces them to growth_rate.
    """
    # By convention the 1x1 conv uses 4x the growth rate
    expansion = 4

    def __init__(self, in_channels, growth_rate):
        super(Bottleneck, self).__init__()
        zip_channels = self.expansion * growth_rate
        self.features = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(True),
            nn.Conv2d(in_channels, zip_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(zip_channels),
            nn.ReLU(True),
            nn.Conv2d(zip_channels, growth_rate, kernel_size=3, padding=1, bias=False)
        )

    def forward(self, x):
        out = self.features(x)
        out = torch.cat([out, x], 1)
        return out


class Transition(nn.Module):
    """
    Transition layer that changes dimensionality: BN, ReLU, 1x1 conv, 2x2 average pooling.
    The 1x1 conv first reduces the channel count, then 2x2 average pooling shrinks the feature map.
    """
    # The 1x1 conv compresses the model by reducing channels; average pooling halves the spatial size.
    def __init__(self, in_channels, out_channels):
        super(Transition, self).__init__()
        self.features = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(True),
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            nn.AvgPool2d(2)
        )

    def forward(self, x):
        out = self.features(x)
        return out

class DenseNet(nn.Module):
    """
    DenseNet
    In the paper, growth_rate is 12 and the compression factor θ (reduction) is 0.5,
    with kaiming_normal() initialization.
    num_blocks is the number of dense blocks in each stage.
    Like ResNet, DenseNet is a six-stage network (one conv stage + four dense stages
    + average pooling), followed by an FC layer.
    The first stage maps the channel count from 3 to 2 * growth_rate.

    (3, 32, 32) -> [Conv2d] -> (24, 32, 32) -> [layer1] -> (48, 16, 16) -> [layer2]
    -> (96, 8, 8) -> [layer3] -> (192, 4, 4) -> [layer4] -> (384, 4, 4) -> [AvgPool]
    -> (384, 1, 1) -> [Linear] -> (10)
    """
    def __init__(self, num_blocks, growth_rate=12, reduction=0.5, num_classes=10, init_weights=True):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        self.reduction = reduction

        num_channels = 2 * growth_rate

        self.features = nn.Conv2d(3, num_channels, kernel_size=3, padding=1, bias=False)
        self.layer1, num_channels = self._make_dense_layer(num_channels, num_blocks[0])
        self.layer2, num_channels = self._make_dense_layer(num_channels, num_blocks[1])
        self.layer3, num_channels = self._make_dense_layer(num_channels, num_blocks[2])
        self.layer4, num_channels = self._make_dense_layer(num_channels, num_blocks[3], transition=False)
        self.avg_pool = nn.Sequential(
            nn.BatchNorm2d(num_channels),
            nn.ReLU(True),
            nn.AvgPool2d(4),
        )
        self.classifier = nn.Linear(num_channels, num_classes)

        if init_weights:
            self._initialize_weights()

    def _make_dense_layer(self, in_channels, nblock, transition=True):
        layers = []
        for i in range(nblock):
            layers += [Bottleneck(in_channels, self.growth_rate)]
            in_channels += self.growth_rate
        out_channels = in_channels
        if transition:
            out_channels = int(math.floor(in_channels * self.reduction))
            layers += [Transition(in_channels, out_channels)]
        return nn.Sequential(*layers), out_channels

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        out = self.features(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out

def DenseNet121():
    return DenseNet([6,12,24,16], growth_rate=32)

def DenseNet169():
    return DenseNet([6,12,32,32], growth_rate=32)

def DenseNet201():
    return DenseNet([6,12,48,32], growth_rate=32)

def DenseNet161():
    return DenseNet([6,12,36,24], growth_rate=48)

def densenet_cifar():
    return DenseNet([6,12,24,16], growth_rate=12)


def test():
    net = densenet_cifar()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y.size())
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net,(1,3,32,32))
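The shape trace in the DenseNet docstring can be checked with a few lines of arithmetic. A quick sanity check (not part of the commit) for the densenet_cifar() configuration (growth_rate=12, reduction=0.5, num_blocks=[6,12,24,16]), mirroring the channel bookkeeping in _make_dense_layer:

import math

growth_rate, reduction = 12, 0.5
channels = 2 * growth_rate                      # stem output: 24 channels
for i, nblock in enumerate([6, 12, 24, 16]):
    channels += nblock * growth_rate            # each bottleneck adds growth_rate channels
    if i < 3:                                   # layer4 has no transition layer
        channels = int(math.floor(channels * reduction))
    print(f'after layer{i + 1}: {channels} channels')
# Prints 48, 96, 192, 384 -- matching the (24)->(48)->(96)->(192)->(384) trace above.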
Image/DenseNet/code/train.py
ADDED
@@ -0,0 +1,29 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import DenseNet121

def main():
    # Build the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # Create the model
    model = DenseNet121()

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.1,
        device='cuda',
        save_dir='../model',
        model_name='densenet121'
    )

if __name__ == '__main__':
    main()
Image/DenseNet/dataset/.gitkeep
ADDED
File without changes
Image/DenseNet/model/.gitkeep
ADDED
File without changes
Image/EfficientNet/code/model.py
ADDED
@@ -0,0 +1,267 @@
'''
EfficientNet in PyTorch.

Paper: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks"
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py

Key points:
1. Uses MBConv as the basic building block, with an SE attention mechanism
2. Compound scaling jointly adjusts the network's width, depth, and resolution
3. Uses the Swish activation and DropConnect regularization
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import math

def swish(x):
    """Swish activation: x * sigmoid(x)"""
    return x * x.sigmoid()

def drop_connect(x, drop_ratio):
    """DropConnect regularization

    Args:
        x: input tensor
        drop_ratio: drop probability

    Returns:
        the tensor after DropConnect
    """
    keep_ratio = 1.0 - drop_ratio
    mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
    mask.bernoulli_(keep_ratio)
    x.div_(keep_ratio)
    x.mul_(mask)
    return x

class SE(nn.Module):
    '''Squeeze-and-Excitation attention module

    Args:
        in_channels: number of input channels
        se_channels: number of channels in the SE hidden layer
    '''
    def __init__(self, in_channels, se_channels):
        super(SE, self).__init__()
        self.se1 = nn.Conv2d(in_channels, se_channels, kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_channels, in_channels, kernel_size=1, bias=True)

    def forward(self, x):
        out = F.adaptive_avg_pool2d(x, (1, 1))  # global average pooling
        out = swish(self.se1(out))
        out = self.se2(out).sigmoid()
        return x * out  # feature recalibration

class MBConv(nn.Module):
    '''MBConv block: Mobile Inverted Bottleneck Convolution

    Args:
        in_channels: number of input channels
        out_channels: number of output channels
        kernel_size: convolution kernel size
        stride: stride
        expand_ratio: expansion ratio
        se_ratio: squeeze ratio of the SE module
        drop_rate: DropConnect drop probability
    '''
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expand_ratio=1,
                 se_ratio=0.25,
                 drop_rate=0.):
        super(MBConv, self).__init__()
        self.stride = stride
        self.drop_rate = drop_rate
        self.expand_ratio = expand_ratio

        # Expansion phase
        channels = expand_ratio * in_channels
        self.conv1 = nn.Conv2d(in_channels, channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)

        # Depthwise conv
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=kernel_size, stride=stride,
                               padding=(1 if kernel_size == 3 else 2), groups=channels, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

        # SE layers
        se_channels = int(in_channels * se_ratio)
        self.se = SE(channels, se_channels)

        # Output phase
        self.conv3 = nn.Conv2d(channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)

        # Shortcut connection
        self.has_skip = (stride == 1) and (in_channels == out_channels)

    def forward(self, x):
        # Expansion
        out = x if self.expand_ratio == 1 else swish(self.bn1(self.conv1(x)))
        # Depthwise convolution
        out = swish(self.bn2(self.conv2(out)))
        # Squeeze-and-excitation
        out = self.se(out)
        # Pointwise convolution
        out = self.bn3(self.conv3(out))
        # Shortcut
        if self.has_skip:
            if self.training and self.drop_rate > 0:
                out = drop_connect(out, self.drop_rate)
            out = out + x
        return out

class EfficientNet(nn.Module):
    '''EfficientNet model

    Args:
        width_coefficient: width multiplier
        depth_coefficient: depth multiplier
        dropout_rate: dropout rate of the classification layer
        num_classes: number of classes
    '''
    def __init__(self,
                 width_coefficient=1.0,
                 depth_coefficient=1.0,
                 dropout_rate=0.2,
                 num_classes=10):
        super(EfficientNet, self).__init__()

        # Model configuration
        cfg = {
            'num_blocks': [1, 2, 2, 3, 3, 4, 1],              # blocks per stage
            'expansion': [1, 6, 6, 6, 6, 6, 6],               # expansion ratios
            'out_channels': [16, 24, 40, 80, 112, 192, 320],  # output channels
            'kernel_size': [3, 3, 5, 3, 5, 5, 3],             # kernel sizes
            'stride': [1, 2, 2, 2, 1, 2, 1],                  # strides
            'dropout_rate': dropout_rate,
            'drop_connect_rate': 0.2,
        }

        self.cfg = cfg
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient

        # Stem layer
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)

        # Build blocks
        self.layers = self._make_layers(in_channels=32)

        # Head layer. Scale the final channel count the same way _make_layers does,
        # so the linear layer matches for non-integer width coefficients
        # (the original `out_channels[-1] * int(width_coefficient)` disagreed with
        # _make_layers for e.g. width_coefficient=1.1).
        final_channels = int(cfg['out_channels'][-1] * width_coefficient)
        self.linear = nn.Linear(final_channels, num_classes)

    def _make_layers(self, in_channels):
        layers = []
        cfg = [self.cfg[k] for k in ['expansion', 'out_channels', 'num_blocks', 'kernel_size', 'stride']]
        blocks = sum(self.cfg['num_blocks'])
        b = 0  # running block index for the drop_connect_rate schedule

        for expansion, out_channels, num_blocks, kernel_size, stride in zip(*cfg):
            out_channels = int(out_channels * self.width_coefficient)
            num_blocks = int(math.ceil(num_blocks * self.depth_coefficient))

            for i in range(num_blocks):
                stride_i = stride if i == 0 else 1
                drop_rate = self.cfg['drop_connect_rate'] * b / blocks
                layers.append(
                    MBConv(in_channels,
                           out_channels,
                           kernel_size,
                           stride_i,
                           expansion,
                           se_ratio=0.25,
                           drop_rate=drop_rate))
                in_channels = out_channels
                b += 1

        return nn.Sequential(*layers)

    def forward(self, x):
        # Stem
        out = swish(self.bn1(self.conv1(x)))
        # Blocks
        out = self.layers(out)
        # Head
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        if self.training and self.cfg['dropout_rate'] > 0:
            out = F.dropout(out, p=self.cfg['dropout_rate'])
        out = self.linear(out)
        return out

def EfficientNetB0(num_classes=10):
    """EfficientNet-B0"""
    return EfficientNet(width_coefficient=1.0,
                        depth_coefficient=1.0,
                        dropout_rate=0.2,
                        num_classes=num_classes)

def EfficientNetB1(num_classes=10):
    """EfficientNet-B1"""
    return EfficientNet(width_coefficient=1.0,
                        depth_coefficient=1.1,
                        dropout_rate=0.2,
                        num_classes=num_classes)

def EfficientNetB2(num_classes=10):
    """EfficientNet-B2"""
    return EfficientNet(width_coefficient=1.1,
                        depth_coefficient=1.2,
                        dropout_rate=0.3,
                        num_classes=num_classes)

def EfficientNetB3(num_classes=10):
    """EfficientNet-B3"""
    return EfficientNet(width_coefficient=1.2,
                        depth_coefficient=1.4,
                        dropout_rate=0.3,
                        num_classes=num_classes)

def EfficientNetB4(num_classes=10):
    """EfficientNet-B4"""
    return EfficientNet(width_coefficient=1.4,
                        depth_coefficient=1.8,
                        dropout_rate=0.4,
                        num_classes=num_classes)

def EfficientNetB5(num_classes=10):
    """EfficientNet-B5"""
    return EfficientNet(width_coefficient=1.6,
                        depth_coefficient=2.2,
                        dropout_rate=0.4,
                        num_classes=num_classes)

def EfficientNetB6(num_classes=10):
    """EfficientNet-B6"""
    return EfficientNet(width_coefficient=1.8,
                        depth_coefficient=2.6,
                        dropout_rate=0.5,
                        num_classes=num_classes)

def EfficientNetB7(num_classes=10):
    """EfficientNet-B7"""
    return EfficientNet(width_coefficient=2.0,
                        depth_coefficient=3.1,
                        dropout_rate=0.5,
                        num_classes=num_classes)

def test():
    """Smoke test"""
    net = EfficientNetB0()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y.size())
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (1, 3, 32, 32))

if __name__ == '__main__':
    test()
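The B0-B7 factory functions differ only in the coefficients they pass to compound scaling. How those coefficients expand the base configuration can be previewed with the same rounding rules _make_layers applies (a standalone illustration, not part of the committed code):

import math

out_channels = [16, 24, 40, 80, 112, 192, 320]
num_blocks = [1, 2, 2, 3, 3, 4, 1]

# (width, depth) pairs taken from the EfficientNetB0/B2/B7 factories above
for name, w, d in [('B0', 1.0, 1.0), ('B2', 1.1, 1.2), ('B7', 2.0, 3.1)]:
    widths = [int(c * w) for c in out_channels]      # width scaling, truncated as in _make_layers
    depths = [math.ceil(n * d) for n in num_blocks]  # depth scaling, rounded up as in _make_layers
    print(name, 'channels:', widths, 'blocks:', depths, 'total blocks:', sum(depths))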
Image/EfficientNet/code/train.py
ADDED
@@ -0,0 +1,29 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import EfficientNetB0

def main():
    # Build the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # Create the model
    model = EfficientNetB0()

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.1,
        device='cuda',
        save_dir='../model',
        model_name='efficientnet_b0'
    )

if __name__ == '__main__':
    main()
Image/EfficientNet/dataset/.gitkeep
ADDED
File without changes
Image/EfficientNet/model/.gitkeep
ADDED
File without changes
Image/GoogLeNet/code/model.py
ADDED
@@ -0,0 +1,159 @@
'''
GoogLeNet in PyTorch.

Paper: "Going Deeper with Convolutions"
Reference: https://arxiv.org/abs/1409.4842

Key points:
1. Inception modules extract features at multiple scales
2. 1x1 convolutions reduce dimensionality and computation
3. Global average pooling replaces the fully connected layers
4. Auxiliary classifiers aid training (not included in this implementation)
'''
import torch
import torch.nn as nn

class Inception(nn.Module):
    '''Inception module

    Args:
        in_planes: number of input channels
        n1x1: output channels of the 1x1 branch
        n3x3red: reduction channels of the 3x3 branch
        n3x3: output channels of the 3x3 branch
        n5x5red: reduction channels of the 5x5 branch
        n5x5: output channels of the 5x5 branch
        pool_planes: output channels of the pooling branch
    '''
    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()

        # 1x1 conv branch
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_planes, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(True),
        )

        # 1x1 -> 3x3 conv branch
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_planes, n3x3red, kernel_size=1),
            nn.BatchNorm2d(n3x3red),
            nn.ReLU(True),
            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(True),
        )

        # 1x1 -> 5x5 conv branch (implemented as two stacked 3x3 convs)
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_planes, n5x5red, kernel_size=1),
            nn.BatchNorm2d(n5x5red),
            nn.ReLU(True),
            nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
        )

        # 3x3 max pool -> 1x1 conv branch
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(in_planes, pool_planes, kernel_size=1),
            nn.BatchNorm2d(pool_planes),
            nn.ReLU(True),
        )

    def forward(self, x):
        '''Forward pass: concatenate the four branch outputs along the channel dimension'''
        b1 = self.branch1(x)
        b2 = self.branch2(x)
        b3 = self.branch3(x)
        b4 = self.branch4(x)
        return torch.cat([b1, b2, b3, b4], 1)


class GoogLeNet(nn.Module):
    '''GoogLeNet / Inception v1

    Key points:
    1. Builds a deep network out of Inception modules
    2. 1x1 convolutions reduce computation
    3. Global average pooling replaces fully connected layers to cut parameters
    '''
    def __init__(self, num_classes=10):
        super(GoogLeNet, self).__init__()

        # Stage 1: a standard conv layer
        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )

        # Stage 2: two Inception modules
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)      # output channels: 256
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)    # output channels: 480

        # Max pooling
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        # Stage 3: five Inception modules
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)     # output channels: 512
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)    # output channels: 512
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)    # output channels: 512
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)    # output channels: 528
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)  # output channels: 832

        # Stage 4: two Inception modules
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)  # output channels: 832
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)  # output channels: 1024

        # Global average pooling and classifier
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, num_classes)

    def forward(self, x):
        # Stage 1
        out = self.pre_layers(x)

        # Stage 2
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)

        # Stage 3
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)

        # Stage 4
        out = self.a5(out)
        out = self.b5(out)

        # Classifier
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out

def test():
    """Smoke test"""
    net = GoogLeNet()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y.size())

    # Print the model summary
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (1, 3, 32, 32))

if __name__ == '__main__':
    test()
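Each Inception module's output width is simply the sum of its four branch widths (n1x1 + n3x3 + n5x5 + pool_planes), so the per-module channel comments above can be verified mechanically (a standalone check, not part of the commit):

# (name, n1x1, n3x3, n5x5, pool_planes) taken from the module definitions above
modules = [('a3', 64, 128, 32, 32), ('b3', 128, 192, 96, 64),
           ('a4', 192, 208, 48, 64), ('e4', 256, 320, 128, 128),
           ('b5', 384, 384, 128, 128)]
for name, n1, n3, n5, p in modules:
    print(f'{name}: {n1} + {n3} + {n5} + {p} = {n1 + n3 + n5 + p}')
# a3: 256, b3: 480, a4: 512, e4: 832, b5: 1024 -- matching the inline comments.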
Image/GoogLeNet/code/train.py
ADDED
@@ -0,0 +1,29 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import GoogLeNet

def main():
    # Build the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # Create the model
    model = GoogLeNet()

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.1,
        device='cuda',
        save_dir='../model',
        model_name='googlenet'
    )

if __name__ == '__main__':
    main()
Image/GoogLeNet/dataset/.gitkeep
ADDED
File without changes
Image/GoogLeNet/model/.gitkeep
ADDED
File without changes
Image/LeNet5/code/model.py
ADDED
@@ -0,0 +1,175 @@
'''
LeNet5 in PyTorch

LeNet5 is a classic convolutional neural network proposed by Yann LeCun et al. in 1998,
originally for handwritten digit recognition. Its main characteristics:
1. Convolutional layers extract features
2. Pooling layers reduce the feature dimensions (average pooling in the original paper;
   this implementation uses max pooling)
3. Fully connected layers perform the classification
4. The architecture is simple, with few parameters

Architecture:
    input (32x32x3)
    -> 5x5 conv, 6 filters, stride 1  -> 28x28x6
    -> 2x2 pool, stride 2             -> 14x14x6
    -> 5x5 conv, 16 filters, stride 1 -> 10x10x16
    -> 2x2 pool, stride 2             -> 5x5x16
    -> FC 120 -> FC 84 -> FC 10

Reference:
[1] Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner, "Gradient-based learning applied to
    document recognition," Proceedings of the IEEE, vol. 86, no. 11, pp. 2278-2324, Nov. 1998.
'''

import torch
import torch.nn as nn
import torch.nn.functional as F


class ConvBlock(nn.Module):
    """Convolution block

    Contains: conv layer -> ReLU -> max pooling

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int): convolution kernel size
        stride (int): convolution stride
        padding (int): padding size
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding
        )
        self.relu = nn.ReLU(inplace=True)  # in-place saves memory
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        """Forward pass

        Args:
            x (torch.Tensor): input feature map

        Returns:
            torch.Tensor: output feature map
        """
        x = self.conv(x)
        x = self.relu(x)
        x = self.pool(x)
        return x


class LeNet5(nn.Module):
    '''LeNet5 network

    Structure:
    1. Conv layer 1: 3 input channels, six 5x5 kernels, stride 1
    2. Max pool 1: 2x2 window, stride 2
    3. Conv layer 2: 6 input channels, sixteen 5x5 kernels, stride 1
    4. Max pool 2: 2x2 window, stride 2
    5. FC layer 1: 400 -> 120
    6. FC layer 2: 120 -> 84
    7. FC layer 3: 84 -> num_classes

    Args:
        num_classes (int): number of classes, default 10
        init_weights (bool): whether to initialize weights, default True
    '''
    def __init__(self, num_classes=10, init_weights=True):
        super(LeNet5, self).__init__()

        # First conv block: 32x32x3 -> 28x28x6 -> 14x14x6
        self.conv1 = ConvBlock(
            in_channels=3,
            out_channels=6,
            kernel_size=5,
            stride=1
        )

        # Second conv block: 14x14x6 -> 10x10x16 -> 5x5x16
        self.conv2 = ConvBlock(
            in_channels=6,
            out_channels=16,
            kernel_size=5,
            stride=1
        )

        # Fully connected layers
        self.classifier = nn.Sequential(
            nn.Linear(5*5*16, 120),
            nn.ReLU(inplace=True),
            nn.Linear(120, 84),
            nn.ReLU(inplace=True),
            nn.Linear(84, num_classes)
        )

        # Initialize weights
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        '''Forward pass

        Args:
            x (torch.Tensor): input image tensor, [N,3,32,32]

        Returns:
            torch.Tensor: output prediction tensor, [N,num_classes]
        '''
        # Feature extraction
        x = self.conv1(x)        # -> [N,6,14,14]
        x = self.conv2(x)        # -> [N,16,5,5]

        # Classification
        x = torch.flatten(x, 1)  # -> [N,16*5*5]
        x = self.classifier(x)   # -> [N,num_classes]
        return x

    def _initialize_weights(self):
        '''Initialize model weights

        Kaiming initialization scheme:
        - conv weights use kaiming_normal_
        - linear weights use normal_
        - all biases are zero-initialized
        '''
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Kaiming init, well suited to ReLU activations
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                # Normal-distribution init
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)


def test():
    """Smoke test

    Builds the model, runs a forward pass, and prints structure and parameter info
    """
    # Create the model
    net = LeNet5()
    print('Model Structure:')
    print(net)

    # Forward-pass check
    x = torch.randn(2,3,32,32)
    y = net(x)
    print('\nInput Shape:', x.shape)
    print('Output Shape:', y.shape)

    # Print model info
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (2,3,32,32))


if __name__ == '__main__':
    test()
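The inline shape comments in LeNet5 all follow the standard output-size formula out = (in - k + 2p) / s + 1. A standalone reproduction of the 32 -> 28 -> 14 -> 10 -> 5 trace (not part of the commit):

def conv_out(size, k, s=1, p=0):
    # Output spatial size of a conv/pool layer: (in - k + 2p) / s + 1
    return (size - k + 2 * p) // s + 1

size = 32
size = conv_out(size, k=5)        # conv1: 32 -> 28
size = conv_out(size, k=2, s=2)   # pool1: 28 -> 14
size = conv_out(size, k=5)        # conv2: 14 -> 10
size = conv_out(size, k=2, s=2)   # pool2: 10 -> 5
print(size * size * 16)           # 400, the classifier's input width (5*5*16)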
Image/LeNet5/code/train.py
ADDED
@@ -0,0 +1,29 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import LeNet5

def main():
    # Build the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # Create the model
    model = LeNet5()

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.1,
        device='cuda',
        save_dir='../model',
        model_name='lenet5'
    )

if __name__ == '__main__':
    main()
Image/LeNet5/dataset/.gitkeep
ADDED
File without changes
Image/LeNet5/model/.gitkeep
ADDED
File without changes
Image/MobileNetv1/code/model.py
ADDED
@@ -0,0 +1,163 @@
'''
MobileNetv1 in PyTorch.

Paper: "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
Reference: https://arxiv.org/abs/1704.04861

Key points:
1. Depthwise separable convolutions cut the parameter count and computation
2. A width multiplier and a resolution multiplier compress the model further
3. A lightweight CNN architecture for mobile and embedded devices
'''

import torch
import torch.nn as nn


class Block(nn.Module):
    '''Depthwise separable convolution block

    Contains:
    1. Depthwise conv: a spatial convolution applied to each channel separately
    2. Pointwise conv: a 1x1 convolution that mixes channels

    Args:
        in_channels: number of input channels
        out_channels: number of output channels
        stride: convolution stride
    '''
    def __init__(self, in_channels, out_channels, stride=1):
        super(Block, self).__init__()

        # Depthwise conv - a 3x3 convolution per channel
        self.conv1 = nn.Conv2d(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            groups=in_channels,  # groups=in_channels makes the conv depthwise
            bias=False
        )
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.relu1 = nn.ReLU(inplace=True)

        # Pointwise conv - 1x1 convolution for channel mixing
        self.conv2 = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=False
        )
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu2 = nn.ReLU(inplace=True)

    def forward(self, x):
        # Depthwise conv
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)

        # Pointwise conv
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        return x


class MobileNet(nn.Module):
    '''MobileNet v1

    Args:
        num_classes: number of classes
        alpha: width multiplier controlling network width (default 1.0)
        beta: resolution multiplier controlling input resolution (default 1.0)
              (note: alpha and beta are accepted but not applied in this implementation)
        init_weights: whether to initialize weights
    '''
    # Network configuration: (out_channels, stride); stride defaults to 1
    cfg = [64, (128,2), 128, (256,2), 256, (512,2),
           512, 512, 512, 512, 512, (1024,2), 1024]

    def __init__(self, num_classes=10, alpha=1.0, beta=1.0, init_weights=True):
        super(MobileNet, self).__init__()

        # First layer: a standard convolution
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, stride=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )

        # Depthwise separable conv layers
        self.layers = self._make_layers(in_channels=32)

        # Global average pooling and classifier
        self.avg = nn.AdaptiveAvgPool2d(1)  # adaptive average pooling down to 1x1
        self.linear = nn.Linear(1024, num_classes)

        # Initialize weights
        if init_weights:
            self._initialize_weights()

    def _make_layers(self, in_channels):
        '''Build the depthwise separable conv layers

        Args:
            in_channels: number of input channels
        '''
        layers = []
        for x in self.cfg:
            out_channels = x if isinstance(x, int) else x[0]
            stride = 1 if isinstance(x, int) else x[1]
            layers.append(Block(in_channels, out_channels, stride))
            in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        # Standard convolution
        x = self.conv1(x)

        # Depthwise separable conv layers
        x = self.layers(x)

        # Global average pooling and classifier
        x = self.avg(x)
        x = x.view(x.size(0), -1)
        x = self.linear(x)
        return x

    def _initialize_weights(self):
        '''Initialize model weights'''
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Kaiming init for conv layers
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                # BN layer init
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                # FC layer init
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)


def test():
    """Smoke test"""
    net = MobileNet()
    x = torch.randn(2, 3, 32, 32)
    y = net(x)
    print(y.size())

    # Print the model summary
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (2, 3, 32, 32))

if __name__ == '__main__':
    test()
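The parameter savings claimed in this file's docstring are easy to quantify: a standard k x k convolution costs k^2 * C_in * C_out weights, while the depthwise + pointwise pair costs k^2 * C_in + C_in * C_out. A standalone comparison (not part of the commit) for one of the 512 -> 512 stages above:

import torch.nn as nn

c_in, c_out, k = 512, 512, 3
standard = nn.Conv2d(c_in, c_out, k, padding=1, bias=False)
depthwise = nn.Conv2d(c_in, c_in, k, padding=1, groups=c_in, bias=False)
pointwise = nn.Conv2d(c_in, c_out, 1, bias=False)

count = lambda m: sum(p.numel() for p in m.parameters())
print('standard 3x3 conv:', count(standard))                         # 3*3*512*512 = 2,359,296
print('depthwise separable:', count(depthwise) + count(pointwise))   # 3*3*512 + 512*512 = 266,752
# Roughly an 8.8x reduction, matching the theoretical 1/C_out + 1/k^2 factor.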
Image/MobileNetv1/code/train.py
ADDED
@@ -0,0 +1,29 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import MobileNet

def main():
    # Build the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # Create the model
    model = MobileNet()

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.1,
        device='cuda',
        save_dir='../model',
        model_name='mobilenetv1'
    )

if __name__ == '__main__':
    main()
Image/MobileNetv1/dataset/.gitkeep
ADDED
File without changes
Image/MobileNetv1/model/.gitkeep
ADDED
File without changes
Image/MobileNetv2/code/model.py
ADDED
@@ -0,0 +1,176 @@
'''
MobileNetV2 in PyTorch.

Paper: "Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation"
Reference: https://arxiv.org/abs/1801.04381

Key points:
1. Inverted residual blocks expand the channels first, then project back down
2. Linear bottlenecks: the final ReLU is removed to preserve features
3. ReLU6 activations improve robustness under low-precision arithmetic
4. Residual connections use addition rather than concatenation, saving memory
'''

import torch
import torch.nn as nn


class Block(nn.Module):
    '''Inverted residual block

    Structure: expand (1x1) -> depthwise (3x3) -> project (1x1)
    Key points:
    1. 1x1 convs expand then reduce the channels (the reverse of ResNet)
    2. Depthwise separable convolution reduces the parameter count
    3. A shortcut connection when stride=1 (a 1x1 conv projection is added when the
       channel counts differ)

    Args:
        in_channels: number of input channels
        out_channels: number of output channels
        expansion: expansion factor controlling the hidden channel count
        stride: stride controlling the feature-map size
    '''
    def __init__(self, in_channels, out_channels, expansion, stride):
        super(Block, self).__init__()
        self.stride = stride
        channels = expansion * in_channels  # expanded channel count

        # 1x1 conv to expand channels
        self.conv1 = nn.Conv2d(
            in_channels, channels,
            kernel_size=1, stride=1, padding=0, bias=False
        )
        self.bn1 = nn.BatchNorm2d(channels)

        # 3x3 depthwise convolution
        self.conv2 = nn.Conv2d(
            channels, channels,
            kernel_size=3, stride=stride, padding=1,
            groups=channels, bias=False  # groups=channels makes this a depthwise conv
        )
        self.bn2 = nn.BatchNorm2d(channels)

        # 1x1 conv to project down (linear bottleneck, no activation)
        self.conv3 = nn.Conv2d(
            channels, out_channels,
            kernel_size=1, stride=1, padding=0, bias=False
        )
        self.bn3 = nn.BatchNorm2d(out_channels)

        # Shortcut connection
        self.shortcut = nn.Sequential()
        if stride == 1 and in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_channels, out_channels,
                    kernel_size=1, stride=1, padding=0, bias=False
                ),
                nn.BatchNorm2d(out_channels)
            )

        self.relu6 = nn.ReLU6(inplace=True)

    def forward(self, x):
        # Main branch
        out = self.relu6(self.bn1(self.conv1(x)))    # expand
        out = self.relu6(self.bn2(self.conv2(out)))  # depthwise conv
        out = self.bn3(self.conv3(out))              # project (linear bottleneck)

        # Shortcut connection (only when stride=1)
        out = out + self.shortcut(x) if self.stride == 1 else out
        return out


class MobileNetV2(nn.Module):
    '''MobileNetV2

    Args:
        num_classes: number of classes

    Network configuration:
    cfg = [(expansion, out_channels, num_blocks, stride), ...]
    - expansion: expansion factor
    - out_channels: output channels
    - num_blocks: number of blocks
    - stride: stride of the first block
    '''
    # Network structure configuration
    cfg = [
        # (expansion, out_channels, num_blocks, stride)
        (1, 16, 1, 1),   # conv1
        (6, 24, 2, 1),   # conv2; note: stride is 2 in the paper, changed to 1 here for CIFAR-10
        (6, 32, 3, 2),   # conv3
        (6, 64, 4, 2),   # conv4
        (6, 96, 3, 1),   # conv5
        (6, 160, 3, 2),  # conv6
        (6, 320, 1, 1),  # conv7
    ]

    def __init__(self, num_classes=10):
        super(MobileNetV2, self).__init__()

        # First conv layer (note: stride is 2 in the paper, changed to 1 here for CIFAR-10)
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)

        # Backbone
        self.layers = self._make_layers(in_channels=32)

        # Final 1x1 conv
        self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(1280)

        # Classifier
        self.avgpool = nn.AdaptiveAvgPool2d(1)  # global average pooling
        self.linear = nn.Linear(1280, num_classes)
        self.relu6 = nn.ReLU6(inplace=True)

    def _make_layers(self, in_channels):
        '''Build the network layers

        Args:
            in_channels: number of input channels
        '''
        layers = []
        for expansion, out_channels, num_blocks, stride in self.cfg:
            # For each config entry, the first block uses the given stride; the rest use stride=1
            strides = [stride] + [1]*(num_blocks-1)
            for stride in strides:
                layers.append(
                    Block(in_channels, out_channels, expansion, stride)
                )
                in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        # First conv layer
        out = self.relu6(self.bn1(self.conv1(x)))

        # Backbone
        out = self.layers(out)

        # Final 1x1 conv
        out = self.relu6(self.bn2(self.conv2(out)))

        # Classifier
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out


def test():
    """Smoke test"""
    net = MobileNetV2()
    x = torch.randn(2, 3, 32, 32)
    y = net(x)
    print(y.size())

    # Print the model summary
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (2, 3, 32, 32))

if __name__ == '__main__':
    test()
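To make the expand-then-project flow concrete, here is the channel bookkeeping for the (6, 24, 2, 1) entry of cfg, traced with a standalone instance of the Block class above (a sketch; it assumes Block is importable from this model.py and is not part of the commit):

import torch
from model import Block  # the inverted residual block defined above

block = Block(in_channels=16, out_channels=24, expansion=6, stride=1)
x = torch.randn(1, 16, 32, 32)
print(block.conv1(x).shape)  # expand:  torch.Size([1, 96, 32, 32]) -- 6 * 16 channels
print(block(x).shape)        # project: torch.Size([1, 24, 32, 32]); stride=1, so the 1x1-conv shortcut is added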
Image/MobileNetv2/code/train.py
ADDED
@@ -0,0 +1,29 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import MobileNetV2

def main():
    # Build the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # Create the model
    model = MobileNetV2()

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.1,
        device='cuda',
        save_dir='../model',
        model_name='mobilenetv2'
    )

if __name__ == '__main__':
    main()
Image/MobileNetv2/dataset/.gitkeep
ADDED
File without changes
Image/MobileNetv2/model/.gitkeep
ADDED
File without changes
Image/MobileNetv3/code/model.py
ADDED
@@ -0,0 +1,252 @@
'''
MobileNetV3 in PyTorch.

Paper: "Searching for MobileNetV3"
Reference: https://arxiv.org/abs/1905.02244

Key features:
1. Network architecture found through NAS-based search
2. An improved SE attention module
3. The h-swish activation function
4. Redesigned final layers of the network
5. Large and Small variants
'''

import torch
import torch.nn as nn
import torch.nn.functional as F


def get_activation(name):
    '''Return the requested activation function.

    Args:
        name: activation name ('relu' or 'hardswish')
    '''
    if name == 'relu':
        return nn.ReLU(inplace=True)
    elif name == 'hardswish':
        return nn.Hardswish(inplace=True)
    else:
        raise NotImplementedError


class SEModule(nn.Module):
    '''Squeeze-and-Excitation module

    Learns per-channel attention weights with global average pooling
    followed by a two-layer fully connected network.

    Args:
        channel: number of input channels
        reduction: channel reduction ratio
    '''
    def __init__(self, channel, reduction=4):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Hardsigmoid(inplace=True)
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)  # squeeze
        y = self.fc(y).view(b, c, 1, 1)  # excitation
        return x * y.expand_as(x)  # scale


class Bottleneck(nn.Module):
    '''MobileNetV3 bottleneck

    Consists of:
    1. Expansion layer (1x1 conv)
    2. Depthwise layer (3x3 or 5x5 depthwise conv)
    3. SE module (optional)
    4. Projection layer (1x1 conv)

    Args:
        in_channels: number of input channels
        exp_channels: number of expansion channels
        out_channels: number of output channels
        kernel_size: depthwise kernel size
        stride: stride
        use_SE: whether to apply the SE module
        activation: activation type
        use_residual: whether to use a residual connection
    '''
    def __init__(self, in_channels, exp_channels, out_channels, kernel_size,
                 stride, use_SE, activation, use_residual=True):
        super(Bottleneck, self).__init__()
        self.use_residual = use_residual and stride == 1 and in_channels == out_channels
        padding = (kernel_size - 1) // 2

        layers = []
        # Expansion layer
        if exp_channels != in_channels:
            layers.extend([
                nn.Conv2d(in_channels, exp_channels, 1, bias=False),
                nn.BatchNorm2d(exp_channels),
                get_activation(activation)
            ])

        # Depthwise conv
        layers.extend([
            nn.Conv2d(
                exp_channels, exp_channels, kernel_size,
                stride, padding, groups=exp_channels, bias=False
            ),
            nn.BatchNorm2d(exp_channels),
            get_activation(activation)
        ])

        # SE module
        if use_SE:
            layers.append(SEModule(exp_channels))

        # Projection layer
        layers.extend([
            nn.Conv2d(exp_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels)
        ])

        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        if self.use_residual:
            return x + self.conv(x)
        else:
            return self.conv(x)


class MobileNetV3(nn.Module):
    '''MobileNetV3 network

    Args:
        num_classes: number of classes
        mode: 'large' or 'small', selects the network variant
    '''
    def __init__(self, num_classes=10, mode='small'):
        super(MobileNetV3, self).__init__()

        if mode == 'large':
            # MobileNetV3-Large architecture
            self.config = [
                # k, exp, out, SE, activation, stride
                [3, 16, 16, False, 'relu', 1],
                [3, 64, 24, False, 'relu', 2],
                [3, 72, 24, False, 'relu', 1],
                [5, 72, 40, True, 'relu', 2],
                [5, 120, 40, True, 'relu', 1],
                [5, 120, 40, True, 'relu', 1],
                [3, 240, 80, False, 'hardswish', 2],
                [3, 200, 80, False, 'hardswish', 1],
                [3, 184, 80, False, 'hardswish', 1],
                [3, 184, 80, False, 'hardswish', 1],
                [3, 480, 112, True, 'hardswish', 1],
                [3, 672, 112, True, 'hardswish', 1],
                [5, 672, 160, True, 'hardswish', 2],
                [5, 960, 160, True, 'hardswish', 1],
                [5, 960, 160, True, 'hardswish', 1],
            ]
            init_conv_out = 16
            final_conv_out = 960
        else:
            # MobileNetV3-Small architecture
            self.config = [
                # k, exp, out, SE, activation, stride
                [3, 16, 16, True, 'relu', 2],
                [3, 72, 24, False, 'relu', 2],
                [3, 88, 24, False, 'relu', 1],
                [5, 96, 40, True, 'hardswish', 2],
                [5, 240, 40, True, 'hardswish', 1],
                [5, 240, 40, True, 'hardswish', 1],
                [5, 120, 48, True, 'hardswish', 1],
                [5, 144, 48, True, 'hardswish', 1],
                [5, 288, 96, True, 'hardswish', 2],
                [5, 576, 96, True, 'hardswish', 1],
                [5, 576, 96, True, 'hardswish', 1],
            ]
            init_conv_out = 16
            final_conv_out = 576

        # Stem convolution
        self.conv_stem = nn.Sequential(
            nn.Conv2d(3, init_conv_out, 3, 2, 1, bias=False),
            nn.BatchNorm2d(init_conv_out),
            get_activation('hardswish')
        )

        # Bottleneck stages
        features = []
        in_channels = init_conv_out
        for k, exp, out, se, activation, stride in self.config:
            features.append(
                Bottleneck(in_channels, exp, out, k, stride, se, activation)
            )
            in_channels = out
        self.features = nn.Sequential(*features)

        # Final convolution
        self.conv_head = nn.Sequential(
            nn.Conv2d(in_channels, final_conv_out, 1, bias=False),
            nn.BatchNorm2d(final_conv_out),
            get_activation('hardswish')
        )

        # Classifier
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Linear(final_conv_out, num_classes)
        )

        # Initialize weights
        self._initialize_weights()

    def _initialize_weights(self):
        '''Initialize model weights'''
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

    def forward(self, x):
        x = self.conv_stem(x)
        x = self.features(x)
        x = self.conv_head(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x


def test():
    """Smoke test"""
    # Test the Large variant
    net_large = MobileNetV3(mode='large')
    x = torch.randn(2, 3, 32, 32)
    y = net_large(x)
    print('Large output size:', y.size())

    # Test the Small variant
    net_small = MobileNetV3(mode='small')
    y = net_small(x)
    print('Small output size:', y.size())

    # Print the model structure
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net_small = net_small.to(device)
    summary(net_small, (2, 3, 32, 32))

if __name__ == '__main__':
    test()
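The h-swish activation highlighted in the docstring is cheap because it replaces the sigmoid in swish with a piecewise-linear approximation. As a quick standalone check (an illustrative sketch, not part of the repository), nn.Hardswish matches the paper's formula x * ReLU6(x + 3) / 6:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.linspace(-4, 4, steps=9)
hswish = nn.Hardswish()
manual = x * F.relu6(x + 3) / 6  # h-swish as defined in the MobileNetV3 paper
print(torch.allclose(hswish(x), manual))  # True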
Image/MobileNetv3/code/train.py
ADDED
@@ -0,0 +1,29 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import MobileNetV3

def main():
    # Get the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # Build the model
    model = MobileNetV3(num_classes=10, mode='small')  # the small variant suits CIFAR-10

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.1,
        device='cuda',
        save_dir='../model',
        model_name='mobilenetv3_small'
    )

if __name__ == '__main__':
    main()
Image/MobileNetv3/dataset/.gitkeep
ADDED
File without changes
Image/MobileNetv3/model/.gitkeep
ADDED
File without changes
Image/ResNet/code/model.py
ADDED
@@ -0,0 +1,259 @@
'''
ResNet in PyTorch.

ResNet (deep residual network) was proposed by Kaiming He et al. at Microsoft
Research. Its key innovation is residual learning: skip connections that solve
the degradation problem of very deep networks.

Key features:
1. Residual blocks with skip connections
2. Batch Normalization throughout
3. Supports very deep architectures (up to 152 layers)
4. Breakthrough results on many computer vision tasks

Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
    Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn

class BasicBlock(nn.Module):
    """Basic residual block

    Used in the shallower ResNet18/34. Structure:
    x -> Conv -> BN -> ReLU -> Conv -> BN -> (+) -> ReLU
     |------------------------------------------|

    Args:
        in_channels: number of input channels
        out_channels: number of output channels
        stride: stride for downsampling, defaults to 1

    Note: the basic block has no channel compression, so expansion=1.
    """
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1):
        super(BasicBlock, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(out_channels)
        )

        # If the input and output dimensions differ, use a 1x1 conv to match them
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != self.expansion * out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, self.expansion * out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * out_channels),
            )

    def forward(self, x):
        out = self.features(x)
        out += self.shortcut(x)
        out = torch.relu(out)
        return out


class Bottleneck(nn.Module):
    """Bottleneck residual block

    Used in the deeper ResNet50/101/152. Structure:
    x -> 1x1Conv -> BN -> ReLU -> 3x3Conv -> BN -> ReLU -> 1x1Conv -> BN -> (+) -> ReLU
     |-------------------------------------------------------------------|

    Args:
        in_channels: number of input channels
        zip_channels: number of compressed channels
        stride: stride for downsampling, defaults to 1

    Note: a 1x1 conv first compresses the channels and another restores them; expansion=4.
    """
    expansion = 4

    def __init__(self, in_channels, zip_channels, stride=1):
        super(Bottleneck, self).__init__()
        out_channels = self.expansion * zip_channels
        self.features = nn.Sequential(
            # 1x1 conv to compress channels
            nn.Conv2d(in_channels, zip_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(zip_channels),
            nn.ReLU(inplace=True),
            # 3x3 conv for feature extraction
            nn.Conv2d(zip_channels, zip_channels, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(zip_channels),
            nn.ReLU(inplace=True),
            # 1x1 conv to restore channels
            nn.Conv2d(zip_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels)
        )

        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )

    def forward(self, x):
        out = self.features(x)
        out += self.shortcut(x)
        out = torch.relu(out)
        return out

class ResNet(nn.Module):
    """ResNet model

    Architecture:
    1. One convolutional layer for feature extraction
    2. Four residual stages, each with several residual blocks
    3. Average pooling and a fully connected classifier

    For CIFAR10 the feature map sizes evolve as:
    (32,32,3) -> [Conv] -> (32,32,64) -> [Layer1] -> (32,32,64) -> [Layer2]
    -> (16,16,128) -> [Layer3] -> (8,8,256) -> [Layer4] -> (4,4,512) -> [AvgPool]
    -> (1,1,512) -> [FC] -> (num_classes)

    Args:
        block: residual block type (BasicBlock or Bottleneck)
        num_blocks: list with the number of blocks per stage
        num_classes: number of classes, defaults to 10
        verbose: whether to print intermediate feature map sizes
        init_weights: whether to initialize weights
    """
    def __init__(self, block, num_blocks, num_classes=10, verbose=False, init_weights=True):
        super(ResNet, self).__init__()
        self.verbose = verbose
        self.in_channels = 64

        # Stem convolution
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )

        # Four residual stages
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)

        # Classifier
        self.avg_pool = nn.AvgPool2d(kernel_size=4)
        self.classifier = nn.Linear(512 * block.expansion, num_classes)

        if init_weights:
            self._initialize_weights()

    def _make_layer(self, block, out_channels, num_blocks, stride):
        """Build one residual stage

        Args:
            block: residual block type
            out_channels: number of output channels
            num_blocks: number of residual blocks
            stride: stride of the first block (for downsampling)

        Returns:
            nn.Sequential: the residual stage
        """
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward pass

        Args:
            x: input tensor, [N,3,32,32]

        Returns:
            out: output tensor, [N,num_classes]
        """
        out = self.features(x)
        if self.verbose:
            print('block 1 output: {}'.format(out.shape))

        out = self.layer1(out)
        if self.verbose:
            print('block 2 output: {}'.format(out.shape))

        out = self.layer2(out)
        if self.verbose:
            print('block 3 output: {}'.format(out.shape))

        out = self.layer3(out)
        if self.verbose:
            print('block 4 output: {}'.format(out.shape))

        out = self.layer4(out)
        if self.verbose:
            print('block 5 output: {}'.format(out.shape))

        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out

    def _initialize_weights(self):
        """Initialize model weights

        Kaiming initialization scheme:
        - conv layers use kaiming_normal_
        - BN parameters use constant initialization
        - linear layers use a normal distribution
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

def ResNet18(verbose=False):
    """ResNet-18"""
    return ResNet(BasicBlock, [2,2,2,2], verbose=verbose)

def ResNet34(verbose=False):
    """ResNet-34"""
    return ResNet(BasicBlock, [3,4,6,3], verbose=verbose)

def ResNet50(verbose=False):
    """ResNet-50"""
    return ResNet(Bottleneck, [3,4,6,3], verbose=verbose)

def ResNet101(verbose=False):
    """ResNet-101"""
    return ResNet(Bottleneck, [3,4,23,3], verbose=verbose)

def ResNet152(verbose=False):
    """ResNet-152"""
    return ResNet(Bottleneck, [3,8,36,3], verbose=verbose)

def test():
    """Smoke test"""
    net = ResNet34()
    x = torch.randn(2,3,32,32)
    y = net(x)
    print('Output shape:', y.size())

    # Print the model structure
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net,(2,3,32,32))

if __name__ == '__main__':
    test()
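The shortcut branch in BasicBlock only inserts a 1x1 convolution when the two addends would otherwise disagree in shape. A minimal standalone sketch of a stride-2 block (illustrative sizes, assuming the BasicBlock class above is in scope):

import torch

block = BasicBlock(in_channels=64, out_channels=128, stride=2)
x = torch.randn(2, 64, 32, 32)
y = block(x)
# The projection shortcut maps x to [2, 128, 16, 16] so the residual add works
print(y.shape)  # torch.Size([2, 128, 16, 16])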
Image/ResNet/code/train.py
ADDED
@@ -0,0 +1,29 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import ResNet18

def main():
    # Get the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # Build the model
    model = ResNet18()

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.1,
        device='cuda',
        save_dir='../model',
        model_name='resnet18'
    )

if __name__ == '__main__':
    main()
Image/ResNet/dataset/.gitkeep
ADDED
File without changes
Image/ResNet/model/.gitkeep
ADDED
File without changes
Image/SENet/code/model.py
ADDED
@@ -0,0 +1,251 @@
'''
SENet (Squeeze-and-Excitation Networks) in PyTorch.

SENet adaptively recalibrates channel-wise feature responses through an SE
module. The module can be plugged into existing architectures and explicitly
models the interdependencies between channels.

Key features:
1. The Squeeze-and-Excitation (SE) module strengthens feature representations
2. The SE module combines a squeeze step (global average pooling) and an excitation step (two FC layers)
3. An attention mechanism amplifies informative channels and suppresses uninformative ones
4. It can be embedded in almost any existing architecture

Reference:
[1] Jie Hu, Li Shen, Samuel Albanie, Gang Sun, Enhua Wu
    Squeeze-and-Excitation Networks. CVPR 2018.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicBlock(nn.Module):
    """Basic residual block with an SE module

    Structure:
    x -> Conv -> BN -> ReLU -> Conv -> BN -> SE -> (+) -> ReLU
     |------------------------------------------|

    Args:
        in_channels: number of input channels
        channels: number of output channels
        stride: stride for downsampling, defaults to 1
    """
    def __init__(self, in_channels, channels, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

        # Residual connection
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(channels)
            )

        # SE module
        self.squeeze = nn.AdaptiveAvgPool2d(1)  # global average pooling
        self.excitation = nn.Sequential(
            nn.Conv2d(channels, channels//16, kernel_size=1),  # channel reduction
            nn.ReLU(inplace=True),
            nn.Conv2d(channels//16, channels, kernel_size=1),  # channel expansion
            nn.Sigmoid()  # normalize to [0,1]
        )

    def forward(self, x):
        # Main branch
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        # SE module
        w = self.squeeze(out)   # squeeze
        w = self.excitation(w)  # excitation
        out = out * w           # feature recalibration

        # Residual connection
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class PreActBlock(nn.Module):
    """Pre-activation basic block with an SE module

    Structure:
    x -> BN -> ReLU -> Conv -> BN -> ReLU -> Conv -> SE -> (+)
     |-------------------------------------------|

    Args:
        in_channels: number of input channels
        channels: number of output channels
        stride: stride for downsampling, defaults to 1
    """
    def __init__(self, in_channels, channels, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv1 = nn.Conv2d(in_channels, channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False)

        # Residual connection
        if stride != 1 or in_channels != channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, channels, kernel_size=1, stride=stride, bias=False)
            )

        # SE module
        self.squeeze = nn.AdaptiveAvgPool2d(1)
        self.excitation = nn.Sequential(
            nn.Conv2d(channels, channels//16, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels//16, channels, kernel_size=1),
            nn.Sigmoid()
        )

    def forward(self, x):
        # Pre-activation
        out = F.relu(self.bn1(x))
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x

        # Main branch
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))

        # SE module
        w = self.squeeze(out)
        w = self.excitation(w)
        out = out * w

        # Residual connection
        out += shortcut
        return out


class SENet(nn.Module):
    """SENet model

    Architecture:
    1. One convolutional layer for feature extraction
    2. Four residual stages, each with several SE residual blocks
    3. Average pooling and a fully connected classifier

    Args:
        block: residual block type (BasicBlock or PreActBlock)
        num_blocks: list with the number of blocks per stage
        num_classes: number of classes, defaults to 10
    """
    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_channels = 64

        # Stem convolution
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)

        # Four residual stages
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)

        # Classifier
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(512, num_classes)

        # Initialize weights
        self._initialize_weights()

    def _make_layer(self, block, channels, num_blocks, stride):
        """Build one residual stage

        Args:
            block: residual block type
            channels: number of output channels
            num_blocks: number of residual blocks
            stride: stride of the first block (for downsampling)

        Returns:
            nn.Sequential: the residual stage
        """
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, channels, stride))
            self.in_channels = channels
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward pass

        Args:
            x: input tensor, [N,3,32,32]

        Returns:
            out: output tensor, [N,num_classes]
        """
        # Feature extraction
        out = F.relu(self.bn1(self.conv1(x)))

        # Residual stages
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)

        # Classification
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out

    def _initialize_weights(self):
        """Initialize model weights

        Kaiming initialization scheme:
        - conv layers use kaiming_normal_
        - BN parameters use constant initialization
        - linear layers use a normal distribution
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


def SENet18():
    """SENet-18"""
    return SENet(PreActBlock, [2,2,2,2])


def test():
    """Smoke test"""
    # Build the model
    net = SENet18()
    print('Model Structure:')
    print(net)

    # Forward pass
    x = torch.randn(1,3,32,32)
    y = net(x)
    print('\nInput Shape:', x.shape)
    print('Output Shape:', y.shape)

    # Print the model info
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (1,3,32,32))


if __name__ == '__main__':
    test()
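The squeeze step collapses each channel to a single statistic and the excitation step maps those statistics to per-channel gates in (0, 1). A toy sketch of just the SE path (illustrative sizes, mirroring the layers defined above):

import torch
import torch.nn as nn

channels = 32
squeeze = nn.AdaptiveAvgPool2d(1)
excitation = nn.Sequential(
    nn.Conv2d(channels, channels // 16, kernel_size=1),
    nn.ReLU(inplace=True),
    nn.Conv2d(channels // 16, channels, kernel_size=1),
    nn.Sigmoid()
)

feat = torch.randn(1, channels, 8, 8)
w = excitation(squeeze(feat))  # [1, 32, 1, 1] gates, one per channel
print(0 < w.min().item() < w.max().item() < 1)  # True: sigmoid keeps gates in (0, 1)
print((feat * w).shape)  # torch.Size([1, 32, 8, 8]): rescaled, shape unchanged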
Image/SENet/code/train.py
ADDED
@@ -0,0 +1,29 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import SENet18

def main():
    # Get the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # Build the model
    model = SENet18()

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.1,
        device='cuda',
        save_dir='../model',
        model_name='senet18'
    )

if __name__ == '__main__':
    main()
Image/SENet/dataset/.gitkeep
ADDED
File without changes
Image/SENet/model/.gitkeep
ADDED
File without changes
Image/ShuffleNet/code/model.py
ADDED
@@ -0,0 +1,263 @@
'''
ShuffleNet in PyTorch.

ShuffleNet is an efficient convolutional network designed for mobile devices.
Its two key operations are:
1. Pointwise group convolution
2. Channel shuffle
Together they cut the computational cost substantially while keeping good accuracy.

Key features:
1. Group convolutions reduce parameters and computation
2. Channel shuffle lets information flow between groups
3. Depthwise separable convolutions reduce the cost further
4. Several complexity settings target different devices

Reference:
[1] Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun
    ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices. CVPR 2018.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F


class ShuffleBlock(nn.Module):
    """Channel shuffle module

    Reorders channels so information can flow between convolution groups.

    Args:
        groups (int): number of groups
    """
    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        """Channel shuffle forward pass

        Steps:
        1. [N,C,H,W] -> [N,g,C/g,H,W]  # reshape into g groups
        2. [N,g,C/g,H,W] -> [N,C/g,g,H,W]  # transpose the group dimension
        3. [N,C/g,g,H,W] -> [N,C,H,W]  # reshape back

        Args:
            x: input tensor, [N,C,H,W]

        Returns:
            out: channel-shuffled tensor, [N,C,H,W]
        """
        N, C, H, W = x.size()
        g = self.groups
        return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).reshape(N,C,H,W)


class Bottleneck(nn.Module):
    """ShuffleNet basic unit

    Structure:
    x -> 1x1 GConv -> BN -> Shuffle -> 3x3 DWConv -> BN -> 1x1 GConv -> BN -> (+) -> ReLU
     |---------------------|

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        stride (int): stride, used for downsampling
        groups (int): number of groups for the group convolutions
    """
    def __init__(self, in_channels, out_channels, stride, groups):
        super(Bottleneck, self).__init__()
        self.stride = stride

        # Mid channels and group count
        mid_channels = out_channels // 4
        g = 1 if in_channels == 24 else groups

        # First 1x1 group convolution
        self.conv1 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_channels)
        self.shuffle1 = ShuffleBlock(groups=g)

        # 3x3 depthwise convolution
        self.conv2 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=3, stride=stride, padding=1,
                               groups=mid_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)

        # Second 1x1 group convolution
        self.conv3 = nn.Conv2d(mid_channels, out_channels,
                               kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)

        # Residual connection
        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(
                nn.AvgPool2d(3, stride=2, padding=1)
            )

    def forward(self, x):
        # Main branch
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        # Residual connection
        res = self.shortcut(x)

        # Downsampling units concatenate the residual; others add it
        out = F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out + res)
        return out


class ShuffleNet(nn.Module):
    """ShuffleNet model

    Architecture:
    1. One convolutional layer for feature extraction
    2. Three stages, each with several shuffled residual units
    3. Average pooling and a fully connected classifier

    Args:
        cfg (dict): configuration dict with:
            - out_channels (list): output channels per stage
            - num_blocks (list): number of units per stage
            - groups (int): number of groups for group convolutions
    """
    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        out_channels = cfg['out_channels']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']

        # Stem convolution
        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24

        # Three stages
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2], groups)

        # Classifier
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(out_channels[2], 10)

        # Initialize weights
        self._initialize_weights()

    def _make_layer(self, out_channels, num_blocks, groups):
        """Build one ShuffleNet stage

        Args:
            out_channels (int): number of output channels
            num_blocks (int): number of units
            groups (int): number of groups

        Returns:
            nn.Sequential: the stage
        """
        layers = []
        for i in range(num_blocks):
            stride = 2 if i == 0 else 1
            cat_channels = self.in_channels if i == 0 else 0
            layers.append(
                Bottleneck(
                    self.in_channels,
                    out_channels - cat_channels,
                    stride=stride,
                    groups=groups
                )
            )
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward pass

        Args:
            x: input tensor, [N,3,32,32]

        Returns:
            out: output tensor, [N,num_classes]
        """
        # Feature extraction
        out = F.relu(self.bn1(self.conv1(x)))

        # Three stages
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)

        # Classification
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out

    def _initialize_weights(self):
        """Initialize model weights

        Kaiming initialization scheme:
        - conv layers use kaiming_normal_
        - BN parameters use constant initialization
        - linear layers use a normal distribution
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


def ShuffleNetG2():
    """ShuffleNet with groups=2"""
    cfg = {
        'out_channels': [200,400,800],
        'num_blocks': [4,8,4],
        'groups': 2
    }
    return ShuffleNet(cfg)


def ShuffleNetG3():
    """ShuffleNet with groups=3"""
    cfg = {
        'out_channels': [240,480,960],
        'num_blocks': [4,8,4],
        'groups': 3
    }
    return ShuffleNet(cfg)


def test():
    """Smoke test"""
    # Build the model
    net = ShuffleNetG2()
    print('Model Structure:')
    print(net)

    # Forward pass
    x = torch.randn(1,3,32,32)
    y = net(x)
    print('\nInput Shape:', x.shape)
    print('Output Shape:', y.shape)

    # Print the model info
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (1,3,32,32))


if __name__ == '__main__':
    test()
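The view/permute/reshape trick in ShuffleBlock is easiest to see with labeled channels: with 6 channels in 2 groups, the shuffle interleaves the two halves so the next group convolution sees channels from both. A minimal sketch (toy tensor, for illustration only):

import torch

N, C, H, W, g = 1, 6, 1, 1, 2
x = torch.arange(C, dtype=torch.float32).view(N, C, H, W)  # channels labeled 0..5
shuffled = x.view(N, g, C // g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W)
print(shuffled.flatten().tolist())  # [0.0, 3.0, 1.0, 4.0, 2.0, 5.0]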
Image/ShuffleNet/code/train.py
ADDED
@@ -0,0 +1,29 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import ShuffleNetG2

def main():
    # Get the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # Build the model. The ShuffleNet class itself requires a cfg dict,
    # so use the groups=2 factory from model.py; a bare ShuffleNet()
    # call would raise a TypeError.
    model = ShuffleNetG2()

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.1,
        device='cuda',
        save_dir='../model',
        model_name='shufflenet'
    )

if __name__ == '__main__':
    main()
Image/ShuffleNet/dataset/.gitkeep
ADDED
File without changes
Image/ShuffleNet/model/.gitkeep
ADDED
File without changes
Image/ShuffleNetv2/code/model.py
ADDED
@@ -0,0 +1,345 @@
'''
ShuffleNetV2 in PyTorch.

ShuffleNetV2 improves on ShuffleNet. From experiments it distills four
practical guidelines for efficient network design:
1. Equal input/output channel width minimizes memory access cost (MAC)
2. Excessive group convolution increases MAC
3. Network fragmentation reduces parallelism
4. Element-wise operations are not negligible

Main improvements:
1. Channel split replaces group convolution
2. The basic unit is redesigned so input and output widths match
3. Each stage uses its own channel configuration
4. The downsampling unit is simplified

Reference:
[1] Ningning Ma, Xiangyu Zhang, Hai-Tao Zheng, Jian Sun
    ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design. ECCV 2018.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F


class ShuffleBlock(nn.Module):
    """Channel shuffle module

    Reorders channels so information can mix between the two branches.

    Args:
        groups (int): number of groups, defaults to 2
    """
    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        """Channel shuffle forward pass

        Steps:
        1. [N,C,H,W] -> [N,g,C/g,H,W]  # reshape into g groups
        2. [N,g,C/g,H,W] -> [N,C/g,g,H,W]  # transpose the group dimension
        3. [N,C/g,g,H,W] -> [N,C,H,W]  # reshape back

        Args:
            x: input tensor, [N,C,H,W]

        Returns:
            out: channel-shuffled tensor, [N,C,H,W]
        """
        N, C, H, W = x.size()
        g = self.groups
        return x.view(N, g, C//g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W)


class SplitBlock(nn.Module):
    """Channel split module

    Splits the input feature map into two parts along the channel axis.

    Args:
        ratio (float): split ratio, defaults to 0.5
    """
    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        """Channel split forward pass

        Args:
            x: input tensor, [N,C,H,W]

        Returns:
            tuple: the two split tensors, [N,C1,H,W] and [N,C2,H,W]
        """
        c = int(x.size(1) * self.ratio)
        return x[:, :c, :, :], x[:, c:, :, :]


class BasicBlock(nn.Module):
    """ShuffleNetV2 basic unit

    Structure:
        x --split--> x1 (identity)
                '--> x2 -> 1x1 Conv -> 3x3 DWConv -> 1x1 Conv
        out = ChannelShuffle(Concat(x1, branch(x2)))

    Args:
        in_channels (int): number of input channels
        split_ratio (float): channel split ratio, defaults to 0.5
    """
    def __init__(self, in_channels, split_ratio=0.5):
        super(BasicBlock, self).__init__()
        self.split = SplitBlock(split_ratio)
        in_channels = int(in_channels * split_ratio)

        # Main branch
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)

        self.conv2 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=1, padding=1,
                               groups=in_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)

        self.conv3 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(in_channels)

        self.shuffle = ShuffleBlock()

    def forward(self, x):
        # Channel split
        x1, x2 = self.split(x)

        # Main branch
        out = F.relu(self.bn1(self.conv1(x2)))
        out = self.bn2(self.conv2(out))
        out = F.relu(self.bn3(self.conv3(out)))

        # Concatenate and shuffle
        out = torch.cat([x1, out], 1)
        out = self.shuffle(out)
        return out


class DownBlock(nn.Module):
    """Downsampling unit

    Structure:
        x ----> branch1: 3x3 DWConv(s=2) -> 1x1 Conv
          '---> branch2: 1x1 Conv -> 3x3 DWConv(s=2) -> 1x1 Conv
        out = ChannelShuffle(Concat(branch1, branch2))

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
    """
    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2

        # Left branch
        self.branch1 = nn.Sequential(
            # 3x3 depthwise conv, stride 2
            nn.Conv2d(in_channels, in_channels,
                      kernel_size=3, stride=2, padding=1,
                      groups=in_channels, bias=False),
            nn.BatchNorm2d(in_channels),
            # 1x1 conv
            nn.Conv2d(in_channels, mid_channels,
                      kernel_size=1, bias=False),
            nn.BatchNorm2d(mid_channels)
        )

        # Right branch
        self.branch2 = nn.Sequential(
            # 1x1 conv
            nn.Conv2d(in_channels, mid_channels,
                      kernel_size=1, bias=False),
            nn.BatchNorm2d(mid_channels),
            # 3x3 depthwise conv, stride 2
            nn.Conv2d(mid_channels, mid_channels,
                      kernel_size=3, stride=2, padding=1,
                      groups=mid_channels, bias=False),
            nn.BatchNorm2d(mid_channels),
            # 1x1 conv
            nn.Conv2d(mid_channels, mid_channels,
                      kernel_size=1, bias=False),
            nn.BatchNorm2d(mid_channels)
        )

        self.shuffle = ShuffleBlock()

    def forward(self, x):
        # Left branch
        out1 = self.branch1(x)

        # Right branch
        out2 = self.branch2(x)

        # Concatenate and shuffle
        out = torch.cat([out1, out2], 1)
        out = self.shuffle(out)
        return out


class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 model

    Architecture:
    1. One convolutional layer for feature extraction
    2. Three stages, each with one downsampling unit plus several basic units
    3. A final convolutional layer
    4. Average pooling and a fully connected classifier

    Args:
        net_size (float): network width, one of 0.5/1.0/1.5/2.0
    """
    def __init__(self, net_size):
        super(ShuffleNetV2, self).__init__()
        out_channels = configs[net_size]['out_channels']
        num_blocks = configs[net_size]['num_blocks']

        # Stem convolution
        self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24

        # Three stages
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])

        # Final 1x1 convolution
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])

        # Classifier
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(out_channels[3], 10)

        # Initialize weights
        self._initialize_weights()

    def _make_layer(self, out_channels, num_blocks):
        """Build one stage

        Args:
            out_channels (int): number of output channels
            num_blocks (int): number of basic units

        Returns:
            nn.Sequential: the stage
        """
        layers = [DownBlock(self.in_channels, out_channels)]
        for i in range(num_blocks):
            layers.append(BasicBlock(out_channels))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward pass

        Args:
            x: input tensor, [N,3,32,32]

        Returns:
            out: output tensor, [N,num_classes]
        """
        # Feature extraction
        out = F.relu(self.bn1(self.conv1(x)))

        # Three stages
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)

        # Final feature extraction
        out = F.relu(self.bn2(self.conv2(out)))

        # Classification
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out

    def _initialize_weights(self):
        """Initialize model weights

        Kaiming initialization scheme:
        - conv layers use kaiming_normal_
        - BN parameters use constant initialization
        - linear layers use a normal distribution
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


# Channel configurations for the different network sizes
configs = {
    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.0: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2.0: {
        'out_channels': (224, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}


def test():
    """Smoke test"""
    # Build the model
    net = ShuffleNetV2(net_size=0.5)
    print('Model Structure:')
    print(net)

    # Forward pass
    x = torch.randn(1,3,32,32)
    y = net(x)
    print('\nInput Shape:', x.shape)
    print('Output Shape:', y.shape)

    # Print the model info
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (1,3,32,32))


if __name__ == '__main__':
    test()
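Channel split is what lets half the channels bypass the unit as an identity path before being concatenated back, keeping input and output widths equal (guideline 1). A minimal sketch using the SplitBlock defined above (the 116-channel size is just the stage-1 width of the 1.0x config):

import torch

split = SplitBlock(ratio=0.5)
x = torch.randn(2, 116, 16, 16)
x1, x2 = split(x)
print(x1.shape, x2.shape)   # [2, 58, 16, 16] each
out = torch.cat([x1, x2], 1)  # BasicBlock transforms x2, then concatenates
print(out.shape)            # [2, 116, 16, 16] -- channel count preserved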
Image/ShuffleNetv2/code/train.py
ADDED
@@ -0,0 +1,29 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import ShuffleNetV2

def main():
    # Get the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # Build the model
    model = ShuffleNetV2(net_size=1.0)  # the 1.0x channel configuration

    # Train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.1,
        device='cuda',
        save_dir='../model',
        model_name='shufflenetv2'
    )

if __name__ == '__main__':
    main()
Image/ShuffleNetv2/dataset/.gitkeep
ADDED
File without changes
Image/ShuffleNetv2/model/.gitkeep
ADDED
File without changes
Image/SwinTransformer/code/model.py
ADDED
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
import torch.nn.functional as F
|
4 |
+
import torch.utils.checkpoint as checkpoint
|
5 |
+
import numpy as np
|
6 |
+
from timm.models.layers import DropPath, trunc_normal_
|
7 |
+
|
8 |
+
class Mlp(nn.Module):
|
9 |
+
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
|
10 |
+
super().__init__()
|
11 |
+
out_features = out_features or in_features
|
12 |
+
hidden_features = hidden_features or in_features
|
13 |
+
self.fc1 = nn.Linear(in_features, hidden_features)
|
14 |
+
self.act = act_layer()
|
15 |
+
self.fc2 = nn.Linear(hidden_features, out_features)
|
16 |
+
self.drop = nn.Dropout(drop)
|
17 |
+
|
18 |
+
def forward(self, x):
|
19 |
+
x = self.fc1(x)
|
20 |
+
x = self.act(x)
|
21 |
+
x = self.drop(x)
|
22 |
+
x = self.fc2(x)
|
23 |
+
x = self.drop(x)
|
24 |
+
return x
|
25 |
+
|
26 |
+
def window_partition(x, window_size):
|
27 |
+
B, H, W, C = x.shape
|
28 |
+
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
|
29 |
+
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
|
30 |
+
return windows
|
31 |
+
|
32 |
+
def window_reverse(windows, window_size, H, W):
|
33 |
+
B = int(windows.shape[0] / (H * W / window_size / window_size))
|
34 |
+
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
|
35 |
+
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
|
36 |
+
return x
|
37 |
+
|
38 |
+
class WindowAttention(nn.Module):
|
39 |
+
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):
|
40 |
+
super().__init__()
|
41 |
+
self.dim = dim
|
42 |
+
self.window_size = window_size
|
43 |
+
self.num_heads = num_heads
|
44 |
+
head_dim = dim // num_heads
|
45 |
+
self.scale = head_dim ** -0.5
|
46 |
+
|
47 |
+
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
48 |
+
self.attn_drop = nn.Dropout(attn_drop)
|
49 |
+
self.proj = nn.Linear(dim, dim)
|
50 |
+
self.proj_drop = nn.Dropout(proj_drop)
|
51 |
+
|
52 |
+
self.softmax = nn.Softmax(dim=-1)
|
53 |
+
|
54 |
+
def forward(self, x):
|
55 |
+
B_, N, C = x.shape
|
56 |
+
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
|
57 |
+
q, k, v = qkv[0], qkv[1], qkv[2]
|
58 |
+
|
59 |
+
q = q * self.scale
|
60 |
+
attn = (q @ k.transpose(-2, -1))
|
61 |
+
attn = self.softmax(attn)
|
62 |
+
attn = self.attn_drop(attn)
|
63 |
+
|
64 |
+
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
|
65 |
+
x = self.proj(x)
|
66 |
+
x = self.proj_drop(x)
|
67 |
+
return x
|
68 |
+
|
69 |
+
class SwinTransformerBlock(nn.Module):
|
70 |
+
def __init__(self, dim, num_heads, window_size=7, shift_size=0,
|
71 |
+
                 mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=window_size, num_heads=num_heads,
            qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        H, W = self.H, self.W
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # pad feature maps to multiples of window size
        pad_l = pad_t = 0
        pad_r = (self.window_size - W % self.window_size) % self.window_size
        pad_b = (self.window_size - H % self.window_size) % self.window_size
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = x.shape

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x

        # partition windows: (num_windows*B, window_size, window_size, C)
        x_windows = window_partition(shifted_x, self.window_size)
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)

        # W-MSA/SW-MSA (note: no attention mask is applied to shifted windows,
        # a simplification relative to the original Swin formulation)
        attn_windows = self.attn(x_windows)

        # merge windows back to the padded feature map
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp)

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x

        # strip the padding added above
        if pad_r > 0 or pad_b > 0:
            x = x[:, :H, :W, :].contiguous()

        x = x.view(B, H * W, C)

        # FFN with residual connections
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x


class PatchEmbed(nn.Module):
    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        self.patch_size = patch_size
        self.in_chans = in_chans
        self.embed_dim = embed_dim

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        _, _, H, W = x.shape

        # pad so H and W are divisible by the patch size
        pad_input = (H % self.patch_size != 0) or (W % self.patch_size != 0)
        if pad_input:
            x = F.pad(x, (0, self.patch_size - W % self.patch_size,
                          0, self.patch_size - H % self.patch_size,
                          0, 0))

        x = self.proj(x)
        x = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
        x = self.norm(x)
        return x


class SwinTransformer(nn.Module):
    def __init__(self, img_size=32, patch_size=4, in_chans=3, num_classes=10,
                 embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
                 window_size=7, mlp_ratio=4., qkv_bias=True,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, patch_norm=True):
        super().__init__()

        self.num_classes = num_classes
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.patch_norm = patch_norm

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # build layers; note that `depths` is only used for its length here:
        # each stage is a single block at a fixed resolution and embed_dim
        # (no patch merging), i.e. a simplified, plain-stacked variant of Swin
        layers = []
        for i_layer in range(self.num_layers):
            layer = SwinTransformerBlock(
                dim=embed_dim,
                num_heads=num_heads[i_layer],
                window_size=window_size,
                shift_size=0 if (i_layer % 2 == 0) else window_size // 2,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=drop_path_rate,
                norm_layer=norm_layer)
            layers.append(layer)

        self.layers = nn.ModuleList(layers)
        self.norm = norm_layer(embed_dim)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.head = nn.Linear(embed_dim, num_classes)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        # patch-grid resolution; PatchEmbed pads its input, so round up
        H = (x.size(2) + self.patch_embed.patch_size - 1) // self.patch_embed.patch_size
        W = (x.size(3) + self.patch_embed.patch_size - 1) // self.patch_embed.patch_size

        x = self.patch_embed(x)
        x = self.pos_drop(x)

        for layer in self.layers:
            # the blocks expect the spatial patch-grid size; the original line
            # assigned x.size(1), x.size(2) -- the L and C dims of the token
            # tensor -- which trips the shape assert inside the block
            layer.H, layer.W = H, W
            x = layer(x)

        x = self.norm(x)                     # B L C
        x = self.avgpool(x.transpose(1, 2))  # B C 1
        x = torch.flatten(x, 1)
        x = self.head(x)

        return x
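A quick forward pass helps confirm the reconstructed model wires up correctly. This is a minimal sketch, assuming the earlier parts of model.py (Mlp, WindowAttention, window_partition/window_reverse, DropPath, trunc_normal_) are present and importable:

import torch
from model import SwinTransformer  # run from Image/SwinTransformer/code/

model = SwinTransformer(img_size=32, patch_size=4, num_classes=10)
x = torch.randn(2, 3, 32, 32)  # a CIFAR-10-sized batch
logits = model(x)
print(logits.shape)  # expected: torch.Size([2, 10])

One design note: with patch_size=4 on 32x32 inputs the patch grid is 8x8, so the default window_size=7 forces every block to pad the grid to 14x14 before windowing; a window_size of 4 or 8 would tile the CIFAR-10 grid without padding.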
Image/SwinTransformer/code/train.py
ADDED
@@ -0,0 +1,43 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import SwinTransformer

def main():
    # get the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size=128)

    # build the model
    model = SwinTransformer(
        img_size=32,
        patch_size=4,
        in_chans=3,
        num_classes=10,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.,
        qkv_bias=True,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.1
    )

    # train the model
    train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=200,
        lr=0.001,  # Transformer-style models usually need a smaller learning rate
        device='cuda',
        save_dir='../model',
        model_name='swin_transformer'
    )

if __name__ == '__main__':
    main()
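The two helpers imported from utils are not part of this diff. As a rough mental model only -- the actual utils.train_utils implementation may differ -- train_model called with the arguments above presumably wraps a loop along these lines (the optimizer choice, logging, and checkpoint format here are all assumptions):

import os
import torch
import torch.nn as nn

def train_model(model, trainloader, testloader, epochs, lr, device, save_dir, model_name):
    # hypothetical sketch of the repo's training helper, not its actual code
    model = model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    for epoch in range(epochs):
        model.train()
        for inputs, targets in trainloader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            loss = criterion(model(inputs), targets)
            loss.backward()
            optimizer.step()
        # evaluate on the held-out split after each epoch
        model.eval()
        correct = total = 0
        with torch.no_grad():
            for inputs, targets in testloader:
                inputs, targets = inputs.to(device), targets.to(device)
                preds = model(inputs).argmax(dim=1)
                correct += (preds == targets).sum().item()
                total += targets.size(0)
        print(f'epoch {epoch + 1}/{epochs}: test acc {correct / total:.4f}')
    # persist the final weights under save_dir/model_name.pth
    os.makedirs(save_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(save_dir, f'{model_name}.pth'))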