'''
AlexNet in PyTorch.

A simplified AlexNet-style CNN sized for 32x32 inputs (e.g. CIFAR-10);
channel counts and kernel sizes are much smaller than in the original AlexNet.
'''

import torch
import torch.nn as nn


class AlexNet(nn.Module):
    '''
    AlexNet model (simplified variant for 32x32 inputs).
    '''
    def __init__(self, num_classes=10):
        super().__init__()
        # Five conv blocks; each max pool halves the spatial size:
        # 32 -> 16 -> 8 -> 4 -> 2 -> 1.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        )
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        )
        # conv5 outputs 128 channels at 1x1, so the flattened feature size is 128.
        self.dense = nn.Sequential(
            nn.Linear(128, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, num_classes)
        )

        self._initialize_weights()
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 128)
        x = self.dense(x)
        return x
    def _initialize_weights(self):
        # Kaiming init for conv layers (suited to ReLU); small Gaussian for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)


def test():
    net = AlexNet()
    x = torch.randn(2, 3, 32, 32)
    y = net(x)
    print(y.size())  # expected: torch.Size([2, 10])

    # torchinfo is an optional dependency; its input_size includes the batch dimension.
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, input_size=(1, 3, 32, 32), device=device)
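

# A minimal entry point (an assumed addition, not in the original file) so the
# sanity check above runs when this module is executed directly.
if __name__ == '__main__':
    test()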