'''
ShuffleNetV2 in PyTorch.

ShuffleNetV2 is an improved version of ShuffleNet. From controlled experiments,
the authors distilled four practical guidelines for efficient network design:
1. Equal input and output channel widths minimize memory access cost (MAC)
   for a fixed number of FLOPs.
2. Excessive group convolution increases MAC.
3. Network fragmentation reduces the degree of parallelism.
4. Element-wise operations are non-negligible.

Main improvements over ShuffleNet V1:
1. Channel split replaces group convolution.
2. The basic unit is redesigned so that input and output channel counts are equal.
3. Each stage uses a different channel configuration.
4. The downsampling unit is simplified.

Reference:
[1] Ningning Ma, Xiangyu Zhang, Hai-Tao Zheng, Jian Sun.
    ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design. ECCV 2018.
'''
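# Guideline 1, quantified (formula from the paper): for a 1x1 convolution over
# an h x w feature map with c1 input and c2 output channels, the FLOPs are
# B = h*w*c1*c2 and the memory access cost is MAC = h*w*(c1 + c2) + c1*c2.
# By the AM-GM inequality, MAC >= 2*sqrt(h*w*B) + B/(h*w), with equality
# exactly when c1 == c2 -- hence the equal-width basic unit below.
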
import torch
import torch.nn as nn
import torch.nn.functional as F


class ShuffleBlock(nn.Module):
    """通道重排模块
    
    通过重新排列通道的顺序来实现不同特征的信息交流。
    
    Args:
        groups (int): 分组数量,默认为2
    """
    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        """通道重排的前向传播
        
        步骤:
        1. [N,C,H,W] -> [N,g,C/g,H,W]  # 重塑为g组
        2. [N,g,C/g,H,W] -> [N,C/g,g,H,W]  # 转置g维度
        3. [N,C/g,g,H,W] -> [N,C,H,W]  # 重塑回原始形状
        
        Args:
            x: 输入张量,[N,C,H,W]
            
        Returns:
            out: 通道重排后的张量,[N,C,H,W]
        """
        N, C, H, W = x.size()
        g = self.groups
        return x.view(N, g, C//g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W)
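

# A minimal usage sketch (the helper below is ours, not part of the model):
# with C=4 and groups=2, channels [0,1,2,3] come out as [0,2,1,3], i.e. the
# two groups are interleaved so a following group-limited op sees both.
def _demo_channel_shuffle():
    x = torch.arange(4, dtype=torch.float32).view(1, 4, 1, 1)  # channels 0..3
    shuffled = ShuffleBlock(groups=2)(x)
    print(shuffled.flatten().tolist())  # [0.0, 2.0, 1.0, 3.0]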


class SplitBlock(nn.Module):
    """通道分离模块
    
    将输入特征图按比例分成两部分。
    
    Args:
        ratio (float): 分离比例,默认为0.5
    """
    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        """通道分离的前向传播
        
        Args:
            x: 输入张量,[N,C,H,W]
            
        Returns:
            tuple: 分离后的两个张量,[N,C1,H,W]和[N,C2,H,W]
        """
        c = int(x.size(1) * self.ratio)
        return x[:, :c, :, :], x[:, c:, :, :]
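

# Tiny illustration (helper is ours): with ratio=0.5 a 4-channel input splits
# into the first two and last two channels; in BasicBlock below the first half
# bypasses the convolutions entirely.
def _demo_split_block():
    x1, x2 = SplitBlock(ratio=0.5)(torch.randn(1, 4, 8, 8))
    print(x1.shape, x2.shape)  # both torch.Size([1, 2, 8, 8])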


class BasicBlock(nn.Module):
    """ShuffleNetV2的基本模块
    
    结构:
    x -------|-----------------|
      |      |                |
      |      1x1 Conv         |
      |      3x3 DWConv       |
      |      1x1 Conv         |
      |                       |
      |------------------Concat
                          |
                      Channel Shuffle
                          
    Args:
        in_channels (int): 输入通道数
        split_ratio (float): 通道分离比例,默认为0.5
    """
    def __init__(self, in_channels, split_ratio=0.5):
        super(BasicBlock, self).__init__()
        self.split = SplitBlock(split_ratio)
        in_channels = int(in_channels * split_ratio)
        
        # main branch: 1x1 Conv -> 3x3 DWConv -> 1x1 Conv (applied to x2)
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                              kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        
        self.conv2 = nn.Conv2d(in_channels, in_channels,
                              kernel_size=3, stride=1, padding=1, 
                              groups=in_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)
        
        self.conv3 = nn.Conv2d(in_channels, in_channels,
                              kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(in_channels)
        
        self.shuffle = ShuffleBlock()

    def forward(self, x):
        # channel split
        x1, x2 = self.split(x)
        
        # main branch
        out = F.relu(self.bn1(self.conv1(x2)))
        out = self.bn2(self.conv2(out))
        out = F.relu(self.bn3(self.conv3(out)))
        
        # concatenate with the identity half and shuffle
        out = torch.cat([x1, out], 1)
        out = self.shuffle(out)
        return out
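

# Shape sanity check (helper is ours): per guideline 1, the basic unit keeps
# width constant, so input and output shapes match exactly.
def _demo_basic_block():
    block = BasicBlock(in_channels=116)
    x = torch.randn(1, 116, 8, 8)
    print(block(x).shape)  # torch.Size([1, 116, 8, 8])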


class DownBlock(nn.Module):
    """下采样模块
    
    结构:
           3x3 DWConv(s=2)     1x1 Conv
    x -----> 1x1 Conv          3x3 DWConv(s=2)
                               1x1 Conv
                                  |
                              Concat
                                  |
                          Channel Shuffle
                          
    Args:
        in_channels (int): 输入通道数
        out_channels (int): 输出通道数
    """
    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2

        # left branch: 3x3 depthwise conv (stride 2) -> 1x1 conv
        self.branch1 = nn.Sequential(
            # 3x3 depthwise convolution, stride 2
            nn.Conv2d(in_channels, in_channels,
                      kernel_size=3, stride=2, padding=1,
                      groups=in_channels, bias=False),
            nn.BatchNorm2d(in_channels),
            # 1x1 convolution
            nn.Conv2d(in_channels, mid_channels,
                      kernel_size=1, bias=False),
            nn.BatchNorm2d(mid_channels),
            # ReLU after the 1x1 conv, as in the paper's downsampling unit
            nn.ReLU(inplace=True)
        )

        # right branch: 1x1 conv -> 3x3 depthwise conv (stride 2) -> 1x1 conv
        self.branch2 = nn.Sequential(
            # 1x1 convolution
            nn.Conv2d(in_channels, mid_channels,
                      kernel_size=1, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            # 3x3 depthwise convolution, stride 2
            nn.Conv2d(mid_channels, mid_channels,
                      kernel_size=3, stride=2, padding=1,
                      groups=mid_channels, bias=False),
            nn.BatchNorm2d(mid_channels),
            # 1x1 convolution
            nn.Conv2d(mid_channels, mid_channels,
                      kernel_size=1, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True)
        )

        self.shuffle = ShuffleBlock()

    def forward(self, x):
        # left branch
        out1 = self.branch1(x)
        
        # right branch
        out2 = self.branch2(x)
        
        # concatenate and shuffle
        out = torch.cat([out1, out2], 1)
        out = self.shuffle(out)
        return out
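

# Shape sketch for the downsampling unit (helper is ours): both branches see
# the full input, so the unit halves the spatial resolution and grows the
# width from in_channels to out_channels.
def _demo_down_block():
    block = DownBlock(in_channels=24, out_channels=116)
    x = torch.randn(1, 24, 32, 32)
    print(block(x).shape)  # torch.Size([1, 116, 16, 16])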


class ShuffleNetV2(nn.Module):
    """ShuffleNetV2模型
    
    网络结构:
    1. 一个卷积层进行特征提取
    2. 三个阶段,每个阶段包含多个基本块和一个下采样块
    3. 最后一个卷积层
    4. 平均池化和全连接层进行分类
    
    Args:
        net_size (float): 网络大小系数,可选0.5/1.0/1.5/2.0
    """
    def __init__(self, net_size):
        super(ShuffleNetV2, self).__init__()
        out_channels = configs[net_size]['out_channels']
        num_blocks = configs[net_size]['num_blocks']

        # stem convolution (stride 1, no max-pool: sized for 32x32 inputs)
        self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
                              stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        
        # three stages
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        
        # final 1x1 convolution
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                              kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        
        # classification head
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(out_channels[3], 10)
        
        # initialize weights
        self._initialize_weights()

    def _make_layer(self, out_channels, num_blocks):
        """构建一个阶段
        
        Args:
            out_channels (int): 输出通道数
            num_blocks (int): 基本块的数量
            
        Returns:
            nn.Sequential: 一个阶段的层序列
        """
        layers = [DownBlock(self.in_channels, out_channels)]
        for _ in range(num_blocks):
            layers.append(BasicBlock(out_channels))
        self.in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        """前向传播
        
        Args:
            x: 输入张量,[N,3,32,32]
            
        Returns:
            out: 输出张量,[N,num_classes]
        """
        # stem feature extraction
        out = F.relu(self.bn1(self.conv1(x)))
        
        # three stages
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        
        # final 1x1 convolution
        out = F.relu(self.bn2(self.conv2(out)))
        
        # classification
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out
    
    def _initialize_weights(self):
        """初始化模型权重
        
        采用kaiming初始化方法:
        - 卷积层权重采用kaiming_normal_初始化
        - BN层参数采用常数初始化
        - 线性层采用正态分布初始化
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


# Network configurations for the different width multipliers
configs = {
    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.0: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2.0: {
        'out_channels': (244, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}
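

# Usage sketch (helper is ours): the width multiplier picks an entry from
# `configs`; e.g. net_size=1.0 builds stages of width 116/232/464 with a
# 1024-channel final conv. Printing parameter counts makes the scaling visible.
def _demo_configs():
    for size, cfg in configs.items():
        n_params = sum(p.numel() for p in ShuffleNetV2(net_size=size).parameters())
        print(f'net_size={size}: out_channels={cfg["out_channels"]}, '
              f'params={n_params / 1e6:.2f}M')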


def test():
    """测试函数"""
    # 创建模型
    net = ShuffleNetV2(net_size=0.5)
    print('Model Structure:')
    print(net)
    
    # 测试前向传播
    x = torch.randn(1,3,32,32)
    y = net(x)
    print('\nInput Shape:', x.shape)
    print('Output Shape:', y.shape)
    
    # 打印模型信息
    from torchinfo import summary
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = net.to(device)
    summary(net, (1,3,32,32))


if __name__ == '__main__':
    test()