File size: 15,017 Bytes
83034b6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 |
Transformer Encoder Base- Content ================================================================================================================================== Layer (type (var_name)) Input Shape Output Shape Param # Trainable ================================================================================================================================== TransformerEncoder (TransformerEncoder) [784, 2, 512] [784, 2, 512] -- True ├─ModuleList (layers) -- -- -- True │ └─TransformerEncoderLayer (0) [784, 2, 512] [784, 2, 512] -- True │ │ └─MultiheadAttention (self_attn) [784, 2, 512] [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout1) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm1) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─Linear (linear1) [784, 2, 512] [784, 2, 2048] 1,050,624 True │ │ └─Dropout (dropout) [784, 2, 2048] [784, 2, 2048] -- -- │ │ └─Linear (linear2) [784, 2, 2048] [784, 2, 512] 1,049,088 True │ │ └─Dropout (dropout2) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm2) [784, 2, 512] [784, 2, 512] 1,024 True │ └─TransformerEncoderLayer (1) [784, 2, 512] [784, 2, 512] -- True │ │ └─MultiheadAttention (self_attn) [784, 2, 512] [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout1) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm1) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─Linear (linear1) [784, 2, 512] [784, 2, 2048] 1,050,624 True │ │ └─Dropout (dropout) [784, 2, 2048] [784, 2, 2048] -- -- │ │ └─Linear (linear2) [784, 2, 2048] [784, 2, 512] 1,049,088 True │ │ └─Dropout (dropout2) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm2) [784, 2, 512] [784, 2, 512] 1,024 True │ └─TransformerEncoderLayer (2) [784, 2, 512] [784, 2, 512] -- True │ │ └─MultiheadAttention (self_attn) [784, 2, 512] [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout1) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm1) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─Linear (linear1) [784, 2, 512] [784, 2, 2048] 1,050,624 True │ │ └─Dropout 
(dropout) [784, 2, 2048] [784, 2, 2048] -- -- │ │ └─Linear (linear2) [784, 2, 2048] [784, 2, 512] 1,049,088 True │ │ └─Dropout (dropout2) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm2) [784, 2, 512] [784, 2, 512] 1,024 True ================================================================================================================================== Total params: 9,457,152 Trainable params: 9,457,152 Non-trainable params: 0 Total mult-adds (G): 4.94 Transformer Encoder Base: Style ================================================================================================================================== Layer (type (var_name)) Input Shape Output Shape Param # Trainable ================================================================================================================================== TransformerEncoder (TransformerEncoder) [784, 2, 512] [784, 2, 512] -- True ├─ModuleList (layers) -- -- -- True │ └─TransformerEncoderLayer (0) [784, 2, 512] [784, 2, 512] -- True │ │ └─MultiheadAttention (self_attn) [784, 2, 512] [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout1) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm1) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─Linear (linear1) [784, 2, 512] [784, 2, 2048] 1,050,624 True │ │ └─Dropout (dropout) [784, 2, 2048] [784, 2, 2048] -- -- │ │ └─Linear (linear2) [784, 2, 2048] [784, 2, 512] 1,049,088 True │ │ └─Dropout (dropout2) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm2) [784, 2, 512] [784, 2, 512] 1,024 True │ └─TransformerEncoderLayer (1) [784, 2, 512] [784, 2, 512] -- True │ │ └─MultiheadAttention (self_attn) [784, 2, 512] [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout1) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm1) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─Linear (linear1) [784, 2, 512] [784, 2, 2048] 1,050,624 True │ │ └─Dropout (dropout) [784, 2, 2048] [784, 2, 2048] -- -- │ │ └─Linear (linear2) [784, 2, 2048] [784, 2, 512] 1,049,088 True │ │ 
└─Dropout (dropout2) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm2) [784, 2, 512] [784, 2, 512] 1,024 True │ └─TransformerEncoderLayer (2) [784, 2, 512] [784, 2, 512] -- True │ │ └─MultiheadAttention (self_attn) [784, 2, 512] [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout1) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm1) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─Linear (linear1) [784, 2, 512] [784, 2, 2048] 1,050,624 True │ │ └─Dropout (dropout) [784, 2, 2048] [784, 2, 2048] -- -- │ │ └─Linear (linear2) [784, 2, 2048] [784, 2, 512] 1,049,088 True │ │ └─Dropout (dropout2) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm2) [784, 2, 512] [784, 2, 512] 1,024 True ================================================================================================================================== Total params: 9,457,152 Trainable params: 9,457,152 Non-trainable params: 0 Total mult-adds (G): 4.94 ================================================================================================================================== Input size (MB): 3.21 Forward/backward pass size (MB): 134.87 Params size (MB): 25.22 Estimated Total Size (MB): 163.31 ================================================================================================================================== Transformer Decoder ======================================================================================================================================= Layer (type (var_name)) Input Shape Output Shape Param # Trainable ======================================================================================================================================= TransformerDecoder (TransformerDecoder) [784, 2, 512] [1, 784, 2, 512] -- True ├─ModuleList (layers) -- -- -- True │ └─TransformerDecoderLayer (0) [784, 2, 512] [784, 2, 512] -- True │ │ └─MultiheadAttention (self_attn) [784, 2, 512] [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout1) [784, 2, 512] [784, 2, 512] -- -- 
│ │ └─LayerNorm (norm1) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─MultiheadAttention (multihead_attn) -- [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout2) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm2) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─Linear (linear1) [784, 2, 512] [784, 2, 2048] 1,050,624 True │ │ └─Dropout (dropout) [784, 2, 2048] [784, 2, 2048] -- -- │ │ └─Linear (linear2) [784, 2, 2048] [784, 2, 512] 1,049,088 True │ │ └─Dropout (dropout3) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm3) [784, 2, 512] [784, 2, 512] 1,024 True │ └─TransformerDecoderLayer (1) [784, 2, 512] [784, 2, 512] -- True │ │ └─MultiheadAttention (self_attn) [784, 2, 512] [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout1) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm1) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─MultiheadAttention (multihead_attn) -- [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout2) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm2) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─Linear (linear1) [784, 2, 512] [784, 2, 2048] 1,050,624 True │ │ └─Dropout (dropout) [784, 2, 2048] [784, 2, 2048] -- -- │ │ └─Linear (linear2) [784, 2, 2048] [784, 2, 512] 1,049,088 True │ │ └─Dropout (dropout3) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm3) [784, 2, 512] [784, 2, 512] 1,024 True │ └─TransformerDecoderLayer (2) [784, 2, 512] [784, 2, 512] -- True │ │ └─MultiheadAttention (self_attn) [784, 2, 512] [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout1) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm1) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─MultiheadAttention (multihead_attn) -- [784, 2, 512] 1,050,624 True │ │ └─Dropout (dropout2) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm2) [784, 2, 512] [784, 2, 512] 1,024 True │ │ └─Linear (linear1) [784, 2, 512] [784, 2, 2048] 1,050,624 True │ │ └─Dropout (dropout) [784, 2, 2048] [784, 2, 2048] -- -- │ │ └─Linear (linear2) [784, 2, 2048] [784, 2, 
512] 1,049,088 True │ │ └─Dropout (dropout3) [784, 2, 512] [784, 2, 512] -- -- │ │ └─LayerNorm (norm3) [784, 2, 512] [784, 2, 512] 1,024 True ├─LayerNorm (norm) [784, 2, 512] [784, 2, 512] 1,024 True ======================================================================================================================================= Total params: 12,613,120 Trainable params: 12,613,120 Non-trainable params: 0 Total mult-adds (G): 4.95 ======================================================================================================================================= Input size (MB): 6.42 Forward/backward pass size (MB): 160.56 Params size (MB): 25.24 Estimated Total Size (MB): 192.22 All Layers |