Abdullah-Nazhat committed on
Commit a364693 · verified
1 Parent(s): e4b4429

Upload 3 files

Files changed (3)
  1. __init__.py +1 -0
  2. core.py +104 -0
  3. train.py +179 -0
__init__.py ADDED
@@ -0,0 +1 @@
+ from .core import *
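The wildcard re-export makes everything public in core.py importable from the package itself. A minimal sketch, assuming the repository folder is importable as a package (the name `ninformer` here is hypothetical):

    from ninformer import NiNformer  # hypothetical package name; resolved via __init__.py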
core.py ADDED
@@ -0,0 +1,104 @@
+ import torch
+ from torch import nn
+ from einops.layers.torch import Rearrange
+
+
+ class FeedForward(nn.Module):
+     # two-layer MLP: expand to hidden_dim, GELU, project back, with dropout
+     def __init__(self, dim, hidden_dim, dropout):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(dim, hidden_dim),
+             nn.GELU(),
+             nn.Dropout(dropout),
+             nn.Linear(hidden_dim, dim),
+             nn.Dropout(dropout)
+         )
+
+     def forward(self, x):
+         return self.net(x)
+
+
+ class MixerBlock(nn.Module):
+     # MLP-Mixer block: token mixing across the patch axis, then channel mixing
+     def __init__(self, dim, num_patch, token_dim, channel_dim, dropout):
+         super().__init__()
+         self.token_mix = nn.Sequential(
+             nn.LayerNorm(dim),
+             Rearrange('b n d -> b d n'),
+             FeedForward(num_patch, token_dim, dropout),
+             Rearrange('b d n -> b n d')
+         )
+         self.channel_mix = nn.Sequential(
+             nn.LayerNorm(dim),
+             FeedForward(dim, channel_dim, dropout),
+         )
+
+     def forward(self, x):
+         x = x + self.token_mix(x)
+         x = x + self.channel_mix(x)
+         return x
+
+
+ class MixerGatingUnit(nn.Module):
+     # gating unit: elementwise product of a linear branch and a Mixer branch
+     def __init__(self, dim, seq_len, token_dim, channel_dim, dropout):
+         super().__init__()
+         self.Mixer = MixerBlock(dim, seq_len, token_dim, channel_dim, dropout)
+         self.proj = nn.Linear(dim, dim)
+
+     def forward(self, x):
+         u, v = x, x
+         u = self.proj(u)
+         v = self.Mixer(v)
+         out = u * v
+         return out
+
+
+ class NiNBlock(nn.Module):
+     # pre-norm residual block: gating-unit sublayer followed by an FFN sublayer;
+     # note that a single LayerNorm instance is shared by both sublayers
+     def __init__(self, d_model, d_ffn, seq_len, dropout):
+         super().__init__()
+         self.norm = nn.LayerNorm(d_model)
+         self.mgu = MixerGatingUnit(d_model, seq_len, d_ffn, d_ffn, dropout)
+         self.ffn = FeedForward(d_model, d_ffn, dropout)
+
+     def forward(self, x):
+         residual = x
+         x = self.norm(x)
+         x = self.mgu(x)
+         x = x + residual
+         residual = x
+         x = self.norm(x)
+         x = self.ffn(x)
+         out = x + residual
+         return out
+
+
+ class NiNformer(nn.Module):
+     # stack of NiNBlocks applied to a (batch, seq_len, d_model) sequence
+     def __init__(self, d_model, d_ffn, seq_len, num_layers, dropout):
+         super().__init__()
+         self.model = nn.Sequential(
+             *[NiNBlock(d_model, d_ffn, seq_len, dropout) for _ in range(num_layers)]
+         )
+
+     def forward(self, x):
+         return self.model(x)
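For orientation, a minimal shape check for the stack defined above (not part of the commit): every NiNBlock is shape-preserving, so the model's output shape matches its input.

    import torch
    from core import NiNformer

    model = NiNformer(d_model=256, d_ffn=512, seq_len=64, num_layers=4, dropout=0.5)
    x = torch.randn(2, 64, 256)  # (batch, seq_len, d_model)
    assert model(x).shape == (2, 64, 256)  # residual blocks preserve the shape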
train.py ADDED
@@ -0,0 +1,179 @@
+ # imports
+ import os
+ import csv
+ import torch
+ from torch import nn
+ from torch.utils.data import DataLoader
+ from torchvision import datasets
+ from torchvision.transforms import ToTensor, Normalize, RandomCrop, RandomHorizontalFlip, Compose
+ from core import NiNformer
+
+ # augmentation belongs to training only; the test set gets the
+ # deterministic tensor/normalize pipeline
+ transform_train = Compose([
+     RandomCrop(32, padding=4),
+     RandomHorizontalFlip(),
+     ToTensor(),
+     Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+ ])
+
+ transform_test = Compose([
+     ToTensor(),
+     Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+ ])
+
+ training_data = datasets.CIFAR10(
+     root='data',
+     train=True,
+     download=True,
+     transform=transform_train
+ )
+
+ test_data = datasets.CIFAR10(
+     root='data',
+     train=False,
+     download=True,
+     transform=transform_test
+ )
+
+ # create dataloaders
+ batch_size = 128
+
+ train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
+ test_dataloader = DataLoader(test_data, batch_size=batch_size)
+
+ for X, y in test_dataloader:
+     print(f"Shape of X [N, C, H, W]: {X.shape}")
+     print(f"Shape of y: {y.shape} {y.dtype}")
+     break
+
+ # size checking for loading images
+ def check_sizes(image_size, patch_size):
+     sqrt_num_patches, remainder = divmod(image_size, patch_size)
+     assert remainder == 0, "`image_size` must be divisible by `patch_size`"
+     num_patches = sqrt_num_patches ** 2
+     return num_patches
+
+ # get cpu or gpu device for training
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ print(f"using {device} device")
+
+ # model definition
+ class NiNformerImageClassification(NiNformer):
+     def __init__(
+         self,
+         image_size=32,
+         patch_size=4,
+         in_channels=3,
+         num_classes=10,
+         d_model=256,
+         d_ffn=512,
+         num_layers=4,
+         dropout=0.5
+     ):
+         # the sequence length is the number of patches, e.g. (32 // 4) ** 2 = 64
+         num_patches = check_sizes(image_size, patch_size)
+         super().__init__(d_model, d_ffn, num_patches, num_layers, dropout)
+         self.patcher = nn.Conv2d(
+             in_channels, d_model, kernel_size=patch_size, stride=patch_size
+         )
+         self.classifier = nn.Linear(d_model, num_classes)
+
+     def forward(self, x):
+         patches = self.patcher(x)                             # (N, d_model, H/p, W/p)
+         batch_size, num_channels, _, _ = patches.shape
+         patches = patches.permute(0, 2, 3, 1)                 # (N, H/p, W/p, d_model)
+         patches = patches.view(batch_size, -1, num_channels)  # (N, num_patches, d_model)
+         embedding = self.model(patches)
+         embedding = embedding.mean(dim=1)  # global average pooling
+         out = self.classifier(embedding)
+         return out
+
+ model = NiNformerImageClassification().to(device)
+ print(model)
+
+ # loss and optimizer
+ loss_fn = nn.CrossEntropyLoss()
+ optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
+
+ # training loop
+ def train(dataloader, model, loss_fn, optimizer):
+     size = len(dataloader.dataset)
+     num_batches = len(dataloader)
+     model.train()
+     train_loss = 0
+     correct = 0
+     for batch, (X, y) in enumerate(dataloader):
+         X, y = X.to(device), y.to(device)
+
+         # compute prediction error
+         pred = model(X)
+         loss = loss_fn(pred, y)
+
+         # backpropagation
+         optimizer.zero_grad()
+         loss.backward()
+         optimizer.step()
+
+         train_loss += loss.item()
+         _, labels = torch.max(pred.data, 1)
+         correct += labels.eq(y.data).type(torch.float).sum()
+         if batch % 100 == 0:
+             loss, current = loss.item(), batch * len(X)
+             print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
+
+     train_loss /= num_batches
+     train_accuracy = 100. * correct.item() / size
+     print(f"train accuracy: {train_accuracy:>0.1f}%")
+     return train_loss, train_accuracy
+
+ # test loop
+ def test(dataloader, model, loss_fn):
+     size = len(dataloader.dataset)
+     num_batches = len(dataloader)
+     model.eval()
+     test_loss = 0
+     correct = 0
+     with torch.no_grad():
+         for X, y in dataloader:
+             X, y = X.to(device), y.to(device)
+             pred = model(X)
+             test_loss += loss_fn(pred, y).item()
+             correct += (pred.argmax(1) == y).type(torch.float).sum().item()
+     test_loss /= num_batches
+     correct /= size
+     print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
+     test_accuracy = 100 * correct
+     return test_loss, test_accuracy
+
+ # apply train and test
+ logname = "/home/abdullah/Proposals_experiments/NiNformer/Experiments_cifar10/logs_ninformer/logs_cifar10.csv"
+ os.makedirs(os.path.dirname(logname), exist_ok=True)  # make sure the log directory exists
+ if not os.path.exists(logname):
+     with open(logname, 'w') as logfile:
+         logwriter = csv.writer(logfile, delimiter=',')
+         logwriter.writerow(['epoch', 'train loss', 'train acc',
+                             'test loss', 'test acc'])
+
+ epochs = 100
+ for epoch in range(epochs):
+     print(f"Epoch {epoch+1}\n-----------------------------------")
+     train_loss, train_acc = train(train_dataloader, model, loss_fn, optimizer)
+     test_loss, test_acc = test(test_dataloader, model, loss_fn)
+     with open(logname, 'a') as logfile:
+         logwriter = csv.writer(logfile, delimiter=',')
+         logwriter.writerow([epoch+1, train_loss, train_acc,
+                             test_loss, test_acc])
+ print("Done!")
+
+ # saving trained model
+ path = "/home/abdullah/Desktop/Proposals_experiments/NiNformer/Experiments_cifar10/weights_ninformer"
+ model_name = "NiNformerImageClassification_cifar10"
+ os.makedirs(path, exist_ok=True)  # make sure the weights directory exists
+ torch.save(model.state_dict(), f"{path}/{model_name}.pth")
+ print(f"Saved Model State to {path}/{model_name}.pth")
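To evaluate the saved checkpoint later, a minimal loading sketch under the same paths and class definition as above:

    model = NiNformerImageClassification().to(device)
    model.load_state_dict(torch.load(f"{path}/{model_name}.pth", map_location=device))
    model.eval()
    test_loss, test_acc = test(test_dataloader, model, loss_fn)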