Abdullah-Nazhat committed on
Commit 589ece6
1 Parent(s): 03ab12f

Upload 2 files

Files changed (2):
  1. normalizer.py +88 -0
  2. train.py +188 -0
normalizer.py ADDED
import torch
from torch import nn


class GatingUnit(nn.Module):
    """SiLU-gated unit: two parallel projections combined multiplicatively."""

    def __init__(self, dim):
        super().__init__()
        self.proj_1 = nn.Linear(dim, dim)
        self.proj_2 = nn.Linear(dim, dim)
        self.proj_3 = nn.Linear(dim, dim)
        self.silu = nn.SiLU()

    def forward(self, x):
        # Two branches over the same input: one passed through SiLU, one linear.
        u, v = x, x
        u = self.proj_1(u)
        u = self.silu(u)

        v = self.proj_2(v)

        # The elementwise product acts as the gate; a final projection mixes channels.
        g = u * v
        g = self.proj_3(g)

        out = g
        return out

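# Shape note: for x of shape (batch, tokens, dim), every projection maps
# dim -> dim, so u, v, the gate g = u * v, and the output all keep the
# (batch, tokens, dim) shape; the gate mixes channels but never tokens.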


class NormalizerBlock(nn.Module):
    def __init__(self, d_model, num_tokens):
        super().__init__()
        # Global norm is applied over the flattened (tokens * channels) axis;
        # local norm is the usual per-token LayerNorm over channels.
        self.norm_global = nn.LayerNorm(d_model * num_tokens)
        self.norm_local = nn.LayerNorm(d_model)
        self.gating = GatingUnit(d_model)

    def forward(self, x):
        # Sub-block 1: global normalization with a residual connection.
        residual = x
        dim0, dim1, dim2 = x.shape
        x = x.reshape([dim0, dim1 * dim2])
        x = self.norm_global(x)
        x = x.reshape([dim0, dim1, dim2])
        x = x + residual

        # Sub-block 2: local normalization followed by gating, with a residual.
        residual = x
        x = self.norm_local(x)
        x = self.gating(x)
        out = x + residual

        return out
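# Note: norm_global ties the block to a fixed sequence length, since the
# LayerNorm over d_model * num_tokens has a fixed normalized shape; inputs
# must therefore always carry exactly num_tokens tokens.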


class Normalizer(nn.Module):
    def __init__(self, d_model, num_tokens, num_layers):
        super().__init__()
        # A plain stack of NormalizerBlocks applied in sequence.
        self.model = nn.Sequential(
            *[NormalizerBlock(d_model, num_tokens) for _ in range(num_layers)]
        )

    def forward(self, x):
        return self.model(x)
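
# A minimal smoke test sketch (not part of the original commit): run a dummy
# batch through the stack and check that shapes are preserved. The batch size
# here is illustrative; token count and width match train.py's defaults.
if __name__ == "__main__":
    dummy = torch.randn(2, 64, 256)   # (batch, num_tokens, d_model)
    net = Normalizer(d_model=256, num_tokens=64, num_layers=4)
    out = net(dummy)
    assert out.shape == dummy.shape   # blocks are shape-preserving
    print(out.shape)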
train.py ADDED
import os
import csv
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Normalize, RandomCrop, RandomHorizontalFlip, Compose
from normalizer import Normalizer


# Standard CIFAR-10 augmentation: pad-and-crop, horizontal flip, then
# normalize each channel to roughly [-1, 1].
transform = Compose([
    RandomCrop(32, padding=4),
    RandomHorizontalFlip(),
    ToTensor(),
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

training_data = datasets.CIFAR10(
    root='data',
    train=True,
    download=True,
    transform=transform
)

# Note that the same transform, including the augmentation, is applied to
# the test split as well.
test_data = datasets.CIFAR10(
    root='data',
    train=False,
    download=True,
    transform=transform
)

batch_size = 128

train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=batch_size)


# Print the shape of one test batch as a sanity check.
for X, y in test_dataloader:
    print(f"Shape of X [N, C, H, W]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")
    break


def check_sizes(image_size, patch_size):
    # The image must tile exactly into patch_size x patch_size patches.
    sqrt_num_patches, remainder = divmod(image_size, patch_size)
    assert remainder == 0, "`image_size` must be divisible by `patch_size`"
    num_patches = sqrt_num_patches ** 2
    return num_patches
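# Example: image_size=32, patch_size=4 -> divmod(32, 4) = (8, 0), so the
# patch grid is 8 x 8 and check_sizes returns 64 patches.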


device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")


class NormalizerImageClassification(Normalizer):
    def __init__(
        self,
        image_size=32,
        patch_size=4,
        in_channels=3,
        num_classes=10,
        d_model=256,
        num_tokens=64,
        num_layers=4,
    ):
        # With the defaults, num_patches == num_tokens == 64; check_sizes also
        # validates that the image tiles evenly into patches.
        num_patches = check_sizes(image_size, patch_size)
        super().__init__(d_model, num_tokens, num_layers)
        # A strided convolution serves as the patch embedding: each
        # patch_size x patch_size patch becomes one d_model-dimensional token.
        self.patcher = nn.Conv2d(
            in_channels, d_model, kernel_size=patch_size, stride=patch_size
        )
        self.classifier = nn.Linear(d_model, num_classes)

    def forward(self, x):
        patches = self.patcher(x)
        batch_size, num_channels, _, _ = patches.shape
        # Flatten the spatial grid into a sequence of tokens.
        patches = patches.permute(0, 2, 3, 1)
        patches = patches.view(batch_size, -1, num_channels)
        embedding = self.model(patches)
        # Mean-pool over tokens, then classify.
        embedding = embedding.mean(dim=1)
        out = self.classifier(embedding)
        return out
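# Shape walk-through with the defaults: x is (N, 3, 32, 32); the patcher
# yields (N, 256, 8, 8); permute + view give (N, 64, 256), matching the
# (num_tokens, d_model) layout the Normalizer stack was built for.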

model = NormalizerImageClassification().to(device)
print(model)


loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)


def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.train()
    train_loss = 0
    correct = 0
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Forward pass and loss.
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate the running loss and the number of correct predictions.
        train_loss += loss.item()
        _, labels = torch.max(pred.data, 1)
        correct += labels.eq(y.data).type(torch.float).sum()

        if batch % 100 == 0:
            current = batch * len(X)
            print(f"loss: {loss.item():>7f} [{current:>5d}/{size:>5d}]")

    train_loss /= num_batches
    train_accuracy = 100. * correct.item() / size
    print(train_accuracy)
    return train_loss, train_accuracy


def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss = 0
    correct = 0
    # No gradients are needed for evaluation.
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
    test_accuracy = 100 * correct
    return test_loss, test_accuracy


# CSV log of per-epoch metrics; the header row is written once.
logname = "/home/abdullah/Desktop/Normalizer/Experiments_cifar10/logs_normalizer/logs_cifar10.csv"
os.makedirs(os.path.dirname(logname), exist_ok=True)  # added for robustness: ensure the log directory exists
if not os.path.exists(logname):
    with open(logname, 'w') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow(['epoch', 'train loss', 'train acc',
                            'test loss', 'test acc'])


epochs = 100
for epoch in range(epochs):
    print(f"Epoch {epoch+1}\n-----------------------------------")
    train_loss, train_acc = train(train_dataloader, model, loss_fn, optimizer)
    test_loss, test_acc = test(test_dataloader, model, loss_fn)
    # Append this epoch's metrics to the CSV log.
    with open(logname, 'a') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow([epoch+1, train_loss, train_acc,
                            test_loss, test_acc])
print("Done!")


# Save the trained weights.
path = "/home/abdullah/Desktop/Normalizer/Experiments_cifar10/weights_normalizer"
model_name = "NormalizerImageClassification_cifar10"
os.makedirs(path, exist_ok=True)  # added for robustness: ensure the weights directory exists
torch.save(model.state_dict(), f"{path}/{model_name}.pth")
print(f"Saved Model State to {path}/{model_name}.pth")
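
# To reuse the saved weights later, a minimal reload sketch (assumes the same
# default constructor arguments as above):
#
#     model = NormalizerImageClassification().to(device)
#     model.load_state_dict(torch.load(f"{path}/{model_name}.pth"))
#     model.eval()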