Abdullah-Nazhat committed
Commit ea3b94f · verified · 1 Parent(s): 3cb928d

Upload 2 files

Files changed (2):
  1. train.py +191 -0
  2. uniform_activator.py +68 -0
train.py ADDED
@@ -0,0 +1,191 @@
# imports

import os
import csv
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Normalize, RandomCrop, RandomHorizontalFlip, Compose
from uniform_activator import ACTIVATOR

# data transforms

transform = Compose([
    RandomCrop(32, padding=4),
    RandomHorizontalFlip(),
    ToTensor(),
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

training_data = datasets.CIFAR10(
    root='data',
    train=True,
    download=True,
    transform=transform
)

test_data = datasets.CIFAR10(
    root='data',
    train=False,
    download=True,
    transform=transform
)

# create dataloaders

batch_size = 128

train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

for X, y in test_dataloader:
    print(f"Shape of X [N, C, H, W]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")
    break

# size checking for loading images
def check_sizes(image_size, patch_size):
    sqrt_num_patches, remainder = divmod(image_size, patch_size)
    assert remainder == 0, "`image_size` must be divisible by `patch_size`"
    num_patches = sqrt_num_patches ** 2
    return num_patches


# create model
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"

print(f"Using {device} device")

# model definition

class ACTIVATORImageClassification(ACTIVATOR):
    def __init__(
        self,
        image_size=32,
        patch_size=4,
        in_channels=3,
        num_classes=10,
        d_model=256,
        num_layers=4,
    ):
        num_patches = check_sizes(image_size, patch_size)  # validates image/patch divisibility
        super().__init__(d_model, num_layers)
        self.patcher = nn.Conv2d(
            in_channels, d_model, kernel_size=patch_size, stride=patch_size
        )
        self.classifier = nn.Linear(d_model, num_classes)

    def forward(self, x):
        patches = self.patcher(x)
        batch_size, num_channels, _, _ = patches.shape
        patches = patches.permute(0, 2, 3, 1)
        patches = patches.view(batch_size, -1, num_channels)
        embedding = self.model(patches)
        embedding = embedding.mean(dim=1)  # global average pooling
        out = self.classifier(embedding)
        return out

model = ACTIVATORImageClassification().to(device)
print(model)

# Loss function and optimizer

loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)


# Training loop

def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.train()
    train_loss = 0
    correct = 0
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, labels = torch.max(pred.data, 1)
        correct += labels.eq(y.data).type(torch.float).sum()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")

    train_loss /= num_batches
    train_accuracy = 100. * correct.item() / size
    print(train_accuracy)
    return train_loss, train_accuracy


# Test loop

def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
    test_accuracy = 100 * correct
    return test_loss, test_accuracy


# apply train and test

logname = "/home/abdullah/Desktop/Activator_uniform/Experiments_cifar10/logs_activator/logs_cifar10.csv"
if not os.path.exists(logname):
    with open(logname, 'w') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow(['epoch', 'train loss', 'train acc',
                            'test loss', 'test acc'])


epochs = 100
for epoch in range(epochs):
    print(f"Epoch {epoch+1}\n-----------------------------------")
    train_loss, train_acc = train(train_dataloader, model, loss_fn, optimizer)
    # learning rate scheduler
    # if scheduler is not None:
    #     scheduler.step()
    test_loss, test_acc = test(test_dataloader, model, loss_fn)
    with open(logname, 'a') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow([epoch+1, train_loss, train_acc,
                            test_loss, test_acc])
print("Done!")

# saving trained model

path = "/home/abdullah/Desktop/Activator_uniform/Experiments_cifar10/weights_activator"
model_name = "ACTIVATOR_ImageClassification_cifar10"
torch.save(model.state_dict(), f"{path}/{model_name}.pth")
print(f"Saved Model State to {path}/{model_name}.pth")
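For reference, a minimal shape sketch of the patch-embedding path above (not part of the uploaded files): it uses a stand-alone nn.Conv2d with the same default hyperparameters (in_channels=3, d_model=256, patch_size=4) to show how a CIFAR-10 batch becomes a sequence of patch tokens before the ACTIVATOR backbone and the global-average-pooled classifier head.

import torch
from torch import nn

# Stand-alone patch embedding mirroring ACTIVATORImageClassification's defaults (illustrative only).
patcher = nn.Conv2d(3, 256, kernel_size=4, stride=4)

x = torch.randn(2, 3, 32, 32)                              # dummy CIFAR-10-sized batch
patches = patcher(x)                                       # (2, 256, 8, 8): an 8x8 grid of patch embeddings
tokens = patches.permute(0, 2, 3, 1).reshape(2, -1, 256)   # (2, 64, 256): 64 patch tokens of width d_model
pooled = tokens.mean(dim=1)                                # (2, 256): global average pooling before the Linear classifier
print(patches.shape, tokens.shape, pooled.shape)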
uniform_activator.py ADDED
@@ -0,0 +1,68 @@
import torch
from torch import nn


class ActivatorGatingUnit(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.proj_1 = nn.Linear(dim, dim)
        self.proj_2 = nn.Linear(dim, dim)
        self.proj_3 = nn.Linear(dim, dim)
        self.gelu = nn.GELU()

    def forward(self, x):
        u, v = x, x

        # gate branch: linear projection followed by GELU
        u = self.proj_1(u)
        u = self.gelu(u)

        # value branch: linear projection only
        v = self.proj_2(v)

        # element-wise gating, then output projection
        g = u * v
        out = self.proj_3(g)
        return out


class ActivatorBlock(nn.Module):
    def __init__(self, d_model):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.actgu = ActivatorGatingUnit(d_model)

    def forward(self, x):
        # pre-norm residual block around the gating unit
        residual = x
        x = self.norm(x)
        x = self.actgu(x)
        x = x + residual
        out = x
        return out


class ACTIVATOR(nn.Module):
    def __init__(self, d_model, num_layers):
        super().__init__()
        self.model = nn.Sequential(
            *[ActivatorBlock(d_model) for _ in range(num_layers)]
        )

    def forward(self, x):
        return self.model(x)
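For reference, a minimal usage sketch of the backbone above (not part of the uploaded files), assuming uniform_activator.py is importable from the working directory. Each ActivatorBlock computes x + proj_3(GELU(proj_1(norm(x))) * proj_2(norm(x))), so the token sequence shape (batch, num_patches, d_model) is preserved end to end.

import torch
from uniform_activator import ACTIVATOR

# Illustrative only: run the ACTIVATOR backbone on a dummy batch of patch tokens.
backbone = ACTIVATOR(d_model=256, num_layers=4)
tokens = torch.randn(2, 64, 256)   # (batch, num_patches, d_model), matching the CIFAR-10 setup in train.py

out = backbone(tokens)             # shape is preserved: (2, 64, 256)
print(out.shape)
assert out.shape == tokens.shape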