Upload airbench94.py with huggingface_hub
airbench94.py
ADDED
@@ -0,0 +1,483 @@
# Uncompiled variant of airbench94_compiled.py
# 3.83s runtime on an A100; 0.36 PFLOPs.
# Evidence: 94.01 average accuracy in n=1000 runs.
#
# We recorded the runtime of 3.83 seconds on an NVIDIA A100-SXM4-80GB with the following nvidia-smi:
# NVIDIA-SMI 515.105.01    Driver Version: 515.105.01    CUDA Version: 11.7
# torch.__version__ == '2.1.2+cu118'

#############################################
#           Setup/Hyperparameters           #
#############################################

import os
import sys
import uuid
from math import ceil

import torch
from torch import nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as T

torch.backends.cudnn.benchmark = True

# We express the main training hyperparameters (batch size, learning rate, momentum, and weight decay)
# in decoupled form, so that each one can be tuned independently. This accomplishes the following:
# * Assuming time-constant gradients, the average step size is decoupled from everything but the lr.
# * The size of the weight decay update is decoupled from everything but the wd.
# In contrast, normally when we increase the (Nesterov) momentum, this also scales up the step size
# proportionally to 1 + 1 / (1 - momentum), meaning we cannot change momentum without having to re-tune
# the learning rate. Similarly, normally when we increase the learning rate this also increases the size
# of the weight decay, requiring a proportional decrease in the wd to maintain the same decay strength.
#
# The practical impact is that hyperparameter tuning is faster, since this parametrization allows each
# one to be tuned independently. See https://myrtle.ai/learn/how-to-train-your-resnet-5-hyperparameters/.

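# As a worked example of the decoupling (the conversion itself happens in main() below): with
# momentum=0.85 and batch_size=1024, kilostep_scale = 1024 * (1 + 1/(1-0.85)) ~= 7850.7, so the
# raw SGD learning rate becomes 11.5 / 7850.7 ~= 1.46e-3 and the raw weight decay becomes
# 0.0153 * 1024 / 7850.7 ~= 2.0e-3. Changing the momentum alone then rescales both consistently.
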
hyp = {
    'opt': {
        'train_epochs': 9.9,
        'batch_size': 1024,
        'lr': 11.5,               # learning rate per 1024 examples
        'momentum': 0.85,
        'weight_decay': 0.0153,   # weight decay per 1024 examples (decoupled from learning rate)
        'bias_scaler': 64.0,      # scales up learning rate (but not weight decay) for BatchNorm biases
        'label_smoothing': 0.2,
        'whiten_bias_epochs': 3,  # how many epochs to train the whitening layer bias before freezing
    },
    'aug': {
        'flip': True,
        'translate': 2,
    },
    'net': {
        'widths': {
            'block1': 64,
            'block2': 256,
            'block3': 256,
        },
        'batchnorm_momentum': 0.6,
        'scaling_factor': 1/9,
        'tta_level': 2,           # the level of test-time augmentation: 0=none, 1=mirror, 2=mirror+translate
    },
}

#############################################
#                DataLoader                 #
#############################################

CIFAR_MEAN = torch.tensor((0.4914, 0.4822, 0.4465))
CIFAR_STD = torch.tensor((0.2470, 0.2435, 0.2616))

def batch_flip_lr(inputs):
    flip_mask = (torch.rand(len(inputs), device=inputs.device) < 0.5).view(-1, 1, 1, 1)
    return torch.where(flip_mask, inputs.flip(-1), inputs)

def batch_crop(images, crop_size):
    r = (images.size(-1) - crop_size)//2
    shifts = torch.randint(-r, r+1, size=(len(images), 2), device=images.device)
    images_out = torch.empty((len(images), 3, crop_size, crop_size), device=images.device, dtype=images.dtype)
    # The two cropping methods in this if-else produce equivalent results, but the second is faster for r > 2.
    if r <= 2:
        for sy in range(-r, r+1):
            for sx in range(-r, r+1):
                mask = (shifts[:, 0] == sy) & (shifts[:, 1] == sx)
                images_out[mask] = images[mask, :, r+sy:r+sy+crop_size, r+sx:r+sx+crop_size]
    else:
        images_tmp = torch.empty((len(images), 3, crop_size, crop_size+2*r), device=images.device, dtype=images.dtype)
        for s in range(-r, r+1):
            mask = (shifts[:, 0] == s)
            images_tmp[mask] = images[mask, :, r+s:r+s+crop_size, :]
        for s in range(-r, r+1):
            mask = (shifts[:, 1] == s)
            images_out[mask] = images_tmp[mask, :, :, r+s:r+s+crop_size]
    return images_out

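# Illustrative shapes for batch_crop (not part of the training path): given images pre-padded by
# pad=2 on each side, i.e. a (N, 3, 36, 36) batch, batch_crop(padded, 32) returns a (N, 3, 32, 32)
# batch in which each image is independently shifted by up to r = (36-32)//2 = 2 pixels per axis.
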
class CifarLoader:

    def __init__(self, path, train=True, batch_size=500, aug=None, drop_last=None, shuffle=None, gpu=0):
        data_path = os.path.join(path, 'train.pt' if train else 'test.pt')
        if not os.path.exists(data_path):
            dset = torchvision.datasets.CIFAR10(path, download=True, train=train)
            images = torch.tensor(dset.data)
            labels = torch.tensor(dset.targets)
            torch.save({'images': images, 'labels': labels, 'classes': dset.classes}, data_path)

        data = torch.load(data_path, map_location=torch.device(gpu))
        self.images, self.labels, self.classes = data['images'], data['labels'], data['classes']
        # It's faster to load+process uint8 data than to load preprocessed fp16 data
        self.images = (self.images.half() / 255).permute(0, 3, 1, 2).to(memory_format=torch.channels_last)

        self.normalize = T.Normalize(CIFAR_MEAN, CIFAR_STD)
        self.proc_images = {} # Saved results of image processing to be done on the first epoch
        self.epoch = 0

        self.aug = aug or {}
        for k in self.aug.keys():
            assert k in ['flip', 'translate'], 'Unrecognized key: %s' % k

        self.batch_size = batch_size
        self.drop_last = train if drop_last is None else drop_last
        self.shuffle = train if shuffle is None else shuffle

    def __len__(self):
        return len(self.images)//self.batch_size if self.drop_last else ceil(len(self.images)/self.batch_size)

    def __iter__(self):

        if self.epoch == 0:
            images = self.proc_images['norm'] = self.normalize(self.images)
            # Pre-flip images in order to do every-other epoch flipping scheme
            if self.aug.get('flip', False):
                images = self.proc_images['flip'] = batch_flip_lr(images)
            # Pre-pad images to save time when doing random translation
            pad = self.aug.get('translate', 0)
            if pad > 0:
                self.proc_images['pad'] = F.pad(images, (pad,)*4, 'reflect')

        if self.aug.get('translate', 0) > 0:
            images = batch_crop(self.proc_images['pad'], self.images.shape[-2])
        elif self.aug.get('flip', False):
            images = self.proc_images['flip']
        else:
            images = self.proc_images['norm']
        # Flip all images together every other epoch. This increases diversity relative to random flipping
        if self.aug.get('flip', False):
            if self.epoch % 2 == 1:
                images = images.flip(-1)

        self.epoch += 1

        indices = (torch.randperm if self.shuffle else torch.arange)(len(images), device=images.device)
        for i in range(len(self)):
            idxs = indices[i*self.batch_size:(i+1)*self.batch_size]
            yield (images[idxs], self.labels[idxs])

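# Typical usage, as in main() below: CifarLoader('cifar10', train=True, batch_size=1024,
# aug=dict(flip=True, translate=2)) yields (inputs, labels) batches of half-precision,
# channels-last tensors on the GPU, with augmentation recomputed at the start of each epoch.
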
#############################################
#            Network Components             #
#############################################

class Flatten(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)

class Mul(nn.Module):
    def __init__(self, scale):
        super().__init__()
        self.scale = scale
    def forward(self, x):
        return x * self.scale

class BatchNorm(nn.BatchNorm2d):
    def __init__(self, num_features, momentum, eps=1e-12,
                 weight=False, bias=True):
        super().__init__(num_features, eps=eps, momentum=1-momentum)
        self.weight.requires_grad = weight
        self.bias.requires_grad = bias
        # Note that PyTorch already initializes the weights to one and bias to zero

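# Note on conventions: this BatchNorm takes momentum in the "EMA decay" sense (running_stat =
# momentum * running_stat + (1 - momentum) * batch_stat), whereas nn.BatchNorm2d's momentum
# argument is the weight on the new batch statistic; hence the 1-momentum conversion above.
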
class Conv(nn.Conv2d):
    def __init__(self, in_channels, out_channels, kernel_size=3, padding='same', bias=False):
        super().__init__(in_channels, out_channels, kernel_size=kernel_size, padding=padding, bias=bias)

    def reset_parameters(self):
        super().reset_parameters()
        if self.bias is not None:
            self.bias.data.zero_()
        w = self.weight.data
        torch.nn.init.dirac_(w[:w.size(1)])

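# The dirac_ call above gives the first in_channels output filters an identity initialization
# (each such filter passes one input channel through unchanged), a partial-identity init;
# the remaining filters keep their default random initialization.
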
class ConvGroup(nn.Module):
    def __init__(self, channels_in, channels_out, batchnorm_momentum):
        super().__init__()
        self.conv1 = Conv(channels_in, channels_out)
        self.pool = nn.MaxPool2d(2)
        self.norm1 = BatchNorm(channels_out, batchnorm_momentum)
        self.conv2 = Conv(channels_out, channels_out)
        self.norm2 = BatchNorm(channels_out, batchnorm_momentum)
        self.activ = nn.GELU()

    def forward(self, x):
        x = self.conv1(x)
        x = self.pool(x)
        x = self.norm1(x)
        x = self.activ(x)
        x = self.conv2(x)
        x = self.norm2(x)
        x = self.activ(x)
        return x

#############################################
#            Network Definition             #
#############################################

def make_net():
    widths = hyp['net']['widths']
    batchnorm_momentum = hyp['net']['batchnorm_momentum']
    whiten_kernel_size = 2
    whiten_width = 2 * 3 * whiten_kernel_size**2
    net = nn.Sequential(
        Conv(3, whiten_width, whiten_kernel_size, padding=0, bias=True),
        nn.GELU(),
        ConvGroup(whiten_width, widths['block1'], batchnorm_momentum),
        ConvGroup(widths['block1'], widths['block2'], batchnorm_momentum),
        ConvGroup(widths['block2'], widths['block3'], batchnorm_momentum),
        nn.MaxPool2d(3),
        Flatten(),
        nn.Linear(widths['block3'], 10, bias=False),
        Mul(hyp['net']['scaling_factor']),
    )
    net[0].weight.requires_grad = False
    net = net.half().cuda()
    net = net.to(memory_format=torch.channels_last)
    for mod in net.modules():
        if isinstance(mod, BatchNorm):
            mod.float()
    return net

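# Shape walk-through for a 32x32 CIFAR-10 input (derived from the layers above): the 2x2 whitening
# conv with no padding gives 31x31; the three ConvGroups ('same' convs plus MaxPool2d(2)) give
# 15x15 -> 7x7 -> 3x3; MaxPool2d(3) reduces to 1x1, so Flatten yields a 256-dim vector feeding
# the final Linear(256, 10), whose logits are scaled by 1/9.
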
#############################################
#       Whitening Conv Initialization       #
#############################################

def get_patches(x, patch_shape):
    c, (h, w) = x.shape[1], patch_shape
    return x.unfold(2,h,1).unfold(3,w,1).transpose(1,3).reshape(-1,c,h,w).float()

def get_whitening_parameters(patches):
    n,c,h,w = patches.shape
    patches_flat = patches.view(n, -1)
    est_patch_covariance = (patches_flat.T @ patches_flat) / n
    eigenvalues, eigenvectors = torch.linalg.eigh(est_patch_covariance, UPLO='U')
    return eigenvalues.flip(0).view(-1, 1, 1, 1), eigenvectors.T.reshape(c*h*w,c,h,w).flip(0)

def init_whitening_conv(layer, train_set, eps=5e-4):
    patches = get_patches(train_set, patch_shape=layer.weight.data.shape[2:])
    eigenvalues, eigenvectors = get_whitening_parameters(patches)
    eigenvectors_scaled = eigenvectors / torch.sqrt(eigenvalues + eps)
    layer.weight.data[:] = torch.cat((eigenvectors_scaled, -eigenvectors_scaled))

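# In other words: with a 2x2 kernel the patches are 3*2*2 = 12-dimensional, so the 12 eigenvectors
# of the estimated patch covariance, each scaled by 1/sqrt(eigenvalue + eps), form a whitening
# basis. Concatenating each filter with its negation fills the 24 output channels of the frozen
# whitening conv, so the subsequent GELU sees both signs of every whitened feature.
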
############################################
#                Lookahead                 #
############################################

class LookaheadState:
    def __init__(self, net):
        self.net_ema = {k: v.clone() for k, v in net.state_dict().items()}

    def update(self, net, decay):
        for ema_param, net_param in zip(self.net_ema.values(), net.state_dict().values()):
            if net_param.dtype in (torch.half, torch.float):
                ema_param.lerp_(net_param, 1-decay)
                net_param.copy_(ema_param)

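# Each update computes ema = decay * ema + (1 - decay) * param (via lerp_) and then writes the
# EMA back into the live network, a slow/fast-weight scheme in the spirit of Lookahead
# (Zhang et al., 2019). The decay follows alpha_schedule in main(), rising toward ~0.77 so the
# averaging strengthens as training ends; the final update(decay=1.0) just loads the EMA weights.
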
############################################
#                 Logging                  #
############################################

def print_columns(columns_list, is_head=False, is_final_entry=False):
    print_string = ''
    for col in columns_list:
        print_string += '| %s ' % col
    print_string += '|'
    if is_head:
        print('-'*len(print_string))
    print(print_string)
    if is_head or is_final_entry:
        print('-'*len(print_string))

logging_columns_list = ['run ', 'epoch', 'train_loss', 'train_acc', 'val_acc', 'tta_val_acc', 'total_time_seconds']
def print_training_details(variables, is_final_entry):
    formatted = []
    for col in logging_columns_list:
        var = variables.get(col.strip(), None)
        if type(var) in (int, str):
            res = str(var)
        elif type(var) is float:
            res = '{:0.4f}'.format(var)
        else:
            assert var is None
            res = ''
        formatted.append(res.rjust(len(col)))
    print_columns(formatted, is_final_entry=is_final_entry)

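# With these helpers, the header printed at startup looks roughly like (exact spacing depends on
# the column-name padding):
# | run  | epoch | train_loss | train_acc | val_acc | tta_val_acc | total_time_seconds |
# and each epoch appends one row, right-justified per column, with floats shown to 4 decimals.
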
############################################
#                Evaluation                #
############################################

def infer(model, loader, tta_level=0):

    # Test-time augmentation strategy (for tta_level=2):
    # 1. Flip/mirror the image left-to-right (50% of the time).
    # 2. Translate the image by one pixel either up-and-left or down-and-right (50% of the time,
    #    i.e. both happen 25% of the time).
    #
    # This creates 6 views per image (left/right times the two translations and no-translation),
    # which we evaluate and then weight according to the given probabilities.

    def infer_basic(inputs, net):
        return net(inputs).clone()

    def infer_mirror(inputs, net):
        return 0.5 * net(inputs) + 0.5 * net(inputs.flip(-1))

    def infer_mirror_translate(inputs, net):
        logits = infer_mirror(inputs, net)
        pad = 1
        padded_inputs = F.pad(inputs, (pad,)*4, 'reflect')
        inputs_translate_list = [
            padded_inputs[:, :, 0:32, 0:32],
            padded_inputs[:, :, 2:34, 2:34],
        ]
        logits_translate_list = [infer_mirror(inputs_translate, net)
                                 for inputs_translate in inputs_translate_list]
        logits_translate = torch.stack(logits_translate_list).mean(0)
        return 0.5 * logits + 0.5 * logits_translate

    model.eval()
    test_images = loader.normalize(loader.images)
    infer_fn = [infer_basic, infer_mirror, infer_mirror_translate][tta_level]
    with torch.no_grad():
        return torch.cat([infer_fn(inputs, model) for inputs in test_images.split(2000)])

def evaluate(model, loader, tta_level=0):
    logits = infer(model, loader, tta_level)
    return (logits.argmax(1) == loader.labels).float().mean().item()

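# Unrolling the weights at tta_level=2: the two untranslated views (original and mirror) each get
# 0.5 * 0.5 = 0.25, and the four translated views (two shifts, each mirrored) each get
# 0.5 * 0.5 * 0.5 = 0.125, so the six per-view weights sum to 1 and match the probabilities in
# the comment at the top of infer().
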
############################################
#                 Training                 #
############################################

def main(run):

    batch_size = hyp['opt']['batch_size']
    epochs = hyp['opt']['train_epochs']
    momentum = hyp['opt']['momentum']
    # Assuming gradients are constant in time, for Nesterov momentum, the below ratio is how much
    # larger the default steps will be than the underlying per-example gradients. We divide the
    # learning rate by this ratio in order to ensure steps are the same scale as gradients, regardless
    # of the choice of momentum.
    kilostep_scale = 1024 * (1 + 1 / (1 - momentum))
    lr = hyp['opt']['lr'] / kilostep_scale # un-decoupled learning rate for PyTorch SGD
    wd = hyp['opt']['weight_decay'] * batch_size / kilostep_scale
    lr_biases = lr * hyp['opt']['bias_scaler']

    loss_fn = nn.CrossEntropyLoss(label_smoothing=hyp['opt']['label_smoothing'], reduction='none')
    test_loader = CifarLoader('cifar10', train=False, batch_size=2000)
    train_loader = CifarLoader('cifar10', train=True, batch_size=batch_size, aug=hyp['aug'])
    if run == 'warmup':
        # The only purpose of the first run is to warmup, so we can use dummy data
        train_loader.labels = torch.randint(0, 10, size=(len(train_loader.labels),), device=train_loader.labels.device)
    total_train_steps = ceil(len(train_loader) * epochs)

    model = make_net()
    current_steps = 0

    norm_biases = [p for k, p in model.named_parameters() if 'norm' in k and p.requires_grad]
    other_params = [p for k, p in model.named_parameters() if 'norm' not in k and p.requires_grad]
    param_configs = [dict(params=norm_biases, lr=lr_biases, weight_decay=wd/lr_biases),
                     dict(params=other_params, lr=lr, weight_decay=wd/lr)]
    optimizer = torch.optim.SGD(param_configs, momentum=momentum, nesterov=True)

    def get_lr(step):
        warmup_steps = int(total_train_steps * 0.23)
        warmdown_steps = total_train_steps - warmup_steps
        if step < warmup_steps:
            frac = step / warmup_steps
            return 0.2 * (1 - frac) + 1.0 * frac
        else:
            frac = (step - warmup_steps) / warmdown_steps
            return 1.0 * (1 - frac) + 0.07 * frac
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, get_lr)

    alpha_schedule = 0.95**5 * (torch.arange(total_train_steps+1) / total_train_steps)**3
    lookahead_state = LookaheadState(model)

    # For accurately timing GPU code
    starter = torch.cuda.Event(enable_timing=True)
    ender = torch.cuda.Event(enable_timing=True)
    total_time_seconds = 0.0

    # Initialize the whitening layer using training images
    starter.record()
    train_images = train_loader.normalize(train_loader.images[:5000])
    init_whitening_conv(model[0], train_images)
    ender.record()
    torch.cuda.synchronize()
    total_time_seconds += 1e-3 * starter.elapsed_time(ender)

    for epoch in range(ceil(epochs)):

        model[0].bias.requires_grad = (epoch < hyp['opt']['whiten_bias_epochs'])

        ####################
        #     Training     #
        ####################

        starter.record()

        model.train()
        for inputs, labels in train_loader:

            outputs = model(inputs)
            loss = loss_fn(outputs, labels).sum()
            optimizer.zero_grad(set_to_none=True)
            loss.backward()
            optimizer.step()
            scheduler.step()

            current_steps += 1

            if current_steps % 5 == 0:
                lookahead_state.update(model, decay=alpha_schedule[current_steps].item())

            if current_steps >= total_train_steps:
                if lookahead_state is not None:
                    lookahead_state.update(model, decay=1.0)
                break

        ender.record()
        torch.cuda.synchronize()
        total_time_seconds += 1e-3 * starter.elapsed_time(ender)

        ####################
        #    Evaluation    #
        ####################

        # Save the accuracy and loss from the last training batch of the epoch
        train_acc = (outputs.detach().argmax(1) == labels).float().mean().item()
        train_loss = loss.item() / batch_size
        val_acc = evaluate(model, test_loader, tta_level=0)
        print_training_details(locals(), is_final_entry=False)
        run = None # Only print the run number once

    ####################
    #  TTA Evaluation  #
    ####################

    starter.record()
    tta_val_acc = evaluate(model, test_loader, tta_level=hyp['net']['tta_level'])
    ender.record()
    torch.cuda.synchronize()
    total_time_seconds += 1e-3 * starter.elapsed_time(ender)

    epoch = 'eval'
    print_training_details(locals(), is_final_entry=True)

    return tta_val_acc

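# The resulting schedule from get_lr: the multiplier ramps linearly from 0.2 to 1.0 over the first
# 23% of steps, then decays linearly to 0.07 by the end of training. With train_epochs=9.9,
# batch_size=1024, and drop_last giving 50000//1024 = 48 steps per epoch, total_train_steps is
# ceil(48 * 9.9) = 476, so roughly the first 109 steps are warmup.
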
if __name__ == "__main__":
    with open(sys.argv[0]) as f:
        code = f.read()

    print_columns(logging_columns_list, is_head=True)
    #main('warmup')
    accs = torch.tensor([main(run) for run in range(25)])
    print('Mean: %.4f    Std: %.4f' % (accs.mean(), accs.std()))

    log = {'code': code, 'accs': accs}
    log_dir = os.path.join('logs', str(uuid.uuid4()))
    os.makedirs(log_dir, exist_ok=True)
    log_path = os.path.join(log_dir, 'log.pt')
    print(os.path.abspath(log_path))
    torch.save(log, log_path)