Sefika committed on
Commit 9ba9ac1
1 Parent(s): 86ae66d

Upload 9 files

models/FullyConectedModels/model.py ADDED
@@ -0,0 +1,120 @@
+ from tensorflow.keras.layers import (
+     Conv2D,
+     Input,
+     MaxPooling2D,
+     Dense,
+     Flatten,
+     BatchNormalization,
+ )
+ from tensorflow.keras.models import Model
+ from tensorflow.keras.regularizers import l2
+ import warnings
+
+ warnings.filterwarnings("ignore")
+
+
+ def basemodel(weight_decay):
+     # 2 hidden (convolutional) layers
+     model_input = Input(shape=(32, 32, 1))
+     model = Conv2D(
+         32,
+         kernel_size=(3, 3),
+         kernel_regularizer=l2(weight_decay),
+         activation="relu",
+     )(model_input)
+     model = Conv2D(
+         64, kernel_size=(3, 3), kernel_regularizer=l2(weight_decay), activation="relu"
+     )(model)
+     model = MaxPooling2D(pool_size=(2, 2))(model)
+     model = BatchNormalization()(model)
+     model = Flatten()(model)
+     model = Dense(4, kernel_regularizer=l2(weight_decay), activation="softmax")(model)
+     model = Model(inputs=model_input, outputs=model)
+     return model
+
+
+ def model_2(weight_decay):
+     # 3 hidden (convolutional) layers
+     model_input = Input(shape=(32, 32, 1))
+     model = Conv2D(
+         32,
+         kernel_size=(3, 3),
+         kernel_regularizer=l2(weight_decay),
+         activation="relu",
+     )(model_input)
+     model = Conv2D(
+         64, kernel_size=(3, 3), kernel_regularizer=l2(weight_decay), activation="relu"
+     )(model)
+     model = MaxPooling2D(pool_size=(2, 2))(model)
+     model = BatchNormalization()(model)
+     model = Conv2D(
+         128, kernel_size=(3, 3), kernel_regularizer=l2(weight_decay), activation="relu"
+     )(model)
+     model = MaxPooling2D(pool_size=(2, 2))(model)
+     model = BatchNormalization()(model)
+     model = Flatten()(model)
+     model = Dense(4, kernel_regularizer=l2(weight_decay), activation="softmax")(model)
+     model = Model(inputs=model_input, outputs=model)
+     return model
+
+
+ def model_3(weight_decay):
+     # 4 hidden (convolutional) layers
+     model_input = Input(shape=(32, 32, 1))
+     model = Conv2D(
+         32,
+         kernel_size=(3, 3),
+         kernel_regularizer=l2(weight_decay),
+         activation="relu",
+     )(model_input)
+     model = Conv2D(
+         64, kernel_size=(3, 3), kernel_regularizer=l2(weight_decay), activation="relu"
+     )(model)
+     model = MaxPooling2D(pool_size=(2, 2))(model)
+     model = BatchNormalization()(model)
+     model = Conv2D(
+         128, kernel_size=(3, 3), kernel_regularizer=l2(weight_decay), activation="relu"
+     )(model)
+     model = MaxPooling2D(pool_size=(2, 2))(model)
+     model = BatchNormalization()(model)
+     model = Conv2D(
+         256, kernel_size=(3, 3), kernel_regularizer=l2(weight_decay), activation="relu"
+     )(model)
+     model = MaxPooling2D(pool_size=(2, 2))(model)
+     model = BatchNormalization()(model)
+     model = Flatten()(model)
+     model = Dense(4, kernel_regularizer=l2(weight_decay), activation="softmax")(model)
+     model = Model(inputs=model_input, outputs=model)
+     return model
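
These builders return uncompiled Keras models. A minimal usage sketch, assuming a one-hot 4-class dataset of 32x32x1 images (the optimizer, learning rate, and `weight_decay` value are illustrative, not fixed by this commit):

```python
from model import basemodel  # assumes models/FullyConectedModels is on sys.path
from tensorflow.keras.optimizers import SGD

model = basemodel(weight_decay=1e-4)  # illustrative weight decay
model.compile(
    optimizer=SGD(learning_rate=0.01, momentum=0.9),
    loss="categorical_crossentropy",  # assumes one-hot labels of shape (n, 4)
    metrics=["accuracy"],
)
model.summary()
# model.fit(X_train, y_train, epochs=10, batch_size=32)  # X_train: (n, 32, 32, 1)
```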
models/FullyConectedModels/parseval.py ADDED
@@ -0,0 +1,83 @@
+ from tensorflow.keras.layers import (
+     Conv2D,
+     Input,
+     MaxPooling2D,
+     Dense,
+     Flatten,
+     BatchNormalization,
+ )
+ from tensorflow.keras.models import Model
+ from tensorflow.keras.regularizers import l2
+ import warnings
+ from constraint import tight_frame
+
+ warnings.filterwarnings("ignore")
+
+
+ def model_parseval(weight_decay):
+     # Same topology as model_3, with orthogonal initialization and the
+     # Parseval tight-frame constraint on every convolutional kernel.
+     model_input = Input(shape=(32, 32, 1))
+     model = Conv2D(
+         32,
+         kernel_size=(3, 3),
+         activation="relu",
+         kernel_regularizer=l2(weight_decay),
+         kernel_constraint=tight_frame(0.001),
+         kernel_initializer="Orthogonal",
+     )(model_input)
+     model = Conv2D(
+         64,
+         kernel_size=(3, 3),
+         activation="relu",
+         kernel_regularizer=l2(weight_decay),
+         kernel_initializer="Orthogonal",
+         kernel_constraint=tight_frame(0.001),
+     )(model)
+     model = MaxPooling2D(pool_size=(2, 2))(model)
+     model = BatchNormalization()(model)
+     model = Conv2D(
+         128,
+         kernel_size=(3, 3),
+         activation="relu",
+         kernel_initializer="Orthogonal",
+         kernel_regularizer=l2(weight_decay),
+         kernel_constraint=tight_frame(0.001),
+     )(model)
+     model = MaxPooling2D(pool_size=(2, 2))(model)
+     model = BatchNormalization()(model)
+     model = Conv2D(
+         256,
+         kernel_size=(3, 3),
+         activation="relu",
+         kernel_initializer="Orthogonal",
+         kernel_regularizer=l2(weight_decay),
+         kernel_constraint=tight_frame(0.001),
+     )(model)
+     model = MaxPooling2D(pool_size=(2, 2))(model)
+     model = BatchNormalization()(model)
+     model = Flatten()(model)
+     model = Dense(4, activation="softmax", kernel_regularizer=l2(weight_decay))(model)
+     model = Model(inputs=model_input, outputs=model)
+     return model
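
Note that `from constraint import tight_frame` resolves only if `models/Parseval_Networks` is importable from this file; a hedged sketch of one way to wire that up (the path handling reflects an assumption about the runtime layout, which this commit does not specify):

```python
import sys

# Assumed layout: run from the repo root and expose both package directories.
sys.path.append("models/Parseval_Networks")
sys.path.append("models/FullyConectedModels")

from parseval import model_parseval  # noqa: E402

model = model_parseval(weight_decay=1e-4)  # illustrative value
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
```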
models/Parseval_Networks/README.md ADDED
@@ -0,0 +1,3 @@
+ ## ParsevalNetworks
+ * Orthogonality Constraint
+ * Convexity Constraint
models/Parseval_Networks/constraint.py ADDED
@@ -0,0 +1,81 @@
+ from tensorflow.python.keras.constraints import Constraint
+ from tensorflow.python.ops import math_ops, array_ops
+
+
+ class TightFrame(Constraint):
+     """
+     Parseval (tight) frame constraint, as introduced in https://arxiv.org/abs/1704.08847
+
+     Constrains the weight matrix to be a tight frame, so that the Lipschitz
+     constant of the layer is <= 1. This increases the robustness of the network
+     to adversarial noise.
+
+     Warning: This constraint simply performs the retraction step on the weight
+     matrix (or the unfolded weight matrix for convolutional layers). Thus, it
+     does not handle the necessary scalings for convolutional layers.
+
+     Args:
+         scale (float): Retraction parameter (length of retraction step).
+         num_passes (int): Number of retraction steps.
+
+     Returns:
+         Weight matrix after applying the retraction.
+     """
+
+     def __init__(self, scale, num_passes=1):
+         """Store the retraction parameters.
+
+         Args:
+             scale (float): Retraction step length.
+             num_passes (int, optional): Number of retraction steps. Defaults to 1.
+
+         Raises:
+             ValueError: If num_passes is not positive.
+         """
+         self.scale = scale
+
+         if num_passes < 1:
+             raise ValueError(
+                 "Number of passes must be positive! (got {})".format(num_passes)
+             )
+         self.num_passes = num_passes
+
+     def __call__(self, w):
+         """Apply the retraction to a weight tensor.
+
+         Args:
+             w (tf.Tensor): Weight of a conv or linear layer.
+
+         Returns:
+             tf.Tensor: The retracted weights.
+         """
+         transpose_channels = len(w.shape) == 4
+
+         # Unfold 4-D convolution kernels into a 2-D matrix of shape
+         # (k_h * k_w * in_channels, out_channels) so the matmul dimensions line up
+         if transpose_channels:
+             w_reordered = array_ops.reshape(w, (-1, w.shape[3]))
+         else:
+             w_reordered = w
+
+         # Retraction step W <- (1 + scale) * W - scale * W (W^T W),
+         # repeated num_passes times
+         last = w_reordered
+         for i in range(self.num_passes):
+             temp1 = math_ops.matmul(last, last, transpose_a=True)
+             temp2 = (1 + self.scale) * last - self.scale * math_ops.matmul(
+                 last, temp1
+             )
+             last = temp2
+
+         # Fold the matrix back to the original kernel shape
+         if transpose_channels:
+             return array_ops.reshape(last, w.shape)
+         else:
+             return last
+
+     def get_config(self):
+         return {"scale": self.scale, "num_passes": self.num_passes}
+
+
+ # Alias
+ tight_frame = TightFrame
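
A small numerical check makes the retraction concrete: iterating W <- (1 + scale) * W - scale * W (W^T W) drives W^T W toward the identity, i.e. toward a Parseval tight frame. The sketch below is illustrative only (eager TF2 assumed; the scale is exaggerated relative to the 0.001 used in the models so convergence is visible in a few steps):

```python
import tensorflow as tf
from constraint import TightFrame  # assumes this file is on sys.path

w = tf.random.normal((9, 4), stddev=0.2)  # stand-in for an unfolded 3x3x1x4 kernel
retraction = TightFrame(scale=0.5, num_passes=1)

for _ in range(10):
    w = retraction(w)

gram = tf.matmul(w, w, transpose_a=True)
print(tf.reduce_max(tf.abs(gram - tf.eye(4))).numpy())  # ~0: W is a tight frame
```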
models/Parseval_Networks/convexity_constraint.py ADDED
@@ -0,0 +1,53 @@
+ from tensorflow.python.ops import math_ops
+ from tensorflow.python.ops import variables
+ from tensorflow.python.framework import dtypes
+ import numpy as _np
+
+
+ def convex_add(input_layer, layer_3, initial_convex_par=0.5, trainable=False):
+     """
+     Take a convex combination of input_layer and layer_3. That is, return the output of
+
+         lam * input_layer + (1 - lam) * layer_3
+
+     Args:
+         input_layer (tf.Tensor): Input to take the convex combination of.
+         layer_3 (tf.Tensor): Input to take the convex combination of.
+         initial_convex_par (float): Initial value for the convex parameter. Must be
+             in [0, 1].
+         trainable (bool): Whether the convex parameter should be trainable
+             or not.
+
+     Returns:
+         tf.Tensor: Result of the convex combination.
+     """
+     # Implemented as sigmoid(p)*input_layer + (1-sigmoid(p))*layer_3 to keep the
+     # convex parameter in the unit interval without constraints during
+     # optimization
+
+     # Find the value for p, also checking for a legal initial_convex_par
+     if initial_convex_par < 0:
+         raise ValueError("Convex parameter must be >= 0")
+
+     elif initial_convex_par == 0:
+         # sigmoid(-16) is approximately a 32-bit roundoff error, practically 0
+         initial_p_value = -16
+
+     elif initial_convex_par < 1:
+         # Invert the sigmoid to find the initial p value
+         initial_p_value = -_np.log(1 / initial_convex_par - 1)
+
+     elif initial_convex_par == 1:
+         # Same argument as for 0
+         initial_p_value = 16
+
+     else:
+         raise ValueError("Convex parameter must be <= 1")
+
+     p = variables.Variable(
+         initial_value=initial_p_value, dtype=dtypes.float32, trainable=trainable
+     )
+
+     lam = math_ops.sigmoid(p)
+     return input_layer * lam + (1 - lam) * layer_3
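
The sigmoid reparameterization is worth a quick check: p is just the logit of the requested convex parameter, so sigmoid(p) recovers it exactly. A worked example:

```python
import numpy as np

# For initial_convex_par = 0.8 the code sets p = -log(1/0.8 - 1) ~= 1.386,
# and sigmoid(1.386) = 0.8 recovers the requested value.
initial_convex_par = 0.8
p = -np.log(1 / initial_convex_par - 1)
lam = 1 / (1 + np.exp(-p))
print(p, lam)  # 1.3862..., 0.8
```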
models/Parseval_Networks/parsevalnet.py ADDED
@@ -0,0 +1,328 @@
+ from tensorflow.keras.models import Model
+ from tensorflow.keras.layers import Input, Add, Activation, Dropout, Flatten, Dense
+ from tensorflow.keras.layers import Convolution2D, AveragePooling2D
+ from tensorflow.keras.layers import BatchNormalization
+ from tensorflow.keras.regularizers import l2
+ from tensorflow.keras import backend as K
+ import warnings
+ from constraint import tight_frame
+ from convexity_constraint import convex_add
+
+ warnings.filterwarnings("ignore")
+
+
+ class ParsevalNetwork(Model):
+     def __init__(
+         self,
+         input_dim,
+         weight_decay,
+         momentum,
+         nb_classes=4,
+         N=2,
+         k=1,
+         dropout=0.0,
+         verbose=1,
+     ):
+         """Assign the initial parameters of the Parseval wide residual network.
+
+         Args:
+             input_dim (tuple): Input dimension.
+             weight_decay (float): L2 weight-decay coefficient.
+             momentum (float): SGD momentum, stored for the training setup.
+             nb_classes (int, optional): Number of output classes. Defaults to 4.
+             N (int, optional): Number of residual blocks per group. Defaults to 2.
+             k (int, optional): Network width multiplier. Defaults to 1.
+             dropout (float, optional): Dropout rate to prevent overfitting. Defaults to 0.0.
+             verbose (int, optional): Print a summary line when 1. Defaults to 1.
+         """
+         super().__init__()
+         self.weight_decay = weight_decay
+         self.input_dim = input_dim
+         self.nb_classes = nb_classes
+         self.N = N
+         self.k = k
+         self.dropout = dropout
+         self.verbose = verbose
+         self.momentum = momentum
+
+     def initial_conv(self, input):
+         """Initial 3x3 convolution with batch normalization and ReLU.
+
+         Args:
+             input (tf.Tensor): Network input tensor.
+
+         Returns:
+             tf.Tensor: Output of the initial convolution stage.
+         """
+         x = Convolution2D(
+             16,
+             (3, 3),
+             padding="same",
+             kernel_initializer="Orthogonal",
+             kernel_regularizer=l2(self.weight_decay),
+             kernel_constraint=tight_frame(0.001),
+             use_bias=False,
+         )(input)
+
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+         return x
+
+     def expand_conv(self, init, base, k, strides=(1, 1)):
+         """Residual block that widens (and optionally downsamples) the feature maps.
+
+         Args:
+             init (tf.Tensor): Block input tensor.
+             base (int): Base number of filters.
+             k (int): Width multiplier.
+             strides (tuple, optional): Strides of the first convolution. Defaults to (1, 1).
+
+         Returns:
+             tf.Tensor: Sum of the convolution branch and the 1x1 skip projection.
+         """
+         x = Convolution2D(
+             base * k,
+             (3, 3),
+             padding="same",
+             strides=strides,
+             kernel_initializer="Orthogonal",
+             kernel_regularizer=l2(self.weight_decay),
+             kernel_constraint=tight_frame(0.001),
+             use_bias=False,
+         )(init)
+
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+
+         x = Convolution2D(
+             base * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="Orthogonal",
+             kernel_regularizer=l2(self.weight_decay),
+             kernel_constraint=tight_frame(0.001),
+             use_bias=False,
+         )(x)
+
+         skip = Convolution2D(
+             base * k,
+             (1, 1),
+             padding="same",
+             strides=strides,
+             kernel_initializer="Orthogonal",
+             kernel_regularizer=l2(self.weight_decay),
+             kernel_constraint=tight_frame(0.001),
+             use_bias=False,
+         )(init)
+
+         m = Add()([x, skip])
+
+         return m
+
+     def conv1_block(self, input, k=1, dropout=0.0):
+         """Residual block with 16*k filters and a convex (Parseval) skip combination.
+
+         Args:
+             input (tf.Tensor): Block input tensor.
+             k (int, optional): Width multiplier. Defaults to 1.
+             dropout (float, optional): Dropout rate. Defaults to 0.0.
+
+         Returns:
+             tf.Tensor: Convex combination of the input and the convolution branch.
+         """
+         init = input
+
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(input)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             16 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="Orthogonal",
+             kernel_regularizer=l2(self.weight_decay),
+             kernel_constraint=tight_frame(0.001),
+             use_bias=False,
+         )(x)
+
+         if dropout > 0.0:
+             x = Dropout(dropout)(x)
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             16 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="Orthogonal",
+             kernel_regularizer=l2(self.weight_decay),
+             kernel_constraint=tight_frame(0.001),
+             use_bias=False,
+         )(x)
+         m = convex_add(init, x, initial_convex_par=0.5, trainable=True)
+         return m
+
+     def conv2_block(self, input, k=1, dropout=0.0):
+         """Residual block with 32*k filters and a convex (Parseval) skip combination.
+
+         Args:
+             input (tf.Tensor): Block input tensor.
+             k (int, optional): Width multiplier. Defaults to 1.
+             dropout (float, optional): Dropout rate. Defaults to 0.0.
+
+         Returns:
+             tf.Tensor: Convex combination of the input and the convolution branch.
+         """
+         init = input
+
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(input)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             32 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="Orthogonal",
+             kernel_regularizer=l2(self.weight_decay),
+             kernel_constraint=tight_frame(0.001),
+             use_bias=False,
+         )(x)
+
+         if dropout > 0.0:
+             x = Dropout(dropout)(x)
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             32 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="Orthogonal",
+             kernel_regularizer=l2(self.weight_decay),
+             kernel_constraint=tight_frame(0.001),
+             use_bias=False,
+         )(x)
+
+         m = convex_add(init, x, initial_convex_par=0.5, trainable=True)
+         return m
+
+     def conv3_block(self, input, k=1, dropout=0.0):
+         """Residual block with 64*k filters and a convex (Parseval) skip combination."""
+         init = input
+
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(input)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             64 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="Orthogonal",
+             kernel_constraint=tight_frame(0.001),
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(x)
+
+         if dropout > 0.0:
+             x = Dropout(dropout)(x)
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             64 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="Orthogonal",
+             kernel_constraint=tight_frame(0.001),
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(x)
+
+         m = convex_add(init, x, initial_convex_par=0.5, trainable=True)
+         return m
+
+     def create_wide_residual_network(self):
+         """Create the Parseval wide residual network model.
+
+         Returns:
+             Model: The assembled wide residual network.
+         """
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+
+         ip = Input(shape=self.input_dim)
+
+         x = self.initial_conv(ip)
+         nb_conv = 4
+
+         x = self.expand_conv(x, 16, self.k)
+         nb_conv += 2
+
+         for _ in range(self.N - 1):
+             x = self.conv1_block(x, self.k, self.dropout)
+             nb_conv += 2
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+
+         x = self.expand_conv(x, 32, self.k, strides=(2, 2))
+         nb_conv += 2
+
+         for _ in range(self.N - 1):
+             x = self.conv2_block(x, self.k, self.dropout)
+             nb_conv += 2
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+
+         x = self.expand_conv(x, 64, self.k, strides=(2, 2))
+         nb_conv += 2
+
+         for _ in range(self.N - 1):
+             x = self.conv3_block(x, self.k, self.dropout)
+             nb_conv += 2
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+
+         x = AveragePooling2D((8, 8))(x)
+         x = Flatten()(x)
+
+         x = Dense(
+             self.nb_classes,
+             kernel_regularizer=l2(self.weight_decay),
+             activation="softmax",
+         )(x)
+
+         model = Model(ip, x)
+
+         if self.verbose:
+             print("Parseval Network-%d-%d created." % (nb_conv, self.k))
+         return model
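
A hedged sketch of building and compiling the network (hyperparameters are illustrative; this commit ships no training script, so the SGD setup is an assumption that simply reuses the stored `momentum`):

```python
from parsevalnet import ParsevalNetwork  # assumes models/Parseval_Networks on sys.path
from tensorflow.keras.optimizers import SGD

builder = ParsevalNetwork(
    input_dim=(32, 32, 1),  # matches the other models in this commit
    weight_decay=5e-4,      # illustrative value
    momentum=0.9,
    nb_classes=4,
    N=2,
    k=2,
)
model = builder.create_wide_residual_network()  # with N=2 this is a 16-conv network
model.compile(
    optimizer=SGD(learning_rate=0.1, momentum=builder.momentum),
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)
```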
models/README.md ADDED
@@ -0,0 +1,15 @@
+ ## Models
+
+ ````
+ ├── Parseval_network
+ │   ├── __init__.py
+ │   └── Parseval_resnet.py
+ ├── Parseval_Networks_OC
+ │   ├── constraint.py
+ │   ├── parsnet_oc.py
+ │   └── README.md
+ ├── README.md
+ ├── _utility.py
+ └── wideresnet
+     └── wresnet.py
+ ````
models/_utility.py ADDED
@@ -0,0 +1,109 @@
+ import math
+
+ import numpy as np
+ import tensorflow as tf
+ from cleverhans.tf2.attacks.fast_gradient_method import fast_gradient_method
+ from tensorflow.keras.callbacks import LearningRateScheduler
+
+
+ def step_decay(epoch):
+     """Step-decay learning-rate schedule: 0.1, decayed tenfold every 10 epochs.
+
+     Args:
+         epoch (int): Epoch number.
+
+     Returns:
+         float: New learning rate.
+     """
+     initial_lrate = 0.1
+     factor = 0.1
+     if epoch < 10:
+         lrate = initial_lrate
+     elif epoch < 20:
+         lrate = initial_lrate * math.pow(factor, 1)
+     elif epoch < 30:
+         lrate = initial_lrate * math.pow(factor, 2)
+     elif epoch < 40:
+         lrate = initial_lrate * math.pow(factor, 3)
+     else:
+         lrate = initial_lrate * math.pow(factor, 4)
+     return lrate
+
+
+ def step_decay_conv(epoch):
+     """Step-decay learning-rate schedule for convolutional networks, starting at 0.01.
+
+     Args:
+         epoch (int): Epoch number.
+
+     Returns:
+         float: New learning rate.
+     """
+     initial_lrate = 0.01
+     factor = 0.1
+     if epoch < 10:
+         lrate = initial_lrate
+     elif epoch < 20:
+         lrate = initial_lrate * math.pow(factor, 1)
+     elif epoch < 30:
+         lrate = initial_lrate * math.pow(factor, 2)
+     elif epoch < 40:
+         lrate = initial_lrate * math.pow(factor, 3)
+     else:
+         lrate = initial_lrate * math.pow(factor, 4)
+     return lrate
+
+
+ def print_test(model, X_adv, X_test, y_test, epsilon):
+     """
+     Evaluate the model on adversarial examples and print the evaluation
+     results along with the SNR between the clean and perturbed inputs.
+     """
+     loss, acc = model.evaluate(X_adv, y_test)
+     print("epsilon: {} and test evaluation : {}, {}".format(epsilon, loss, acc))
+     SNR = 20 * np.log10(np.linalg.norm(X_test) / np.linalg.norm(X_test - X_adv))
+     print("SNR: {}".format(SNR))
+     return loss, acc
+
+
+ def get_adversarial_examples(pretrained_model, X_true, y_true, epsilon):
+     """
+     The attack requires the model to output logits.
+     Returns the adversarial example(s) of the given image(s) for an epsilon
+     value, using the fast gradient sign method.
+     """
+     logits_model = tf.keras.Model(
+         pretrained_model.input, pretrained_model.layers[-1].output
+     )
+     X_adv = []
+
+     for i in range(len(X_true)):
+         # Reshape to a batch of one so the image can be fed to the model
+         original_image = tf.convert_to_tensor(X_true[i].reshape((1, 32, 32)))
+         original_label = np.reshape(np.argmax(y_true[i]), (1,)).astype("int64")
+
+         adv_example = fast_gradient_method(
+             logits_model,
+             original_image,
+             epsilon,
+             np.inf,
+             y=original_label,
+             targeted=False,
+         )
+         X_adv.append(np.array(adv_example).reshape(32, 32, 1))
+
+     X_adv = np.array(X_adv)
+
+     return X_adv
+
+
+ lrate_conv = LearningRateScheduler(step_decay_conv)
+ lrate = LearningRateScheduler(step_decay)
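
A hedged sketch of how these helpers plug into a training and evaluation loop; `model`, `X_train`, `y_train`, `X_test`, and `y_test` are placeholders not defined in this commit:

```python
from _utility import lrate, get_adversarial_examples, print_test

# Schedule the learning rate with the step decay above (0.1, decayed 10x every 10 epochs).
model.fit(X_train, y_train, epochs=50, batch_size=32, callbacks=[lrate])

# Evaluate robustness at an illustrative perturbation budget.
X_adv = get_adversarial_examples(model, X_test, y_test, epsilon=0.03)
print_test(model, X_adv, X_test, y_test, epsilon=0.03)
```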
models/wideresnet/wresnet.py ADDED
@@ -0,0 +1,329 @@
+ from tensorflow.keras.models import Model
+ from tensorflow.keras.layers import Input, Add, Activation, Dropout, Flatten, Dense
+ from tensorflow.keras.layers import Convolution2D, AveragePooling2D
+ from tensorflow.keras.layers import BatchNormalization
+ from tensorflow.keras.regularizers import l2
+ from tensorflow.keras import backend as K
+ import warnings
+
+ warnings.filterwarnings("ignore")
+
+
+ class WideResidualNetwork(object):
+     def __init__(
+         self,
+         input_dim,
+         weight_decay,
+         momentum,
+         nb_classes=100,
+         N=2,
+         k=1,
+         dropout=0.0,
+         verbose=1,
+     ):
+         """Assign the initial parameters of the wide residual network.
+
+         Args:
+             input_dim (tuple): Input dimension.
+             weight_decay (float): L2 weight-decay coefficient.
+             momentum (float): SGD momentum, stored for the training setup.
+             nb_classes (int, optional): Number of output classes. Defaults to 100.
+             N (int, optional): Number of residual blocks per group. Defaults to 2.
+             k (int, optional): Network width multiplier. Defaults to 1.
+             dropout (float, optional): Dropout rate to prevent overfitting. Defaults to 0.0.
+             verbose (int, optional): Print a summary line when 1. Defaults to 1.
+         """
+         self.weight_decay = weight_decay
+         self.input_dim = input_dim
+         self.nb_classes = nb_classes
+         self.N = N
+         self.k = k
+         self.dropout = dropout
+         self.verbose = verbose
+         self.momentum = momentum
+
+     def initial_conv(self, input):
+         """Initial 3x3 convolution with batch normalization and ReLU.
+
+         Args:
+             input (tf.Tensor): Network input tensor.
+
+         Returns:
+             tf.Tensor: Output of the initial convolution stage.
+         """
+         x = Convolution2D(
+             16,
+             (3, 3),
+             padding="same",
+             kernel_initializer="he_normal",
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(input)
+
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+         return x
+
+     def expand_conv(self, init, base, k, strides=(1, 1)):
+         """Residual block that widens (and optionally downsamples) the feature maps.
+
+         Args:
+             init (tf.Tensor): Block input tensor.
+             base (int): Base number of filters.
+             k (int): Width multiplier.
+             strides (tuple, optional): Strides of the first convolution. Defaults to (1, 1).
+
+         Returns:
+             tf.Tensor: Sum of the convolution branch and the 1x1 skip projection.
+         """
+         x = Convolution2D(
+             base * k,
+             (3, 3),
+             padding="same",
+             strides=strides,
+             kernel_initializer="he_normal",
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(init)
+
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+
+         x = Convolution2D(
+             base * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="he_normal",
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(x)
+
+         skip = Convolution2D(
+             base * k,
+             (1, 1),
+             padding="same",
+             strides=strides,
+             kernel_initializer="he_normal",
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(init)
+
+         m = Add()([x, skip])
+
+         return m
+
+     def conv1_block(self, input, k=1, dropout=0.0):
+         """Residual block with 16*k filters and an additive identity skip.
+
+         Args:
+             input (tf.Tensor): Block input tensor.
+             k (int, optional): Width multiplier. Defaults to 1.
+             dropout (float, optional): Dropout rate. Defaults to 0.0.
+
+         Returns:
+             tf.Tensor: Sum of the input and the convolution branch.
+         """
+         init = input
+
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(input)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             16 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="he_normal",
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(x)
+
+         if dropout > 0.0:
+             x = Dropout(dropout)(x)
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             16 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="he_normal",
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(x)
+
+         m = Add()([init, x])
+         return m
+
+     def conv2_block(self, input, k=1, dropout=0.0):
+         """Residual block with 32*k filters and an additive identity skip.
+
+         Args:
+             input (tf.Tensor): Block input tensor.
+             k (int, optional): Width multiplier. Defaults to 1.
+             dropout (float, optional): Dropout rate. Defaults to 0.0.
+
+         Returns:
+             tf.Tensor: Sum of the input and the convolution branch.
+         """
+         init = input
+
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(input)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             32 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="he_normal",
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(x)
+
+         if dropout > 0.0:
+             x = Dropout(dropout)(x)
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             32 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="he_normal",
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(x)
+
+         m = Add()([init, x])
+         return m
+
+     def conv3_block(self, input, k=1, dropout=0.0):
+         """Residual block with 64*k filters and an additive identity skip.
+
+         Args:
+             input (tf.Tensor): Block input tensor.
+             k (int, optional): Width multiplier. Defaults to 1.
+             dropout (float, optional): Dropout rate. Defaults to 0.0.
+
+         Returns:
+             tf.Tensor: Sum of the input and the convolution branch.
+         """
+         init = input
+
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(input)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             64 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="he_normal",
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(x)
+
+         if dropout > 0.0:
+             x = Dropout(dropout)(x)
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+         x = Convolution2D(
+             64 * k,
+             (3, 3),
+             padding="same",
+             kernel_initializer="he_normal",
+             kernel_regularizer=l2(self.weight_decay),
+             use_bias=False,
+         )(x)
+
+         m = Add()([init, x])
+         return m
+
+     def create_wide_residual_network(self):
+         """Create a wide residual network model.
+
+         Returns:
+             Model: The assembled wide residual network.
+         """
+         channel_axis = 1 if K.image_data_format() == "channels_first" else -1
+
+         ip = Input(shape=self.input_dim)
+
+         x = self.initial_conv(ip)
+         nb_conv = 4
+
+         x = self.expand_conv(x, 16, self.k)
+         nb_conv += 2
+
+         for _ in range(self.N - 1):
+             x = self.conv1_block(x, self.k, self.dropout)
+             nb_conv += 2
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+
+         x = self.expand_conv(x, 32, self.k, strides=(2, 2))
+         nb_conv += 2
+
+         for _ in range(self.N - 1):
+             x = self.conv2_block(x, self.k, self.dropout)
+             nb_conv += 2
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+
+         x = self.expand_conv(x, 64, self.k, strides=(2, 2))
+         nb_conv += 2
+
+         for _ in range(self.N - 1):
+             x = self.conv3_block(x, self.k, self.dropout)
+             nb_conv += 2
+
+         x = BatchNormalization(
+             axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"
+         )(x)
+         x = Activation("relu")(x)
+
+         x = AveragePooling2D((8, 8))(x)
+         x = Flatten()(x)
+
+         x = Dense(
+             self.nb_classes,
+             kernel_regularizer=l2(self.weight_decay),
+             activation="softmax",
+         )(x)
+
+         model = Model(ip, x)
+
+         if self.verbose:
+             print("Wide Residual Network-%d-%d created." % (nb_conv, self.k))
+         return model
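
The baseline follows the same blueprint as the Parseval variant without the orthogonality and convexity pieces; a hedged sketch of pairing it with the step-decay callback from `models/_utility.py` (all values illustrative, and the data tensors are placeholders):

```python
from wresnet import WideResidualNetwork  # assumes models/wideresnet on sys.path
from _utility import lrate
from tensorflow.keras.optimizers import SGD

builder = WideResidualNetwork(
    input_dim=(32, 32, 1),
    weight_decay=5e-4,  # illustrative value
    momentum=0.9,
    nb_classes=4,       # default is 100; 4 matches the other models in this commit
)
model = builder.create_wide_residual_network()
model.compile(
    optimizer=SGD(learning_rate=0.1, momentum=builder.momentum),
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)
# model.fit(X_train, y_train, epochs=50, callbacks=[lrate])
```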