```python
import torch
import typing
import functorch
import itertools
```

# 2.3 Tensors
### We diagram tensors, which can be vertically and horizontally decomposed.
<img src="SVG/rediagram.svg" width="700">

```python
# This diagram shows a function h : 3, 4 2, 6 -> 1 2 constructed out of f: 4 2, 6 -> 3 3 and g: 3, 3 3 -> 1 2
# We use assertions and random outputs to represent generic functions, and to show how diagrams relate to code.
T = torch.Tensor
def f(x0 : T, x1 : T):
    """ f: 4 2, 6 -> 3 3 """
    assert x0.size() == torch.Size([4,2])
    assert x1.size() == torch.Size([6])
    return torch.rand([3,3])
def g(x0 : T, x1: T):
    """ g: 3, 3 3 -> 1 2 """
    assert x0.size() == torch.Size([3])
    assert x1.size() == torch.Size([3, 3])
    return torch.rand([1,2])
def h(x0 : T, x1 : T, x2 : T):
    """ h: 3, 4 2, 6 -> 1 2 """
    assert x0.size() == torch.Size([3])
    assert x1.size() == torch.Size([4, 2])
    assert x2.size() == torch.Size([6])
    return g(x0, f(x1,x2))

h(torch.rand([3]), torch.rand([4, 2]), torch.rand([6]))
```

    tensor([[0.6837, 0.6853]])

## 2.3.1 Indexes
### Figure 8: Indexes
<img src="SVG/indexes.svg" width="700">

```python
# Extracting a subtensor is a process we are familiar with. Consider,
# A (4 3) tensor
table = torch.arange(0,12).view(4,3)
row = table[2,:]
row
```

    tensor([6, 7, 8])

### Figure 9: Subtensors
<img src="SVG/subtensors.svg" width="700">

```python
# Different orders of access give the same result.
# Set up a random (5 7) tensor
a, b = 5, 7
Xab = torch.rand([a] + [b])
# Show that all pairs of indexes give the same result
for ia, jb in itertools.product(range(a), range(b)):
    assert Xab[ia, jb] == Xab[ia, :][jb]
    assert Xab[ia, jb] == Xab[:, jb][ia]
```

## 2.3.2 Broadcasting
### Figure 10: Broadcasting
<img src="SVG/broadcasting0.svg" width="700">
<img src="SVG/broadcasting0a.svg" width="700">

```python
a, b, c, d = [3], [2], [4], [3]
T = torch.Tensor

# We have some function from a to b;
def G(Xa: T) -> T:
    """ G: a -> b """
    return sum(Xa**2) + torch.ones(b)

# We could bootstrap a definition of broadcasting,
# Note that we are using spaces to indicate tensoring.
# We will use commas for tupling, which is in line with standard notation while writing code.
def Gc(Xac: T) -> T:
    """ G c : a c -> b c """
    Ybc = torch.zeros(b + c)
    for jc in range(c[0]):
        Ybc[:,jc] = G(Xac[:,jc])
    return Ybc

# Or use a PyTorch command,
# G *: a * -> b *
Gs = torch.vmap(G, -1, -1)

# We feed a random input, and see whether applying an index before or after
# gives the same result.
Xac = torch.rand(a + c)
for jc in range(c[0]):
    assert torch.allclose(G(Xac[:,jc]), Gc(Xac)[:,jc])
    assert torch.allclose(G(Xac[:,jc]), Gs(Xac)[:,jc])

# This shows how our definition of broadcasting lines up with that used by PyTorch vmap.
```

### Figure 11: Inner Broadcasting
<img src="SVG/inner_broadcasting0.svg" width="700">
<img src="SVG/inner broadcasting0a.svg" width="700">

```python
a, b, c, d = [3], [2], [4], [3]
T = torch.Tensor

# We have some function which can be inner broadcast,
def H(Xa: T, Xd: T) -> T:
    """ H: a, d -> b """
    return torch.sum(torch.sqrt(Xa**2)) + torch.sum(torch.sqrt(Xd ** 2)) + torch.ones(b)

# We can bootstrap inner broadcasting,
def Hc0(Xca: T, Xd : T) -> T:
    """ c0 H: c a, d -> c b """
    # Recall that we defined a, b, c, d in [_] arrays.
    Ycb = torch.zeros(c + b)
    for ic in range(c[0]):
        Ycb[ic, :] = H(Xca[ic, :], Xd)
    return Ycb

# But vmap offers a clear way of doing it,
# *0 H: * a, d -> * b
Hs0 = torch.vmap(H, (0, None), 0)

# We can show this satisfies Definition 2.14 by,
Xca = torch.rand(c + a)
Xd = torch.rand(d)
for ic in range(c[0]):
    assert torch.allclose(Hc0(Xca, Xd)[ic, :], H(Xca[ic, :], Xd))
    assert torch.allclose(Hs0(Xca, Xd)[ic, :], H(Xca[ic, :], Xd))
```
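
The same recipe extends to inner broadcasting over the second argument. A minimal sketch (not part of the original), reusing H and the sizes above, with vmap mapping over axis 0 of the second input:

```python
# Broadcasting H over a new c axis on its second argument: a, c d -> c b
Hs1 = torch.vmap(H, (None, 0), 0)
Xa = torch.rand(a)
Xcd = torch.rand(c + d)
for ic in range(c[0]):
    assert torch.allclose(Hs1(Xa, Xcd)[ic, :], H(Xa, Xcd[ic, :]))
```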

### Figure 12: Elementwise operations
<img src="SVG/elementwise0.svg" width="700">

```python
# Elementwise operations are implemented as usual, i.e.
def f(x):
    "f : 1 -> 1"
    return x ** 2

# We broadcast an elementwise operation,
# f *: * -> *
fs = torch.vmap(f)

Xa = torch.rand(a)
for i in range(a[0]):
    # And see that it aligns with the index before = index after framework.
    assert torch.allclose(f(Xa[i]), fs(Xa)[i])
    # But, elementwise operations are implied, so no special implementation is needed.
    assert torch.allclose(f(Xa[i]), f(Xa)[i])
```

# 2.4 Linearity
## 2.4.2 Implementing Linearity and Common Operations
### Figure 17: Multi-head Attention and Einsum
<img src="SVG/implementation.svg" width="700">

```python
import math
import einops
x, y, k, h = 5, 3, 4, 2
Q = torch.rand([y, k, h])
K = torch.rand([x, k, h])

# Local memory contains,
# Q: y k h    # K: x k h
# Outer products, transposes, inner products, and
# diagonalization reduce to einops expressions.
# Transpose K,
K = einops.einsum(K, 'x k h -> k x h')
# Outer product and diagonalize,
X = einops.einsum(Q, K, 'y k1 h, k2 x h -> y k1 k2 x h')
# Inner product,
X = einops.einsum(X, 'y k k x h -> y x h')
# Scale,
X = X / math.sqrt(k)

Q = torch.rand([y, k, h])
K = torch.rand([x, k, h])

# Local memory contains,
# Q: y k h    # K: x k h
X = einops.einsum(Q, K, 'y k h, x k h -> y x h')
X = X / math.sqrt(k)
```
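
As a quick sanity check (not part of the original), both routes can be rerun on one shared pair of tensors, confirming that the step-by-step reduction and the single fused expression agree:

```python
# A minimal sketch reusing x, y, k, h from above, with a shared Q and K.
Q = torch.rand([y, k, h])
K = torch.rand([x, k, h])

# Step by step: transpose, outer product with diagonalization, inner product, scale.
Kt = einops.einsum(K, 'x k h -> k x h')
X1 = einops.einsum(Q, Kt, 'y k1 h, k2 x h -> y k1 k2 x h')
X1 = einops.einsum(X1, 'y k k x h -> y x h') / math.sqrt(k)

# Fused into a single einsum expression, then scaled.
X2 = einops.einsum(Q, K, 'y k h, x k h -> y x h') / math.sqrt(k)

assert torch.allclose(X1, X2)
```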

## 2.4.3 Linear Algebra
### Figure 18: Graphical Linear Algebra
<img src="SVG/linear_algebra.svg" width="700">
+
```python
|
219 |
+
# We will do an exercise implementing some of these equivalences.
|
220 |
+
# The reader can follow this exercise to get a better sense of how linear functions can be implemented,
|
221 |
+
# and how different forms are equivalent.
|
222 |
+
|
223 |
+
a, b, c, d = [3], [4], [5], [3]
|
224 |
+
|
225 |
+
# We will be using this function *a lot*
|
226 |
+
es = einops.einsum
|
227 |
+
|
228 |
+
# F: a b c
|
229 |
+
F_matrix = torch.rand(a + b + c)
|
230 |
+
|
231 |
+
# As an exericse we will show that the linear map F: a -> b c can be transposed in two ways.
|
232 |
+
# Either, we can broadcast, or take an outer product. We will show these are the same.
|
233 |
+
|
234 |
+
# Transposing by broadcasting
|
235 |
+
#
|
236 |
+
def F_func(Xa: T):
|
237 |
+
""" F: a -> b c """
|
238 |
+
return es(Xa,F_matrix,'a,a b c->b c',)
|
239 |
+
# * F: * a -> * b c
|
240 |
+
F_broadcast = torch.vmap(F_func, 0, 0)
|
241 |
+
|
242 |
+
# We then reduce it, as in the diagram,
|
243 |
+
# b a -> b b c -> c
|
244 |
+
def F_broadcast_transpose(Xba: T):
|
245 |
+
""" (b F) (.b c): b a -> c """
|
246 |
+
Xbbc = F_broadcast(Xba)
|
247 |
+
return es(Xbbc, 'b b c -> c')
|
248 |
+
|
249 |
+
# Transpoing by linearity
|
250 |
+
#
|
251 |
+
# We take the outer product of Id(b) and F, and follow up with a inner product.
|
252 |
+
# This gives us,
|
253 |
+
F_outerproduct = es(torch.eye(b[0]), F_matrix,'b0 b1, a b2 c->b0 b1 a b2 c',)
|
254 |
+
# Think of this as Id(b) F: b0 a -> b1 b2 c arranged into an associated b0 b1 a b2 c tensor.
|
255 |
+
# We then take the inner product. This gives a (b a c) matrix, which can be used for a (b a -> c) map.
|
256 |
+
F_linear_transpose = es(F_outerproduct,'b B a B c->b a c',)
|
257 |
+
|
258 |
+
# We contend that these are the same.
|
259 |
+
#
|
260 |
+
Xba = torch.rand(b + a)
|
261 |
+
assert torch.allclose(
|
262 |
+
F_broadcast_transpose(Xba),
|
263 |
+
es(Xba,F_linear_transpose, 'b a, b a c -> c'))
|
264 |
+
|
265 |
+
# Furthermore, lets prove the unit-inner product identity.
|
266 |
+
#
|
267 |
+
# The first step is an outer product with the unit,
|
268 |
+
outerUnit = lambda Xb: es(Xb, torch.eye(b[0]), 'b0, b1 b2 -> b0 b1 b2')
|
269 |
+
# The next is a inner product over the first two axes,
|
270 |
+
dotOuter = lambda Xbbb: es(Xbbb, 'b0 b0 b1 -> b1')
|
271 |
+
# Applying both of these *should* be the identity, and hence leave any input unchanged.
|
272 |
+
Xb = torch.rand(b)
|
273 |
+
assert torch.allclose(
|
274 |
+
Xb,
|
275 |
+
dotOuter(outerUnit(Xb)))
|
276 |
+
|
277 |
+
# Therefore, we can confidently use the expressions in Figure 18 to manipulate expressions.
|
278 |
+
```
|
279 |
+
|

# 3.1 Basic Multi-Layer Perceptron
### Figure 19: Implementing a Basic Multi-Layer Perceptron
<img src="SVG/imagerec.svg" width="700">

```python
import torch.nn as nn
# Basic Image Recogniser
# This is a close copy of an introductory PyTorch tutorial:
# https://pytorch.org/tutorials/beginner/basics/buildmodel_tutorial.html
class BasicImageRecogniser(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28*28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
            )
    def forward(self, x):
        x = self.flatten(x)
        x = self.linear_relu_stack(x)
        # Apply softmax over the class axis to get predicted probabilities.
        y_pred = nn.Softmax(dim=-1)(x)
        return y_pred

my_BasicImageRecogniser = BasicImageRecogniser()
my_BasicImageRecogniser.forward(torch.rand([1,28,28]))
```
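
Since the forward pass ends with a softmax over the ten classes, the output should be a probability vector. A quick check (not part of the original tutorial):

```python
# The predicted probabilities should be shaped 1 10, be non-negative, and sum to one.
y_pred = my_BasicImageRecogniser.forward(torch.rand([1,28,28]))
assert y_pred.size() == torch.Size([1, 10])
assert (y_pred >= 0).all()
assert torch.allclose(y_pred.sum(), torch.tensor(1.0))
```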

# 3.2 Neural Circuit Diagrams for the Transformer Architecture
### Figure 20: Scaled Dot-Product Attention
<img src="SVG/scaled_attention.svg" width="700">

```python
# Note that we need to accommodate batches, hence the ... to capture additional axes.

# We can do the algorithm step by step,
def ScaledDotProductAttention(q: T, k: T, v: T) -> T:
    ''' yk, xk, xk -> yk '''
    klength = k.size()[-1]
    # Transpose
    k = einops.einsum(k, '... x k -> ... k x')
    # Matrix Multiply / Inner Product
    x = einops.einsum(q, k, '... y k, ... k x -> ... y x')
    # Scale
    x = x / math.sqrt(klength)
    # SoftMax
    x = torch.nn.Softmax(-1)(x)
    # Matrix Multiply / Inner Product
    x = einops.einsum(x, v, '... y x, ... x k -> ... y k')
    return x

# Alternatively, we can simultaneously broadcast linear functions.
def ScaledDotProductAttention(q: T, k: T, v: T) -> T:
    ''' yk, xk, xk -> yk '''
    klength = k.size()[-1]
    # Inner Product and Scale
    x = einops.einsum(q, k, '... y k, ... x k -> ... y x')
    # Scale and SoftMax
    x = torch.nn.Softmax(-1)(x / math.sqrt(klength))
    # Final Inner Product
    x = einops.einsum(x, v, '... y x, ... x k -> ... y k')
    return x
```
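
As an optional check (not part of the original), the broadcast form can be compared against PyTorch's built-in kernel, which computes the same map on batched inputs. This assumes PyTorch 2.0 or later, where torch.nn.functional.scaled_dot_product_attention is available.

```python
# A minimal sketch, assuming PyTorch >= 2.0 for the built-in attention kernel.
import torch.nn.functional as F

query, key, value = torch.rand([2, 7, 4]), torch.rand([2, 5, 4]), torch.rand([2, 5, 4])
assert torch.allclose(
    ScaledDotProductAttention(query, key, value),
    F.scaled_dot_product_attention(query, key, value),
    atol=1e-5)
```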

### Figure 21: Multi-Head Attention
<img src="SVG/multihead0.svg" width="700">

We will be implementing this algorithm. This shows us how we go from diagrams to implementations, and begins to give an idea of how organized diagrams lead to organized code.

```python
def MultiHeadDotProductAttention(q: T, k: T, v: T) -> T:
    ''' ykh, xkh, xkh -> ykh '''
    klength = k.size()[-2]
    x = einops.einsum(q, k, '... y k h, ... x k h -> ... y x h')
    x = torch.nn.Softmax(-2)(x / math.sqrt(klength))
    x = einops.einsum(x, v, '... y x h, ... x k h -> ... y k h')
    return x

# We implement this component as a neural network model.
# This is necessary when there are bold, learned components that need to be initialized.
class MultiHeadAttention(nn.Module):
    # Multi-Head attention has various settings, which become variables
    # for the initializer.
    def __init__(self, m, k, h):
        super().__init__()
        self.m, self.k, self.h = m, k, h
        # Set up all the boldface, learned components
        # Note how they bind axes we want to split, which we do later with einops.
        self.Lq = nn.Linear(m, k*h, False)
        self.Lk = nn.Linear(m, k*h, False)
        self.Lv = nn.Linear(m, k*h, False)
        self.Lo = nn.Linear(k*h, m, False)

    # We have endogenous data (Eym) and external / injected data (Xxm)
    def forward(self, Eym, Xxm):
        """ y m, x m -> y m """
        # We first generate query, key, and value vectors.
        # Linear layers are automatically broadcast.

        # However, the k and h axes are bound. We define an unbinder to handle the outputs,
        unbind = lambda x: einops.rearrange(x, '... (k h)->... k h', h=self.h)
        q = unbind(self.Lq(Eym))
        k = unbind(self.Lk(Xxm))
        v = unbind(self.Lv(Xxm))

        # We feed q, k, and v to standard Multi-Head inner product Attention
        o = MultiHeadDotProductAttention(q, k, v)

        # Rebind to feed to the final learned layer,
        o = einops.rearrange(o, '... k h-> ... (k h)', h=self.h)
        return self.Lo(o)

# Now we can run it on fake data;
y, x, m, jc, heads = [20], [22], [128], [16], 4
# Internal Data
Eym = torch.rand(y + m)
# External Data
Xxm = torch.rand(x + m)

mha = MultiHeadAttention(m[0],jc[0],heads)
assert list(mha.forward(Eym, Xxm).size()) == y + m
```
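
Because the attention core uses ... for leading axes and linear layers broadcast over them, the same module accepts batched inputs unchanged. A quick check (not part of the original), using a hypothetical batch axis of size 7:

```python
# A minimal sketch: the same mha module applied to batched data, reusing the shapes above.
batch = [7]
assert list(mha.forward(torch.rand(batch + y + m), torch.rand(batch + x + m)).size()) == batch + y + m
```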

# 3.4 Computer Vision

Here, we really start to understand why splitting diagrams into "fenced off" blocks aids implementation. In addition to making diagrams easier to understand and patterns clearer, blocks indicate how code can be structured and organized.

## Figure 26: Identity Residual Network
<img src="SVG/IdResNet_overall.svg" width="700">

```python
# For Figure 26, every fenced off region is its own module.

# Batch norm and then activate is a repeated motif,
class NormActivate(nn.Sequential):
    def __init__(self, nf, Norm=nn.BatchNorm2d, Activation=nn.ReLU):
        super().__init__(Norm(nf), Activation())

def size_to_string(size):
    return " ".join(map(str,list(size)))

# The Identity ResNet block breaks down into a manageable sequence of components.
class IdentityResNet(nn.Sequential):
    def __init__(self, N=3, n_mu=[16,64,128,256], y=10):
        super().__init__(
            nn.Conv2d(3, n_mu[0], 3, padding=1),
            Block(1, N, n_mu[0], n_mu[1]),
            Block(2, N, n_mu[1], n_mu[2]),
            Block(2, N, n_mu[2], n_mu[3]),
            NormActivate(n_mu[3]),
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(n_mu[3], y),
            nn.Softmax(-1),
            )
```

The Block can be defined in a separate module, keeping the code manageable and closely connected to the diagram.

<img src="SVG/IdResNet_block.svg" width="700">

```python
# We then follow how diagrams define each ``block''
class Block(nn.Sequential):
    def __init__(self, s, N, n0, n1):
        """ n0 and n1 as inputs to the initializer are implicit from having them in the domain and codomain in the diagram. """
        nb = n1 // 4
        super().__init__(
            *[
                NormActivate(n0),
                ResidualConnection(
                    nn.Sequential(
                        nn.Conv2d(n0, nb, 1, s),
                        NormActivate(nb),
                        nn.Conv2d(nb, nb, 3, padding=1),
                        NormActivate(nb),
                        nn.Conv2d(nb, n1, 1),
                        ),
                    nn.Conv2d(n0, n1, 1, s),
                    )
            ] + [
                ResidualConnection(
                    nn.Sequential(
                        NormActivate(n1),
                        nn.Conv2d(n1, nb, 1),
                        NormActivate(nb),
                        nn.Conv2d(nb, nb, 3, padding=1),
                        NormActivate(nb),
                        nn.Conv2d(nb, n1, 1)
                        ),
                    )
            ] * N
            )
# Residual connections are a repeated pattern in the diagram. So, we are motivated to encapsulate them
# as a separate module.
class ResidualConnection(nn.Module):
    def __init__(self, mainline : nn.Module, connection : nn.Module | None = None) -> None:
        super().__init__()
        self.main = mainline
        self.secondary = nn.Identity() if connection is None else connection
    def forward(self, x):
        return self.main(x) + self.secondary(x)
```

```python
# A standard image processing algorithm has inputs shaped b c h w.
b, c, hw = [3], [3], [16, 16]

idresnet = IdentityResNet()
Xbchw = torch.rand(b + c + hw)

# And we see if the overall size is maintained,
assert list(idresnet.forward(Xbchw).size()) == b + [10]
```

The UNet is a more complicated algorithm than residual networks. The "fenced off" sections keep our code organized, and diagrams streamline implementation.

## Figure 27: The UNet architecture
<img src="SVG/unet.svg" width="700">

```python
# We notice that double convolution where the numbers of channels change is a repeated motif.
# We denote the input with c0 and output with c1.
# This can also be done for subsequent members of an iteration.
# When we go down an iteration e.g. 5, 4, etc. we may have the input be c1 and the output c0.
class DoubleConvolution(nn.Sequential):
    def __init__(self, c0, c1, Activation=nn.ReLU):
        super().__init__(
            nn.Conv2d(c0, c1, 3, padding=1),
            Activation(),
            nn.Conv2d(c1, c1, 3, padding=1),
            Activation(),
            )

# The model is specified for a very specific number of layers,
# so we will not make it very flexible.
class UNet(nn.Module):
    def __init__(self, y=2):
        super().__init__()
        # Set up the channel sizes;
        c = [1 if i == 0 else 64 * 2 ** i for i in range(6)]

        # Saving and loading from memory means we can not use a single,
        # sequential chain.

        # Set up and initialize the components;
        # nn.ModuleList registers the contained blocks as submodules.
        self.DownScaleBlocks = nn.ModuleList([
            DownScaleBlock(c[i],c[i+1])
            for i in range(0,4)
            ]) # Note how this imitates the lambda operators in the diagram.
        self.middleDoubleConvolution = DoubleConvolution(c[4], c[5])
        self.middleUpscale = nn.ConvTranspose2d(c[5], c[4], 2, 2, 1)
        self.upScaleBlocks = nn.ModuleList([
            UpScaleBlock(c[5-i],c[4-i])
            for i in range(1,4)
            ])
        self.finalConvolution = nn.Conv2d(c[1], y, 1)

    def forward(self, x):
        cLambdas = []
        for dsb in self.DownScaleBlocks:
            x, cLambda = dsb(x)
            cLambdas.append(cLambda)
        x = self.middleDoubleConvolution(x)
        x = self.middleUpscale(x)
        for usb in self.upScaleBlocks:
            cLambda = cLambdas.pop()
            x = usb(x, cLambda)
        x = self.finalConvolution(x)
        return x

class DownScaleBlock(nn.Module):
    def __init__(self, c0, c1) -> None:
        super().__init__()
        self.doubleConvolution = DoubleConvolution(c0, c1)
        self.downScaler = nn.MaxPool2d(2, 2, 1)
    def forward(self, x):
        cLambda = self.doubleConvolution(x)
        x = self.downScaler(cLambda)
        return x, cLambda

class UpScaleBlock(nn.Module):
    def __init__(self, c1, c0) -> None:
        super().__init__()
        self.doubleConvolution = DoubleConvolution(2*c1, c1)
        self.upScaler = nn.ConvTranspose2d(c1,c0,2,2,1)
    def forward(self, x, cLambda):
        # Concatenation occurs over the C channel axis (dim=1)
        x = torch.concat((x, cLambda), 1)
        x = self.doubleConvolution(x)
        x = self.upScaler(x)
        return x
```
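
This README does not exercise the UNet on data, but as a rough check (not part of the original) we can instantiate it and push through a single-channel input whose spatial size survives the pooling and transposed-convolution arithmetic; 18 x 18 is one such size. Note this builds a fairly large model.

```python
# A minimal sketch: 1-channel 18 x 18 input, y=2 output channels, overall size preserved.
unet = UNet()
assert list(unet.forward(torch.rand([1, 1, 18, 18])).size()) == [1, 2, 18, 18]
```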

# 3.5 Vision Transformer

We adapt our code for Multi-Head Attention to apply it to the vision case. This is a good exercise in how neural circuit diagrams allow code to be easily adapted for new modalities.
## Figure 28: Visual Attention
<img src="SVG/visual_attention.svg" width="700">

```python
class VisualAttention(nn.Module):
    def __init__(self, c, k, heads = 1, kernel = 1, stride = 1):
        super().__init__()

        # w gives the kernel size, which we make adjustable.
        self.c, self.k, self.h, self.w = c, k, heads, kernel
        # Set up all the boldface, learned components
        # Note how standard components may not have axes bound in
        # the same way as diagrams. This requires us to rearrange
        # using the einops package.

        # The learned layers form convolutions
        self.Cq = nn.Conv2d(c, k * heads, kernel, stride)
        self.Ck = nn.Conv2d(c, k * heads, kernel, stride)
        self.Cv = nn.Conv2d(c, k * heads, kernel, stride)
        self.Co = nn.ConvTranspose2d(
            k * heads, c, kernel, stride)

    # Defined previously, closely follows the diagram.
    def MultiHeadDotProductAttention(self, q: T, k: T, v: T) -> T:
        ''' ykh, xkh, xkh -> ykh '''
        klength = k.size()[-2]
        x = einops.einsum(q, k, '... y k h, ... x k h -> ... y x h')
        x = torch.nn.Softmax(-2)(x / math.sqrt(klength))
        x = einops.einsum(x, v, '... y x h, ... x k h -> ... y k h')
        return x

    # We have endogenous data (EYc) and external / injected data (XXc)
    def forward(self, EcY, XcX):
        """ cY, cX -> cY
        The visual attention algorithm. Injects information from Xc into Yc. """
        # query, key, and value vectors.
        # We unbind the k h axes which were produced by the convolutions, and feed them
        # in the normal manner to MultiHeadDotProductAttention.
        unbind = lambda x: einops.rearrange(x, 'N (k h) H W -> N (H W) k h', h=self.h)
        # Save size to recover it later
        q = self.Cq(EcY)
        W = q.size()[-1]

        # By appropriately managing the axes, minimal changes to our previous code
        # are necessary.
        q = unbind(q)
        k = unbind(self.Ck(XcX))
        v = unbind(self.Cv(XcX))
        o = self.MultiHeadDotProductAttention(q, k, v)

        # Rebind to feed to the transposed convolution layer.
        o = einops.rearrange(o, 'N (H W) k h -> N (k h) H W',
                             h=self.h, W=W)
        return self.Co(o)

# Single batch element,
b = [1]
Y, X, c, k = [16, 16], [16, 16], [33], 8
# The additional configurations,
heads, kernel, stride = 4, 3, 3

# Internal Data,
EYc = torch.rand(b + c + Y)
# External Data,
XXc = torch.rand(b + c + X)

# We can now run the algorithm,
visualAttention = VisualAttention(c[0], k, heads, kernel, stride)

# Interestingly, the height/width reduces by 1 for stride
# values above 1. Otherwise, it stays the same.
visualAttention.forward(EYc, XXc).size()
```

    torch.Size([1, 33, 15, 15])

# Appendix

```python
# A container to track the size of modules,
# Replace a module definition eg.
# > self.Cq = nn.Conv2d(c, k * heads, kernel, stride)
# With;
# > self.Cq = Tracker(nn.Conv2d(c, k * heads, kernel, stride), "Query convolution")
# And the input / output sizes (to check diagrams) will be printed.
class Tracker(nn.Module):
    def __init__(self, module: nn.Module, name : str = ""):
        super().__init__()
        self.module = module
        if name:
            self.name = name
        else:
            self.name = self.module._get_name()
    def forward(self, x):
        x_size = size_to_string(x.size())
        x = self.module.forward(x)
        y_size = size_to_string(x.size())
        print(f"{self.name}: \t {x_size} -> {y_size}")
        return x
```
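
For example, a hypothetical usage sketch (not part of the original): wrap a single layer and pass a random tensor through it, and the input and output sizes are printed for checking against a diagram.

```python
# Hypothetical usage sketch: wrap any module to log its input -> output sizes.
tracked = Tracker(nn.Linear(16, 8), "Example linear")
_ = tracked(torch.rand([4, 16]))
# Prints something like: Example linear:   4 16 -> 4 8
```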