Sa-m committed on
Commit f05f69b
1 Parent(s): 373488e

Upload activations.py

Files changed (1)
  1. utils/activations.py +72 -0
utils/activations.py ADDED
@@ -0,0 +1,72 @@
+ # Activation functions
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+ # SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
+ class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
+     @staticmethod
+     def forward(x):
+         return x * torch.sigmoid(x)
+
+
+ class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
+     @staticmethod
+     def forward(x):
+         # return x * F.hardsigmoid(x)  # for torchscript and CoreML
+         return x * F.hardtanh(x + 3, 0., 6.) / 6.  # for torchscript, CoreML and ONNX
+
+
+ class MemoryEfficientSwish(nn.Module):
+     class F(torch.autograd.Function):
+         @staticmethod
+         def forward(ctx, x):
+             ctx.save_for_backward(x)
+             return x * torch.sigmoid(x)
+
+         @staticmethod
+         def backward(ctx, grad_output):
+             x = ctx.saved_tensors[0]
+             sx = torch.sigmoid(x)
+             return grad_output * (sx * (1 + x * (1 - sx)))
+
+     def forward(self, x):
+         return self.F.apply(x)
+
+
+ # Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
+ class Mish(nn.Module):
+     @staticmethod
+     def forward(x):
+         return x * F.softplus(x).tanh()
+
+
+ class MemoryEfficientMish(nn.Module):
+     class F(torch.autograd.Function):
+         @staticmethod
+         def forward(ctx, x):
+             ctx.save_for_backward(x)
+             return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))
+
+         @staticmethod
+         def backward(ctx, grad_output):
+             x = ctx.saved_tensors[0]
+             sx = torch.sigmoid(x)
+             fx = F.softplus(x).tanh()
+             return grad_output * (fx + x * sx * (1 - fx * fx))
+
+     def forward(self, x):
+         return self.F.apply(x)
+
+
+ # FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
+ class FReLU(nn.Module):
+     def __init__(self, c1, k=3):  # ch_in, kernel
+         super().__init__()
+         self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
+         self.bn = nn.BatchNorm2d(c1)
+
+     def forward(self, x):
+         return torch.max(x, self.bn(self.conv(x)))
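
For reference, a minimal usage sketch (not part of the committed file): it assumes the repository root is on PYTHONPATH so the module imports as utils.activations, and the input shape and c1=64 are illustrative values only. It runs each activation on a random tensor and checks that MemoryEfficientSwish, which recomputes sigmoid in backward() rather than letting autograd cache intermediates, produces the same gradient as the plain SiLU.

import torch
from utils.activations import SiLU, Hardswish, MemoryEfficientSwish, Mish, MemoryEfficientMish, FReLU  # assumed import path

x = torch.randn(2, 64, 32, 32, requires_grad=True)  # illustrative NCHW input

# Element-wise activations accept tensors of any shape
for act in (SiLU(), Hardswish(), MemoryEfficientSwish(), Mish(), MemoryEfficientMish()):
    assert act(x).shape == x.shape

# FReLU is channel-aware: c1 must equal the input channel count (64 here, an illustrative value)
assert FReLU(c1=64)(x).shape == x.shape

# The hand-written backward implements d/dx[x*sigmoid(x)] = sigmoid(x)*(1 + x*(1 - sigmoid(x))),
# so its gradient should match the plain SiLU computed by autograd
g1, = torch.autograd.grad(SiLU()(x).sum(), x)
x2 = x.detach().clone().requires_grad_(True)
g2, = torch.autograd.grad(MemoryEfficientSwish()(x2).sum(), x2)
assert torch.allclose(g1, g2, atol=1e-6)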