From 1cdb58dadf857d6d57feb834038734d23b7d356d Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 22 Apr 2021 17:26:05 +0200
Subject: [PATCH] ACON activation function (#2893)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* ACON Activation Function

## 🚀 Feature

There is a new activation function, [ACON (CVPR 2021)](https://arxiv.org/pdf/2009.04759.pdf), that unifies ReLU and Swish. ACON is simple but very effective; the reference code is here: https://github.com/nmaac/acon/blob/main/acon.py#L19

![image](https://user-images.githubusercontent.com/5032208/115676962-a38dfe80-a382-11eb-9883-61fa3216e3e6.png)

The reported improvements are significant:

![image](https://user-images.githubusercontent.com/5032208/115680180-eac9be80-a385-11eb-9c7a-8643db552c69.png)

## Alternatives

There is also an enhanced version, meta-ACON, which uses a small network to learn beta explicitly; this may slightly affect speed.

## Additional context

[Code](https://github.com/nmaac/acon) and [paper](https://arxiv.org/pdf/2009.04759.pdf).

* Update activations.py
---
 utils/activations.py | 58 +++++++++++++++++++++++++++++++-------------
 1 file changed, 41 insertions(+), 17 deletions(-)

diff --git a/utils/activations.py b/utils/activations.py
index aa3ddf071d28..1d095c1cf0f1 100644
--- a/utils/activations.py
+++ b/utils/activations.py
@@ -19,23 +19,6 @@ def forward(x):
         return x * F.hardtanh(x + 3, 0., 6.) / 6.  # for torchscript, CoreML and ONNX
 
 
-class MemoryEfficientSwish(nn.Module):
-    class F(torch.autograd.Function):
-        @staticmethod
-        def forward(ctx, x):
-            ctx.save_for_backward(x)
-            return x * torch.sigmoid(x)
-
-        @staticmethod
-        def backward(ctx, grad_output):
-            x = ctx.saved_tensors[0]
-            sx = torch.sigmoid(x)
-            return grad_output * (sx * (1 + x * (1 - sx)))
-
-    def forward(self, x):
-        return self.F.apply(x)
-
-
 # Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
 class Mish(nn.Module):
     @staticmethod
@@ -70,3 +53,44 @@ def __init__(self, c1, k=3):  # ch_in, kernel
 
     def forward(self, x):
         return torch.max(x, self.bn(self.conv(x)))
+
+
+# ACON https://arxiv.org/pdf/2009.04759.pdf ----------------------------------------------------------------------------
+class AconC(nn.Module):
+    r""" ACON activation (activate or not).
+    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
+    according to "Activate or Not: Learning Customized Activation".
+    """
+
+    def __init__(self, c1):
+        super().__init__()
+        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
+        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
+        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
+
+    def forward(self, x):
+        dpx = (self.p1 - self.p2) * x
+        return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
+
+
+class MetaAconC(nn.Module):
+    r""" ACON activation (activate or not).
+    MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
+    according to "Activate or Not: Learning Customized Activation".
+    """
+
+    def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
+        super().__init__()
+        c2 = max(r, c1 // r)
+        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
+        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
+        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=False)
+        self.bn1 = nn.BatchNorm2d(c2)
+        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=False)
+        self.bn2 = nn.BatchNorm2d(c1)
+
+    def forward(self, x):
+        y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
+        beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y)))))
+        dpx = (self.p1 - self.p2) * x
+        return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
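
For reviewers, a minimal usage sketch (not part of the patch): it assumes the patched `utils/activations.py` is importable, and the surrounding `block`, channel count and tensor shapes are illustrative only. It also checks the "unifies ReLU and Swish" claim in one direction: with `p1 = 1`, `p2 = 0`, `beta = 1`, AconC reduces to `x * sigmoid(x)` (Swish/SiLU).

```python
# Illustrative sketch only -- assumes the patched utils/activations.py is on PYTHONPATH
import torch
import torch.nn as nn
import torch.nn.functional as F

from utils.activations import AconC, MetaAconC

c1 = 64  # channels seen by the activation (example value)

# Drop-in use after a convolution (hypothetical block, not a YOLOv5 module)
block = nn.Sequential(
    nn.Conv2d(3, c1, 3, 1, 1, bias=False),
    nn.BatchNorm2d(c1),
    AconC(c1),        # learnable per-channel p1, p2, beta
    # MetaAconC(c1),  # alternative: beta predicted by a small network
)
y = block(torch.randn(2, 3, 64, 64))
print(y.shape)  # torch.Size([2, 64, 64, 64])

# Sanity check: with p1=1, p2=0, beta=1, AconC(x) == x * sigmoid(x) (Swish/SiLU)
act = AconC(c1)
with torch.no_grad():
    act.p1.fill_(1.)
    act.p2.fill_(0.)
    act.beta.fill_(1.)
x = torch.randn(2, c1, 8, 8)
assert torch.allclose(act(x), F.silu(x), atol=1e-6)
```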