loss.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module):
    """Focal loss (Lin et al., 2017): -alpha_t * (1 - p_t)^gamma * log(p_t)."""

    def __init__(self, gamma=0, alpha=None, size_average=True):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        # A scalar alpha is treated as the class-0 weight of a binary
        # problem; a list supplies one weight per class.
        if isinstance(alpha, (float, int)):
            self.alpha = torch.tensor([alpha, 1 - alpha])
        if isinstance(alpha, list):
            self.alpha = torch.tensor(alpha)
        self.size_average = size_average

    def forward(self, inputs, target):
        if inputs.dim() > 2:
            inputs = inputs.view(inputs.size(0), inputs.size(1), -1)  # N,C,H,W => N,C,H*W
            inputs = inputs.transpose(1, 2)                           # N,C,H*W => N,H*W,C
            inputs = inputs.contiguous().view(-1, inputs.size(2))     # N,H*W,C => N*H*W,C
        target = target.view(-1, 1)

        # Log-softmax must run over the class dimension (dim=1), not dim=0.
        logpt = F.log_softmax(inputs, dim=1)
        logpt = logpt.gather(1, target).view(-1)  # log p_t of the true class
        pt = logpt.detach().exp()                 # p_t; detached so the modulating factor carries no gradient

        if self.alpha is not None:
            # Move the class weights to the logits' device/dtype on first use.
            if self.alpha.device != inputs.device or self.alpha.dtype != inputs.dtype:
                self.alpha = self.alpha.to(device=inputs.device, dtype=inputs.dtype)
            at = self.alpha.gather(0, target.view(-1))
            logpt = logpt * at

        loss = -1 * (1 - pt) ** self.gamma * logpt
        if self.size_average:
            return loss.mean()
        return loss.sum()
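

# Minimal usage sketch: a hypothetical 2-class batch of 8 samples (the
# sizes, gamma, and alpha values below are illustrative assumptions, not
# anything this module prescribes). Inputs are raw logits of shape (N, C)
# or (N, C, H, W); targets are integer class indices.
if __name__ == "__main__":
    criterion = FocalLoss(gamma=2, alpha=[0.25, 0.75])
    logits = torch.randn(8, 2, requires_grad=True)  # N=8 samples, C=2 classes
    targets = torch.randint(0, 2, (8,))             # integer labels in [0, C)
    loss = criterion(logits, targets)
    loss.backward()                                 # gradients flow through logpt only
    print(loss.item())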