import math

import torch
import torch.nn as nn
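
# Conv2d_fw and BatchNorm2d_fw (fast-weight variants of Conv2d/BatchNorm2d
# used for MAML) are referenced below when the class attribute `maml` is
# True; they are not defined in this file and are assumed to be provided
# elsewhere in the project.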


def init_layer(L):
    # He initialization computed from fan-out (kernel area times out_channels)
    if isinstance(L, nn.Conv2d):
        n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
        L.weight.data.normal_(0, math.sqrt(2.0 / float(n)))
    elif isinstance(L, nn.BatchNorm2d):
        L.weight.data.fill_(1)
        L.bias.data.fill_(0)


class Flatten(nn.Module):
    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        # Collapse everything after the batch dimension into one vector
        return x.view(x.size(0), -1)


class SimpleBlock(nn.Module):
    maml = False  # Default

    def __init__(self, indim, outdim, half_res):
        super(SimpleBlock, self).__init__()
        self.indim = indim
        self.outdim = outdim
        if self.maml:
            self.C1 = Conv2d_fw(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
            self.BN1 = BatchNorm2d_fw(outdim)
            self.C2 = Conv2d_fw(outdim, outdim, kernel_size=3, padding=1, bias=False)
            self.BN2 = BatchNorm2d_fw(outdim)
        else:
            self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
            self.BN1 = nn.BatchNorm2d(outdim)
            self.C2 = nn.Conv2d(outdim, outdim, kernel_size=3, padding=1, bias=False)
            self.BN2 = nn.BatchNorm2d(outdim)
        self.relu1 = nn.ReLU(inplace=True)
        self.relu2 = nn.ReLU(inplace=True)

        self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]

        self.half_res = half_res

        # If the number of input channels is not equal to the number of output
        # channels, the residual shortcut needs a 1x1 convolution to match them
        if indim != outdim:
            if self.maml:
                self.shortcut = Conv2d_fw(indim, outdim, 1, 2 if half_res else 1, bias=False)
                self.BNshortcut = BatchNorm2d_fw(outdim)
            else:
                self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else 1, bias=False)
                self.BNshortcut = nn.BatchNorm2d(outdim)

            self.parametrized_layers.append(self.shortcut)
            self.parametrized_layers.append(self.BNshortcut)
            self.shortcut_type = '1x1'
        else:
            self.shortcut_type = 'identity'

        for layer in self.parametrized_layers:
            init_layer(layer)

    def forward(self, x):
        out = self.C1(x)
        out = self.BN1(out)
        out = self.relu1(out)
        out = self.C2(out)
        out = self.BN2(out)
        short_out = x if self.shortcut_type == 'identity' else self.BNshortcut(self.shortcut(x))
        out = out + short_out
        out = self.relu2(out)
        return out


class ResNet(nn.Module):
    maml = False  # Default

    def __init__(self, block, list_of_num_layers, list_of_out_dims, flatten=True):
        # list_of_num_layers specifies the number of blocks in each stage
        # list_of_out_dims specifies the number of output channels for each stage
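        # e.g. ResNet18 below passes list_of_num_layers=[2, 2, 2, 2] and
        # list_of_out_dims=[64, 128, 256, 512]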
        super(ResNet, self).__init__()
        assert len(list_of_num_layers) == 4, 'Can have only four stages'
        if self.maml:
            conv1 = Conv2d_fw(3, 64, kernel_size=7, stride=2, padding=3,
                              bias=False)
            bn1 = BatchNorm2d_fw(64)
        else:
            conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                              bias=False)
            bn1 = nn.BatchNorm2d(64)

        relu = nn.ReLU()
        pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        init_layer(conv1)
        init_layer(bn1)

        trunk = [conv1, bn1, relu, pool1]

        indim = 64
        for i in range(4):
            for j in range(list_of_num_layers[i]):
                # The first block of every stage after the first halves the resolution
                half_res = (i >= 1) and (j == 0)
                B = block(indim, list_of_out_dims[i], half_res)
                trunk.append(B)
                indim = list_of_out_dims[i]

        if flatten:
            avgpool = nn.AvgPool2d(7)
            trunk.append(avgpool)
            trunk.append(Flatten())
            self.final_feat_dim = indim
        else:
            self.final_feat_dim = [indim, 7, 7]

        self.trunk = nn.Sequential(*trunk)

    def forward(self, x):
        out = self.trunk(x)
        return out


def ResNet18(flatten=True):
    return ResNet(SimpleBlock, [2, 2, 2, 2], [64, 128, 256, 512], flatten)
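

# A minimal usage sketch: build the ResNet18 backbone and push a dummy batch
# through it. The 224x224 input size is an assumption; it is the size for
# which the 7x7 average pool (used when flatten=True) reduces the final
# feature map to 1x1, yielding a 512-dimensional feature vector per image.
if __name__ == "__main__":
    model = ResNet18(flatten=True)
    x = torch.randn(2, 3, 224, 224)  # batch of 2 RGB images
    feats = model(x)
    print(feats.shape)  # expected: torch.Size([2, 512])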