-
Notifications
You must be signed in to change notification settings - Fork 3
/
SAT4.lua
241 lines (188 loc) · 6.11 KB
/
SAT4.lua
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
-- SAT-4 training script (Torch7). Trains a ConvNet on the SAT-4 satellite
-- image dataset (4 classes) and evaluates on the held-out test split.
-- External dependencies: mattorch (reads the .mat dataset), cunn (CUDA nn
-- backend; pulls in nn), optim (SGD), image, xlua (progress bar),
-- trepl.colorize (colored console output).
-- NOTE(review): require order kept as-is; mattorch/cunn load order can matter
-- on some Torch installs — confirm before reordering.
require 'mattorch'
require 'cunn'
require 'nn'
require 'optim'
require 'image'
require 'xlua'
local c = require 'trepl.colorize'
-- Command-line options parsed by pl.lapp (the [[...]] block is the live help
-- text and option spec — it is runtime data, not a comment).
opt = lapp[[
-b,--batchSize (default 100) batch size
-r,--learningRate (default 0.2) learning rate
--learningRateDecay (default 1e-7) learning rate decay
--weightDecay (default 0.0005) weightDecay
-m,--momentum (default 0.9) momentum
--epoch_step (default 3) epoch step
--max_epoch (default 36) maximum number of iteration
--model (default ConvNet) model name
]]
--4-class classification problem: the SAT-4 land-cover categories
classes = {'barren-land', 'trees', 'grassland', 'else'}
--load desirable model definition from models/<name>.lua (e.g. models/ConvNet.lua)
model=dofile('models/'..opt.model..'.lua')
--transfer model to GPU (all parameters become CudaTensors)
model:cuda()
--retrieve flattened parameters and gradients for optim.sgd.
--NOTE(review): must be called AFTER model:cuda(), since :cuda() reallocates
--the weights — do not reorder these two statements.
parameters,gradParameters = model:getParameters()
--print model
print('<SAT-4 dataset> using model:')
print(model)
--setting criterion (cross-entropy loss, on GPU to match the model output)
print('==>' ..' setting criterion')
criterion = nn.CrossEntropyCriterion():cuda()
--set dimensions of confusion matrix (4x4, one row/col per class)
confusion = optim.ConfusionMatrix(#classes)
--define number of patches for training and testing
trsize=400000
tesize=100000
--load dataset (mattorch returns a table of tensors keyed by .mat variable name)
print '==> loading dataset'
--local matio = require 'matio'
dataset=mattorch.load('sat-4-full.mat')
--adjust dimensions so that they are appropriate for torch
trd1=dataset.train_x
trl1=dataset.train_y
ted1=dataset.test_x
tel1=dataset.test_y
-- Training split: data as DoubleTensor, one-hot labels (converted to class
-- indices further below), and a size() accessor used by downstream code.
trainData = {
data = trd1:double(),
labels = trl1:double(),
size = function() return trsize end
}
-- Test split. BUG FIX: size() previously returned trsize (400000) — the
-- TRAIN set size — instead of the 100000 test patches; it now reports tesize.
testData = {
data = ted1:double(),
labels = tel1:double(),
size = function() return tesize end
}
--extract the desirable number of training (trsize) and testing (tesize) patches
trainData.data=trainData.data[{{1,trsize}}]
trainData.labels=trainData.labels[{{1,trsize}}]
testData.data=testData.data[{{1,tesize}}]
testData.labels=testData.labels[{{1,tesize}}]
--label editing: collapse an n x 4 one-hot label matrix into an n x 1 tensor
--of class indices (1..4). If a row has several 1s, the last one wins, exactly
--as in the original nested-loop version.
local function onehot_to_index(onehot, n)
local out = torch.Tensor(n,1)
for row=1,n do
for class=1,4 do
if onehot[{ row,class }]==1 then
out[row]=class
end
end
end
return out
end
ftrl=onehot_to_index(trainData.labels, trsize)
trainData.labels=ftrl
ftel=onehot_to_index(testData.labels, tesize)
testData.labels=ftel
print('Size of training patches :')
print(trainData.data:size())
print('Size of testing patches :')
print(testData.data:size())
--preprocess/normalize train/test sets: per-channel zero mean / unit variance,
--with the test set normalized by the TRAIN statistics (no leakage).
print '<trainer> preprocessing data (color space + normalization)'
collectgarbage()
mean = {} -- store the mean, to normalize the test set in the future
stdv = {} -- store the standard-deviation for the future
for ch=1,4 do -- over each image channel
-- indexing with {…} ranges yields a view sharing storage, so in-place
-- add/div below modify trainData.data / testData.data directly
local train_channel = trainData.data[{ {}, {ch}, {}, {} }]
mean[ch] = train_channel:mean() -- mean estimation
print('Channel ' .. ch .. ', Mean: ' .. mean[ch])
train_channel:add(-mean[ch]) -- mean subtraction
stdv[ch] = train_channel:std() -- std estimation
print('Channel ' .. ch .. ', Standard Deviation: ' .. stdv[ch])
train_channel:div(stdv[ch]) -- std scaling
-- apply the same train-derived statistics to the test channel
local test_channel = testData.data[{ {}, {ch}, {}, {} }]
test_channel:add(-mean[ch])
test_channel:div(stdv[ch])
end
--saving mean and stdv so inference code can normalize new patches identically
torch.save('mean_sat4.t7',mean)
torch.save('stdv_sat4.t7',stdv)
--configuring optimizer: SGD hyper-parameters taken from the command line.
--optim.sgd mutates this table in place (e.g. stores its evaluation counter),
--and train() halves learningRate every opt.epoch_step epochs.
print(c.blue'==>' ..' configuring optimizer')
optimState = {
learningRate = opt.learningRate,
weightDecay = opt.weightDecay,
momentum = opt.momentum,
learningRateDecay = opt.learningRateDecay,
}
-- training function: one pass over the shuffled training set in equal-sized
-- mini-batches, SGD updates via optim.sgd, accuracy tracked in `confusion`.
-- Side effects: advances the global `epoch`, sets global `train_acc`, decays
-- optimState.learningRate, and checkpoints the model to model_sat4.net.
function train()
model:training()
epoch = epoch or 1
-- drop learning rate every "epoch_step" epochs
if epoch % opt.epoch_step == 0 then
optimState.learningRate = optimState.learningRate/2
end
print(c.blue '==>'.." online epoch # " .. epoch .. ' [batchSize = ' .. opt.batchSize .. ']')
local targets = torch.CudaTensor(opt.batchSize)
-- shuffle all sample indices, then cut into batchSize-long chunks
local batches = torch.randperm(trainData.data:size(1)):long():split(opt.batchSize)
-- remove last element so that all the batches have equal size
batches[#batches] = nil
local started = torch.tic()
for step, sample_ids in ipairs(batches) do
xlua.progress(step, #batches)
-- NOTE(review): inputs stay CPU DoubleTensors; presumably the model file
-- starts with a Copy-to-CudaTensor layer (as in cifar.torch) — confirm.
local batch_inputs = trainData.data:index(1, sample_ids)
targets:copy(trainData.labels:index(1, sample_ids))
-- closure evaluated by optim.sgd: returns loss and d(loss)/d(parameters)
local feval = function(x)
if x ~= parameters then parameters:copy(x) end
gradParameters:zero()
local outputs = model:forward(batch_inputs)
local loss = criterion:forward(outputs, targets)
local grad_outputs = criterion:backward(outputs, targets)
model:backward(batch_inputs, grad_outputs)
confusion:batchAdd(outputs, targets)
return loss, gradParameters
end
optim.sgd(feval, parameters, optimState)
end
confusion:updateValids()
print(('Train accuracy: '..c.cyan'%.2f'..' %%\t time: %.2f s'):format(
confusion.totalValid * 100, torch.toc(started)))
train_acc = confusion.totalValid * 100
print(confusion)
print('Train Accuracy:'..train_acc)
confusion:zero()
--saving model
torch.save('model_sat4.net',model)
epoch = epoch + 1
end
--test function: evaluate the model over the whole test set in fixed-size
--batches, accumulate the confusion matrix, and record each patch's predicted
--class index in the global tensor `klaseis` (read later when dumping
--predictions to file). Assumes tesize is divisible by bs.
function test()
-- disable flips, dropouts and batch normalization
model:evaluate()
print('==>'.." testing")
local bs = 125
klaseis=torch.DoubleTensor(tesize,1)
for i=1,testData.data:size(1),bs do
outputs = model:forward(testData.data:narrow(1,i,bs))
confusion:batchAdd(outputs, testData.labels:narrow(1,i,bs))
-- FIX: use bs everywhere instead of the hard-coded 125/124, so changing
-- bs can no longer silently desync the prediction buffer from the batches
local idx=torch.DoubleTensor(bs,1):fill(0)
--extract class predictions and put it to Tensor 'klaseis'
for v=1,bs do
-- argmax by scanning the 4 class scores; on ties the last max wins,
-- preserving the original behavior
local best=outputs[v]:max()
for u=1,4 do
if outputs[v][u]==best then idx[v]=u end
end
end
klaseis[{{i,i+bs-1}}]=idx
end
confusion:updateValids()
print('Test accuracy:', confusion.totalValid * 100)
print(confusion)
confusion:zero()
end
--main loop: alternate training and testing for opt.max_epoch epochs
for i=1,opt.max_epoch do
train()
test()
--writing test predictions of the trained model to file after the last epoch.
--FIX: compare against opt.max_epoch instead of the hard-coded 36, so the
--predictions are still written when --max_epoch is changed on the command line
if i==opt.max_epoch then
local ouf = assert(io.open("classes_sat4_alexnet.txt", "w"))
--FIX: loop bound is tesize (not hard-coded 100000) and the loop variable
--no longer shadows the epoch counter `i`
for j=1, tesize do
ouf:write(tostring(klaseis[j][1]))
ouf:write("\n")
end
ouf:close() -- idiomatic method-form close of the handle we opened
end
end