train.py
'''
PyTorch implementation of MobileNet_v2_deeplab semantic segmentation.
Training code.
Author: Zhengwei Li
Date: July 1, 2018
'''
import argparse
import os

# PyTorch includes
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

# data / dataloader
from data import dataset
# model
from model import deeplab_v3_plus, deeplab_xception, enet
# train helpers (provides set_lr, saveData, load_pretrain_pam)
from utils import *
# parameters
parser = argparse.ArgumentParser()
parser.add_argument('--dataDir', default='./data/', help='dataset directory')
parser.add_argument('--saveDir', default='./result', help='directory to save results')
parser.add_argument('--trainData', default='SBD', help='train dataset name')
parser.add_argument('--load', default='deeplab_v3_plus', help='model name to save/load')
parser.add_argument('--finetuning', action='store_true', default=False, help='resume training from a saved model')
parser.add_argument('--load_pre_train', action='store_true', default=False, help='load pre-trained weights')
parser.add_argument('--without_gpu', action='store_true', default=False, help='train on CPU')
parser.add_argument('--nThreads', type=int, default=2, help='number of threads for data loading')
parser.add_argument('--train_batch', type=int, default=4, help='input batch size for training')
parser.add_argument('--test_batch', type=int, default=8, help='input batch size for testing')
parser.add_argument('--gpus', type=int, nargs='+', default=[0], help='GPU IDs')
parser.add_argument('--lr', type=float, default=5e-4, help='learning rate')
parser.add_argument('--lrDecay', type=int, default=100, help='epoch interval for learning-rate decay')
parser.add_argument('--decayType', default='step', help='learning-rate decay schedule type')
parser.add_argument('--nEpochs', type=int, default=300, help='number of epochs to train')
parser.add_argument('--save_epoch', type=int, default=5, help='save the model every N epochs')
args = parser.parse_args()
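
# A hypothetical invocation with the defaults above (the paths are
# assumptions; adjust them to your checkout):
#   python train.py --dataDir ./data/ --trainData SBD --train_batch 4 \
#       --lr 5e-4 --nEpochs 300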
# Multi-GPUs / device selection: fall back to CPU when CUDA is unavailable
if args.without_gpu or not torch.cuda.is_available():
    print("use CPU !")
    device = torch.device('cpu')
else:
    n_gpu = torch.cuda.device_count()
    print("----------------------------------------------------------")
    print("| use GPU ! || Available GPU number is {} ! |".format(n_gpu))
    print("----------------------------------------------------------")
    device = torch.device('cuda')
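
# Note: args.gpus is not used anywhere in this script; multi-GPU training
# would go through the commented-out nn.DataParallel wrapper below.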
#-----------------------------------------------------
# Network
#---------------
# net = deeplab_v3_plus.DeepLabv_v3_plus_mv2_os_32(nInputChannels=3, n_classes=1)
net = deeplab_v3_plus.DeepLabv_v3_plus_mv2_os_8(nInputChannels=3, n_classes=1)
# net = enet.ENet(5)
# net = deeplab_xception.DeepLabv3_plus(nInputChannels=3, n_classes=15)

# if n_gpu > 1:
#     net = nn.DataParallel(net)

if args.load_pre_train:
    net = load_pretrain_pam(net)
net.to(device)
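
# load_pretrain_pam comes from utils (imported via *); judging by the flag
# name, it presumably initializes the network with pre-trained weights
# before training starts.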
#-----------------------------------------------------
# Loss
#---------------
# criterion = nn.CrossEntropyLoss(weight=None, size_average=False, ignore_index=-1).to(device)
criterion = nn.BCELoss()
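# nn.BCELoss expects probabilities in [0, 1], which is why the training loop
# below applies a sigmoid to the raw network outputs before the loss.
# (nn.BCEWithLogitsLoss fuses the sigmoid into the loss and is numerically
# more stable; this script keeps the explicit sigmoid.)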
#-----------------------------------------------------
# Data
#---------------
# train_data = dataset.SBD(base_dir=os.path.join(args.dataDir, 'benchmark_RELEASE'), split=['train', 'val'])
# test_data = dataset.VOC(base_dir=os.path.join(args.dataDir, 'VOCdevkit/VOC2012'), split='val')
train_data = getattr(dataset, args.trainData)(base_dir=args.dataDir)
# data loader
trainloader = DataLoader(train_data, batch_size=args.train_batch,
                         drop_last=True, shuffle=True,
                         num_workers=args.nThreads, pin_memory=False)
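# Each batch yielded by trainloader is a dict with 'image' and 'gt' tensors,
# as unpacked in the training loop below.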
#-----------------------------------------------------
# Train loop
#---------------
save = saveData(args)

# finetuning: resume from a previously saved model
if args.finetuning:
    net = save.load_model(net)

print("Start Train ! ... ...")
for epoch in range(args.nEpochs):
    loss_tr = 0
    loss_ = 0

    # a fresh SGD optimizer is built every epoch with the decayed learning
    # rate from set_lr (note this also resets the momentum buffers)
    # for param in net.mobilenet_features.parameters():
    #     param.requires_grad = False
    lr_ = set_lr(args, epoch)
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                          lr=lr_, momentum=0.99, weight_decay=5e-4)
    # scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.90)

    net.train()
    for i, sample_batched in enumerate(trainloader):
        inputs, gts = sample_batched['image'], sample_batched['gt']
        inputs, gts = inputs.to(device), gts.to(device)

        output = net(inputs)
        # BCELoss needs probabilities, so squash the logits with a sigmoid
        output = torch.sigmoid(output)

        output_flat = output.view(-1)
        gts = gts.view(-1)
        loss = criterion(output_flat, gts)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_ += loss.item()
        loss_tr = loss_ / (i + 1)  # running mean loss over the epoch so far
    # log and checkpoint every save_epoch epochs
    if (epoch + 1) % args.save_epoch == 0:
        log = "[{} / {}] \tLearning_rate: {}\t total_loss: {:.5f}".format(
            epoch + 1, args.nEpochs, lr_, loss_tr)
        print(log)
        save.save_log(log)
        save.save_model(net)