# parameters.yml
# Datasets configuration
dataset:
  # Dataset to use for training
  train: marmot
  # Dataset to use for validation
  val: marmot
  # Use a small slice of data for debugging
  dummy:
    # Enable or disable
    enabled: False
    # Number of examples to use
    size: 10
  # Marmot dataset configuration
  marmot:
    # Paths to Marmot examples
    path:
      - datasets/marmot/table_recognition/data/english/positive
      - datasets/marmot/table_recognition/data/chinese/positive
  # ICDAR13 dataset configuration
  icdar13:
    # Paths to ICDAR13 examples
    path:
      - datasets/icdar13/icdar2013-competition-dataset-with-gt/competition-dataset-eu
      - datasets/icdar13/icdar2013-competition-dataset-with-gt/competition-dataset-us
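# NOTE: a minimal sketch (assuming PyYAML; the `params` name is illustrative,
# not this project's actual code) of how this section is typically consumed:
#
#   import yaml
#
#   with open("parameters.yml") as f:
#       params = yaml.safe_load(f)
#   train_name = params["dataset"]["train"]              # "marmot"
#   train_paths = params["dataset"][train_name]["path"]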
# Generic configuration
generic:
  # Random seed for all sources of randomness
  random_seed: 42
  # Number of data loading workers
  workers: 4
  # Device on which data should be processed
  device: cpu
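# NOTE: a minimal sketch (assuming random, numpy and torch are the randomness
# sources; names are illustrative) of how these values might be applied:
#
#   import random
#   import numpy as np
#   import torch
#
#   random.seed(params["generic"]["random_seed"])
#   np.random.seed(params["generic"]["random_seed"])
#   torch.manual_seed(params["generic"]["random_seed"])
#   device = torch.device(params["generic"]["device"])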
# Weights & Biases configuration
wandb:
  # Enable or disable wandb
  enabled: False
  # Wandb project to log to
  project: table-detector
  # Wandb account to log to
  entity: wadaboa
  # What to log on wandb
  # Types: all, gradients, parameters
  watch: all
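# NOTE: a minimal sketch (assuming the wandb Python client; `model` is
# illustrative) of how this section maps onto wandb calls:
#
#   import wandb
#
#   wandb.init(project="table-detector", entity="wadaboa")
#   wandb.watch(model, log="all")  # log: "gradients", "parameters" or "all"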
# Training configuration
training:
  # Fraction of the data to use for training
  train_split: !!float 0.8
  # Number of training epochs
  epochs: 28
  # How often to print and log metrics
  log_interval: 1
  # Batch size
  batch_size: 2
  # How to handle model checkpoints during training
  checkpoints:
    # Enable or disable saving checkpoints
    save: True
    # How often to save checkpoints (epochs)
    frequency: 3
    # Where to save checkpoints
    path: checkpoints
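# NOTE: a minimal sketch (assuming PyTorch; `full_set`, `model` and `epoch`
# are illustrative) of how the split and checkpoint settings might be used:
#
#   import torch
#   from torch.utils.data import random_split
#
#   n_train = int(0.8 * len(full_set))  # train_split
#   train_set, val_set = random_split(full_set, [n_train, len(full_set) - n_train])
#   ...
#   if epoch % 3 == 0:  # checkpoints.frequency
#       torch.save(model.state_dict(), f"checkpoints/epoch_{epoch}.pt")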
# What ImageNet backbone to use
backbone:
  # Family of the backbone
  # Types: alexnet, densenet, mobilenet, resnet, vgg
  family: vgg
  # Actual backbone to use (among the ones in the above family)
  # See https://github.com/pytorch/vision/tree/master/torchvision/models
  type: vgg16
  # Whether to use pretrained weights
  pretrained: True
  # Desired input size
  # (check backbones.py to see minimum sizes for each model)
  input_size:
    # Inputs will be rescaled to the exact specified size
    exact:
      # Backbone input width
      width: 224
      # Backbone input height
      height: 224
    # Inputs will be rescaled to squares, respecting the
    # given size bounds
    bound:
      # Minimum size
      min: 800
      # Maximum size
      max: 1300
  # ImageNet normalization statistics (0-1 range)
  imagenet_params:
    # ImageNet images mean
    mean:
      - !!float 0.485
      - !!float 0.456
      - !!float 0.406
    # ImageNet images standard deviation
    std:
      - !!float 0.229
      - !!float 0.224
      - !!float 0.225
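# NOTE: a minimal sketch (assuming torchvision; not necessarily how
# backbones.py builds the model) of loading the backbone above and applying
# the ImageNet normalization statistics:
#
#   from torchvision import models, transforms
#
#   backbone = models.vgg16(pretrained=True).features
#   normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
#                                    std=[0.229, 0.224, 0.225])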
# What model to use for table detection
detector:
  # Actual detector to use
  # Types: rcnn, fast_rcnn, faster_rcnn
  type: faster_rcnn
  # Standard region proposals configuration
  region_proposals:
    # Region proposal algorithm
    type: edge_boxes
    # Maximum number of proposals per image
    max_proposals: 10
    # Selective search configuration
    selective_search:
      # Selective search type
      # Types: fast, quality
      type: quality
      # Selective search strategies
      strategies:
        # Color strategy
        color: True
        # Fill strategy
        fill: True
        # Size strategy
        size: True
        # Texture strategy
        texture: True
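    # NOTE: a minimal sketch (assuming opencv-contrib-python; `image` is
    # illustrative) of running selective search with the settings above:
    #
    #   import cv2
    #
    #   ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
    #   ss.setBaseImage(image)
    #   ss.switchToSelectiveSearchQuality()  # type: quality
    #   boxes = ss.process()[:10]            # max_proposals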
    # Edge boxes configuration
    edge_boxes:
      # Path to the edge boxes model
      # (https://github.com/opencv/opencv_extra/blob/master/testdata/cv/ximgproc/model.yml.gz)
      model_path: models/edge_boxes.gz
      # Step size of the sliding window search
      alpha: !!float 0.65
      # NMS threshold for object proposals
      beta: !!float 0.75
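    # NOTE: a minimal sketch (assuming opencv-contrib-python; `image` is an
    # illustrative float RGB array) of generating edge boxes proposals with
    # the settings above:
    #
    #   import cv2
    #
    #   edge_detector = cv2.ximgproc.createStructuredEdgeDetection("models/edge_boxes.gz")
    #   edges = edge_detector.detectEdges(image.astype("float32") / 255.0)
    #   orientation = edge_detector.computeOrientation(edges)
    #   eb = cv2.ximgproc.createEdgeBoxes(alpha=0.65, beta=0.75, maxBoxes=10)
    #   boxes, scores = eb.getBoundingBoxes(edges, orientation)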
  # During inference, only return proposals with a classification score
  # greater than the following parameter
  box_score_thresh: !!float 0.5
  # NMS threshold for the prediction head (used during inference)
  box_nms_thresh: !!float 0.5
  # Maximum number of detections per image, across all classes
  box_detections_per_img: 100
  # Minimum IoU between the proposals and the GT box for them to be
  # considered as positives during training of the classification head
  box_fg_iou_thresh: !!float 0.5
  # Maximum IoU between the proposals and the GT box for them to be
  # considered as negatives during training of the classification head
  box_bg_iou_thresh: !!float 0.5
  # Number of proposals that are sampled during training of the
  # classification head
  box_batch_size_per_image: 512
  # Proportion of positive proposals in a mini-batch during training
  # of the classification head
  box_positive_fraction: !!float 0.25
  # Weights for the encoding/decoding of the bounding boxes
  box_regression_weights:
    - !!float 10.0 # x
    - !!float 10.0 # y
    - !!float 5.0 # w
    - !!float 5.0 # h
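  # NOTE: a minimal sketch (assuming torchvision; `backbone` must expose
  # out_channels, and num_classes=2 for table/background is an assumption)
  # of how the box_* parameters above map onto torchvision's FasterRCNN:
  #
  #   from torchvision.models.detection import FasterRCNN
  #
  #   model = FasterRCNN(
  #       backbone,
  #       num_classes=2,
  #       box_score_thresh=0.5,
  #       box_nms_thresh=0.5,
  #       box_detections_per_img=100,
  #       box_fg_iou_thresh=0.5,
  #       box_bg_iou_thresh=0.5,
  #       box_batch_size_per_image=512,
  #       box_positive_fraction=0.25,
  #       bbox_reg_weights=(10.0, 10.0, 5.0, 5.0),
  #   )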
  # Faster R-CNN configuration
  faster_rcnn:
    # Anchors generation configuration
    anchors:
      # Anchor sizes for one feature map
      sizes:
        - 128
        - 256
        - 512
      # Anchor aspect ratios for one feature map
      ratios:
        - !!float 0.5
        - !!float 1.0
        - !!float 2.0
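  # NOTE: a minimal sketch (assuming torchvision) of building an anchor
  # generator from the sizes and ratios above:
  #
  #   from torchvision.models.detection.rpn import AnchorGenerator
  #
  #   anchor_generator = AnchorGenerator(
  #       sizes=((128, 256, 512),),
  #       aspect_ratios=((0.5, 1.0, 2.0),),
  #   )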
# What optimizer to use for gradient descent
optimizers:
  # Type of optimizer
  type: adam
  # Adam optimizer
  adam:
    # Learning rate
    lr: !!float 0.001
    # L2 penalty
    weight_decay: !!float 0
    # Whether to use the AMSGrad variant of this algorithm
    amsgrad: False
  # RMSProp optimizer
  rmsprop:
    # Learning rate
    lr: !!float 0.01
    # Momentum factor
    momentum: !!float 0
    # Smoothing constant
    alpha: !!float 0.99
    # L2 penalty
    weight_decay: !!float 0
  # Stochastic Gradient Descent with momentum optimizer
  sgd:
    # Learning rate
    lr: !!float 0.001
    # Momentum factor
    momentum: !!float 0
    # L2 penalty
    weight_decay: !!float 0
    # Enables Nesterov momentum
    nesterov: False
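# NOTE: a minimal sketch (assuming PyTorch; `model` is illustrative) of
# building the selected optimizer from the adam section above:
#
#   import torch
#
#   optimizer = torch.optim.Adam(model.parameters(), lr=0.001,
#                                weight_decay=0, amsgrad=False)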
# How to decay the learning rate during training
lr_schedulers:
  # Type of learning rate scheduler ('none' means fixed)
  type: none
  # Step scheduler
  step:
    # Period of learning rate decay (epochs)
    step_size: 30
    # Multiplicative factor of learning rate decay
    gamma: !!float 0.1
    # Index of the last epoch (-1 starts from scratch)
    last_epoch: -1
  # Multi step scheduler
  multi_step:
    # List of epoch indices (must be increasing)
    milestones:
      - 30
      - 80
    # Multiplicative factor of learning rate decay
    gamma: !!float 0.1
    # Index of the last epoch (-1 starts from scratch)
    last_epoch: -1
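# NOTE: a minimal sketch (assuming PyTorch) of building the schedulers
# configured above:
#
#   from torch.optim.lr_scheduler import StepLR, MultiStepLR
#
#   scheduler = StepLR(optimizer, step_size=30, gamma=0.1, last_epoch=-1)
#   # or:
#   scheduler = MultiStepLR(optimizer, milestones=[30, 80], gamma=0.1, last_epoch=-1)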