configuration.py
# -*- coding: utf-8 -*-
# Author: Jay Yip
# Date: 04 Mar 2017
"""Set the configuration of model and training parameters."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


class ModelConfig(object):
    """Model architecture and input-pipeline parameters."""

    def __init__(self):
        # Feature names of context and tags in the input SequenceExamples
        self.context_feature_name = 'content_id'
        self.tag_feature_name = 'tag_id'
        self.length_name = 'length'
        # Number of threads for prefetching SequenceExamples
        # self.num_input_reader_thread = 2
        # Number of preprocessing threads
        self.num_preprocess_thread = 2
        # Batch size
        self.batch_size = 32
        # LSTM input and output dimensions
        self.embedding_size = 64
        self.num_lstm_units = 128
        # Fully connected layer output dimension (number of tags)
        self.num_tag = 5
        # LSTM dropout keep probability
        self.lstm_dropout_keep_prob = 0.35
        # Margin loss discount
        self.margin_loss_discount = 0.2
        # L2 regularization strength
        self.regularization = 0.0001
        # Maximum sequence length
        self.seq_max_len = 60
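
# A minimal usage sketch (an assumption, not part of the original file):
# with the 2017-era TensorFlow 1.x API, the feature names above would
# typically drive SequenceExample parsing, and the LSTM fields would build
# the recurrent cell, roughly like this:
#
#     config = ModelConfig()
#     context, sequence = tf.parse_single_sequence_example(
#         serialized_example,
#         context_features={
#             config.length_name: tf.FixedLenFeature([], dtype=tf.int64)},
#         sequence_features={
#             config.context_feature_name:
#                 tf.FixedLenSequenceFeature([], dtype=tf.int64),
#             config.tag_feature_name:
#                 tf.FixedLenSequenceFeature([], dtype=tf.int64)})
#     cell = tf.contrib.rnn.BasicLSTMCell(config.num_lstm_units)
#     cell = tf.contrib.rnn.DropoutWrapper(
#         cell, output_keep_prob=config.lstm_dropout_keep_prob)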

class TrainingConfig(object):
    """Optimization, learning-rate schedule, and checkpointing parameters."""

    def __init__(self):
        self.num_examples_per_epoch = 500000
        # Optimizer for training
        self.optimizer = 'Adam'
        # Learning rate; if the decay factor is <= 0, no decay is applied
        self.initial_learning_rate = 0.01
        self.learning_rate_decay_factor = 0.5
        self.num_epochs_per_decay = 3
        # Gradient clipping
        self.clip_gradients = 1.0
        # Maximum number of checkpoints to keep
        self.max_checkpoints_to_keep = 2
        # Total number of training steps
        self.training_step = 3000000
        # Whether to initialize the embedding matrix randomly
        self.embedding_random = False
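

if __name__ == '__main__':
    # A hedged sketch (not part of the original file) of how the decay fields
    # could combine into a learning-rate schedule; the tf.train calls below
    # are assumed from the 2017-era TensorFlow 1.x API.
    import tensorflow as tf

    model_config = ModelConfig()
    train_config = TrainingConfig()

    # Decay the learning rate once every num_epochs_per_decay epochs
    steps_per_epoch = (train_config.num_examples_per_epoch //
                       model_config.batch_size)
    decay_steps = steps_per_epoch * train_config.num_epochs_per_decay

    global_step = tf.Variable(0, trainable=False, name='global_step')
    learning_rate = tf.train.exponential_decay(
        train_config.initial_learning_rate,
        global_step,
        decay_steps,
        train_config.learning_rate_decay_factor,
        staircase=True)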