Skip to content

Commit

Permalink
[Clean/iniTest] add logic to erase ini after test
Browse files Browse the repository at this point in the history
This patch adds logic to erase the ini file after each test for better
determinism and a cleaner build directory.

v2: also deprecating config_str in favor of
`ScopedIni`

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Jihoon Lee <jhoon.it.lee@samsung.com>
  • Loading branch information
zhoonit authored and jijoongmoon committed Mar 2, 2021
1 parent d12929d commit 68e7dab
Show file tree
Hide file tree
Showing 5 changed files with 263 additions and 389 deletions.
44 changes: 36 additions & 8 deletions test/ccapi/unittest_ccapi.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -156,18 +156,46 @@ TEST(ccapi_dataset, construct_02_p) {
TEST(nntrainer_ccapi, train_with_config_01_p) {
std::unique_ptr<ml::train::Model> model;

std::string config_file = "./test_train_01_p.ini";
RESET_CONFIG(config_file.c_str());

replaceString("Input_Shape = 1:1:62720", "Input_Shape=1:1:62720", config_file,
config_str);
replaceString("batch_size = 32", "batch_size = 16", config_file, config_str);
replaceString("BufferSize=100", "", config_file, config_str);
static IniSection model_base("Model", "Type = NeuralNetwork"
" | Learning_rate = 0.0001"
" | Decay_rate = 0.96"
" | Decay_steps = 1000"
" | Epochs = 1"
" | Optimizer = adam"
" | Loss = cross"
" | Weight_Regularizer = l2norm"
" | weight_regularizer_constant = 0.005"
" | Save_Path = 'model.bin'"
" | batch_size = 32"
" | beta1 = 0.9"
" | beta2 = 0.9999"
" | epsilon = 1e-7");

static IniSection dataset("Dataset", "BufferSize=100"
" | TrainData = trainingSet.dat"
" | ValidData = valSet.dat"
" | LabelData = label.dat");

static IniSection inputlayer("inputlayer", "Type = input"
"| Input_Shape = 1:1:62720"
"| bias_initializer = zeros"
"| Normalization = true"
"| Activation = sigmoid");

static IniSection outputlayer("outputlayer", "Type = fully_connected"
"| input_layers = inputlayer"
"| Unit = 10"
"| bias_initializer = zeros"
"| Activation = softmax");

ScopedIni s("test_train_01_p",
{model_base + "batch_size = 16", dataset + "-BufferSize",
inputlayer, outputlayer});

EXPECT_NO_THROW(model =
ml::train::createModel(ml::train::ModelType::NEURAL_NET));

EXPECT_EQ(model->loadFromConfig(config_file), ML_ERROR_NONE);
EXPECT_EQ(model->loadFromConfig(s.getIniName()), ML_ERROR_NONE);
EXPECT_EQ(model->compile(), ML_ERROR_NONE);
EXPECT_EQ(model->initialize(), ML_ERROR_NONE);
EXPECT_NO_THROW(model->train());
Expand Down
179 changes: 22 additions & 157 deletions test/include/nntrainer_test_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -213,7 +213,7 @@ class IniTestWrapper {
* @brief erase ini
*
*/
void erase_ini() { remove(getIniName().c_str()); }
void erase_ini() noexcept { remove(getIniName().c_str()); }

bool operator==(const IniTestWrapper &rhs) const {
return name == rhs.name && sections == rhs.sections;
Expand Down Expand Up @@ -290,150 +290,27 @@ class IniTestWrapper {
Sections sections;
};

/// @todo: migrate this to datafile unittest
/// @brief canned ini contents for a small fully-connected model
/// (Model / DataSet / inputlayer / outputlayer sections), one key per line.
const std::string config_str = R"cfg([Model]
Type = NeuralNetwork
Learning_rate = 0.0001
Decay_rate = 0.96
Decay_steps = 1000
Epochs = 1
Optimizer = adam
Loss = cross
Weight_Regularizer = l2norm
weight_regularizer_constant = 0.005
Save_Path = 'model.bin'
batch_size = 32
beta1 = 0.9
beta2 = 0.9999
epsilon = 1e-7
[DataSet]
BufferSize=100
TrainData = trainingSet.dat
ValidData = valSet.dat
LabelData = label.dat
[inputlayer]
Type = input
Input_Shape = 1:1:62720
bias_initializer = zeros
Normalization = true
Activation = sigmoid
[outputlayer]
Type = fully_connected
input_layers = inputlayer
Unit = 10
bias_initializer = zeros
Activation = softmax
)cfg";

/// @brief canned ini contents for a small conv model
/// (Model / DataSet / conv2dlayer / outputlayer sections), one key per line.
const std::string config_str2 = R"cfg([Model]
Type = NeuralNetwork
Learning_rate = 0.0001
Decay_rate = 0.96
Decay_steps = 1000
Epochs = 1
Optimizer = adam
Loss = cross
Weight_Regularizer = l2norm
weight_regularizer_constant = 0.005
Model = 'model.bin'
batch_size = 32
beta1 = 0.9
beta2 = 0.9999
epsilon = 1e-7
[DataSet]
BufferSize=100
TrainData = trainingSet.dat
ValidData = valSet.dat
LabelData = label.dat
[conv2dlayer]
Type = conv2d
Input_Shape = 3:28:28
bias_initializer = zeros
Activation = sigmoid
weight_regularizer=l2norm
weight_regularizer_constant=0.005
filters=6
kernel_size=5,5
stride=1,1
padding=0,0
weight_initializer=xavier_uniform
flatten = false
[outputlayer]
Type = fully_connected
input_layers = conv2dlayer
Unit = 10
bias_initializer = zeros
Activation = softmax
)cfg";
/**
* @brief This class wraps IniTestWrapper, this class must live longer than the
* IniTestWrapper contained inside
*
*/
class ScopedIni {
public:
  /**
   * @brief Construct from a pre-built wrapper; the ini file is written to
   * disk immediately.
   *
   * @param ini_ wrapper describing the ini file (copied in)
   */
  ScopedIni(const IniTestWrapper &ini_) : ini(ini_) { ini.save_ini(); }

  /**
   * @brief Construct from a name and sections; the ini file is written to
   * disk immediately.
   *
   * @param name_ ini file name
   * @param sections_ sections to serialize into the file
   */
  ScopedIni(const std::string &name_,
            const IniTestWrapper::Sections &sections_) :
    ini(name_, sections_) {
    ini.save_ini();
  }

  /**
   * The destructor erases the file on disk, so two copies would both try to
   * erase the same file while one of them is still in use; forbid copy and
   * move to keep a single owner per file (Rule of Five).
   */
  ScopedIni(const ScopedIni &) = delete;
  ScopedIni &operator=(const ScopedIni &) = delete;
  ScopedIni(ScopedIni &&) = delete;
  ScopedIni &operator=(ScopedIni &&) = delete;

  /**
   * @brief get the name of the ini file managed by this scope
   *
   * @return std::string ini file name
   */
  std::string getIniName() { return ini.getIniName(); }

  /// @brief remove the ini file from disk when the scope ends
  ~ScopedIni() { ini.erase_ini(); }

private:
  IniTestWrapper ini; ///< owned wrapper; must not outlive this object
};

#define GEN_TEST_INPUT(input, eqation_i_j_k_l) \
do { \
Expand All @@ -449,18 +326,6 @@ const std::string config_str2 = "[Model]"
} \
} while (0)

/**
 * @brief delete a leftover config file so a test starts from a clean state.
 * Does nothing when the file does not exist; logs an error when the file
 * exists but cannot be removed.
 */
#define RESET_CONFIG(conf_name)                                 \
  do {                                                          \
    std::ifstream file_stream(conf_name, std::ifstream::in);    \
    if (file_stream.good()) {                                   \
      file_stream.close();                                      \
      if (std::remove(conf_name) != 0)                          \
        ml_loge("Error: Cannot delete file: %s", conf_name);    \
      else                                                      \
        ml_logi("Info: deleting file: %s", conf_name);          \
    }                                                           \
  } while (0)

/**
* @brief return a tensor filled with contant value with dimension
*/
Expand Down
Loading

0 comments on commit 68e7dab

Please sign in to comment.