[svace] fix svace issues
Fixed all Svace static-analysis issues on the main branch.

**Self-evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test:   [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Seungbaek Hong <sb92.hong@samsung.com>
baek2sm committed Mar 28, 2024
1 parent 6be8e84 commit 03d38c2
Showing 7 changed files with 22 additions and 8 deletions.
4 changes: 3 additions & 1 deletion Applications/utils/jni/bitmap_helpers.cpp
@@ -13,6 +13,8 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 @file bitmap_helpers.cpp
 @brief bitmap_helpers from tensorflow
+@author TensorFlow Authors
+@bug there are no known bugs
 ==============================================================================*/

@@ -89,7 +91,7 @@ uint8_t *read_bmp(const std::string &input_bmp_name, int *width, int *height,
 
   const uint8_t *img_bytes = new uint8_t[len];
   file.seekg(0, std::ios::beg);
-  file.read((char *)img_bytes, len);
+  file.read((char *)img_bytes, static_cast<std::streamsize>(len));
   const int32_t header_size =
     *(reinterpret_cast<const int32_t *>(img_bytes + 10));
   *width = *(reinterpret_cast<const int32_t *>(img_bytes + 18));
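The change above silences a signed/unsigned conversion warning: `std::istream::read` expects a signed `std::streamsize` count, so if `len` is an unsigned type the call converts it implicitly, which is exactly the kind of conversion Svace reports. A minimal sketch of the same pattern, using a hypothetical `read_all` helper rather than the nntrainer code:

```cpp
// Sketch only: make the signed/unsigned conversion explicit when calling
// std::istream::read, which takes a std::streamsize (a signed type).
#include <cstddef>
#include <fstream>
#include <ios>
#include <vector>

std::vector<char> read_all(std::ifstream &file, std::size_t len) {
  std::vector<char> buf(len);
  file.seekg(0, std::ios::beg);
  // The explicit cast documents the conversion that was previously implicit.
  file.read(buf.data(), static_cast<std::streamsize>(len));
  return buf;
}
```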
8 changes: 5 additions & 3 deletions nntrainer/dataset/raw_file_data_producer.cpp
@@ -67,8 +67,9 @@ RawFileDataProducer::finalize(const std::vector<TensorDim> &input_dims,
                               std::vector<Tensor> &labels) {
   NNTR_THROW_IF(idx >= sz, std::range_error)
     << "given index is out of bound, index: " << idx << " size: " << sz;
-  file.seekg(idx * sample_size * RawFileDataProducer::pixel_size,
-             std::ios_base::beg);
+  std::streamoff offset = static_cast<std::streamoff>(
+    idx * sample_size * RawFileDataProducer::pixel_size);
+  file.seekg(offset, std::ios_base::beg);
   for (auto &input : inputs) {
     input.read(file);
   }
@@ -107,7 +108,8 @@ RawFileDataProducer::size(const std::vector<TensorDim> &input_dims,
   // << " Given file does not align with the given sample size, sample size: "
   // << sample_size << " file_size: " << file_size;
 
-  return file_size / (sample_size * RawFileDataProducer::pixel_size);
+  return static_cast<unsigned int>(file_size) /
+         (sample_size * RawFileDataProducer::pixel_size);
 }
 
 void RawFileDataProducer::exportTo(
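Note that in the `seekg` fix the cast is applied to the finished product, so if the three operands are 32-bit values the multiplication can still wrap before being widened to `std::streamoff`. A hedged sketch of the variant that widens first (illustrative parameter names, not the producer's real members):

```cpp
// Sketch only: compute a file offset in std::streamoff from the start so
// the multiplication itself happens in the wide (typically 64-bit) type.
#include <fstream>
#include <ios>

void seek_sample(std::ifstream &file, unsigned int idx,
                 unsigned int sample_size, unsigned int pixel_size) {
  // Widening the first operand promotes the whole product to std::streamoff.
  std::streamoff offset =
    static_cast<std::streamoff>(idx) * sample_size * pixel_size;
  file.seekg(offset, std::ios_base::beg);
}
```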
2 changes: 2 additions & 0 deletions nntrainer/layers/gru.cpp
@@ -94,6 +94,8 @@ void GRULayer::finalize(InitLayerContext &context) {
   const TensorDim &input_dim = context.getInputDimensions()[0];
   const unsigned int batch_size = input_dim.batch();
   const unsigned int max_timestep = input_dim.height();
+  NNTR_THROW_IF(max_timestep < 1, std::runtime_error)
+    << "max timestep must be greater than 0 in gru layer.";
   const unsigned int feature_size = input_dim.width();
 
   // if return_sequences == False :
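The same guard is added to the LSTM and RNN layers below. As a rough plain-C++ equivalent (the real code uses nntrainer's `NNTR_THROW_IF` macro, which streams the message into the thrown exception), the check amounts to:

```cpp
// Sketch only: what the max_timestep guard boils down to without the macro.
#include <sstream>
#include <stdexcept>

void check_max_timestep(unsigned int max_timestep) {
  if (max_timestep < 1) {
    std::ostringstream msg;
    msg << "max timestep must be greater than 0, got: " << max_timestep;
    throw std::runtime_error(msg.str());
  }
}
```

Since `max_timestep` comes from `input_dim.height()`, the guard rejects degenerate zero-height inputs before the layer computes its output dimensions.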
2 changes: 2 additions & 0 deletions nntrainer/layers/lstm.cpp
@@ -454,6 +454,8 @@ void LSTMLayer::finalize(InitLayerContext &context) {
   if (!std::get<props::MaxTimestep>(lstm_props).empty())
     max_timestep =
       std::max(max_timestep, std::get<props::MaxTimestep>(lstm_props).get());
+  NNTR_THROW_IF(max_timestep < 1, std::runtime_error)
+    << "max timestep must be greater than 0 in lstm layer.";
   std::get<props::MaxTimestep>(lstm_props).set(max_timestep);
   const unsigned int feature_size = input_dim.width();
 
2 changes: 2 additions & 0 deletions nntrainer/layers/rnn.cpp
@@ -77,6 +77,8 @@ void RNNLayer::finalize(InitLayerContext &context) {
   const TensorDim &input_dim = context.getInputDimensions()[SINGLE_INOUT_IDX];
   const unsigned int batch_size = input_dim.batch();
   const unsigned int max_timestep = input_dim.height();
+  NNTR_THROW_IF(max_timestep < 1, std::runtime_error)
+    << "max timestep must be greater than 0 in rnn layer.";
   const unsigned int feature_size = input_dim.width();
 
   // output_dim = [ batch, 1, (return_sequences ? time_iteration : 1), unit ]
3 changes: 2 additions & 1 deletion nntrainer/utils/util_func.cpp
@@ -210,7 +210,8 @@ char *getRealpath(const char *name, char *resolved) {
 #ifdef _WIN32
   return _fullpath(resolved, name, MAX_PATH_LENGTH);
 #else
-  return realpath(name, resolved);
+  resolved = realpath(name, nullptr);
+  return resolved;
 #endif
 }
 
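On the POSIX side, passing a null buffer changes the `realpath` contract: with a `NULL` second argument, `realpath` allocates the result with `malloc`, so the caller owns the returned buffer and must `free` it. The fixed-buffer form is a classic static-analysis finding because the caller's `resolved` may be smaller than the resolved path. A standalone sketch of that contract:

```cpp
// Sketch only (POSIX): realpath with a NULL buffer allocates the result.
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  char *resolved = realpath(".", NULL); // allocated by realpath via malloc
  if (resolved != NULL) {
    printf("%s\n", resolved);
    free(resolved); // caller owns the buffer and must release it
  }
  return 0;
}
```

One consequence of this design choice: callers of `getRealpath` on non-Windows builds now receive a heap-allocated string and are responsible for freeing it.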
9 changes: 6 additions & 3 deletions test/unittest/compiler/unittest_tflite_export.cpp
@@ -23,6 +23,7 @@
 #include <node_exporter.h>
 #include <optimizer.h>
 #include <realizer.h>
+#include <stdlib.h>
 
 #include <nntrainer_test_util.h>
 
@@ -43,6 +44,8 @@ std::vector<float> ans;
 std::vector<float *> in_f;
 std::vector<float *> l_f;
 
+unsigned int seed = 0;
+
 /**
  * @brief make "key=value" from key and value
  *
@@ -155,7 +158,7 @@ TEST(nntrainerInterpreterTflite, simple_fc) {
   float *nntr_input = new float[data_size];
 
   for (unsigned int i = 0; i < data_size; i++) {
-    auto rand_float = static_cast<float>(std::rand() / (RAND_MAX + 1.0));
+    auto rand_float = static_cast<float>(rand_r(&seed) / (RAND_MAX + 1.0));
     input_data.push_back(rand_float);
     nntr_input[i] = rand_float;
   }
@@ -284,7 +287,7 @@ TEST(nntrainerInterpreterTflite, part_of_resnet_0) {
   float *nntr_input = new float[data_size];
 
   for (unsigned int i = 0; i < data_size; i++) {
-    auto rand_float = static_cast<float>(std::rand() / (RAND_MAX + 1.0));
+    auto rand_float = static_cast<float>(rand_r(&seed) / (RAND_MAX + 1.0));
     input_data.push_back(rand_float);
     nntr_input[i] = rand_float;
   }
@@ -365,7 +368,7 @@ TEST(nntrainerInterpreterTflite, MNIST_FULL_TEST) {
   float nntr_input[28 * 28];
 
   for (unsigned int i = 0; i < data_size; i++) {
-    auto rand_float = static_cast<float>(std::rand() / (RAND_MAX + 1.0));
+    auto rand_float = static_cast<float>(rand_r(&seed) / (RAND_MAX + 1.0));
     input_data.push_back(rand_float);
     nntr_input[i] = rand_float;
   }
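All three tests swap `std::rand` for `rand_r`, the reentrant POSIX variant declared in `<stdlib.h>` (hence the new include and the file-scope `seed`): it keeps its state in a caller-supplied seed instead of hidden global state, which is the usual reason analyzers flag `rand`. A standalone sketch of the pattern, assuming a POSIX platform:

```cpp
// Sketch only (POSIX): rand_r keeps PRNG state in an explicit seed.
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  unsigned int seed = 0; // explicit, caller-owned state, as in the test file
  for (int i = 0; i < 4; i++) {
    // Same expression as the diff: scale rand_r's output into [0.0, 1.0).
    float rand_float = (float)(rand_r(&seed) / (RAND_MAX + 1.0));
    printf("%f\n", rand_float);
  }
  return 0;
}
```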
