-
Notifications
You must be signed in to change notification settings - Fork 18.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add memory data layer to pass data directly into the network #196
Changes from all commits
7277b51
da1c1cb
51655ba
ab02059
ee905de
cb3017d
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,69 @@ | ||
// Copyright 2014 kloudkl@github | ||
|
||
#include <vector> | ||
|
||
#include "caffe/layer.hpp" | ||
#include "caffe/vision_layers.hpp" | ||
|
||
using std::string; | ||
|
||
namespace caffe { | ||
|
||
template <typename Dtype> | ||
MemoryDataLayer<Dtype>::MemoryDataLayer(const LayerParameter& param) | ||
: Layer<Dtype>(param), num_data_blobs_(param.datum_dims_size()) { | ||
for (int i = 0; i < param.datum_dims_size(); ++i) { | ||
datum_dims_.push_back(param.datum_dims(i)); | ||
} | ||
} | ||
|
||
template <typename Dtype> | ||
void MemoryDataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom, | ||
vector<Blob<Dtype>*>* top) { | ||
CHECK_EQ(bottom.size(), num_data_blobs_) << | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @kloudkl The idea behind of Data Layers is that they don't any bottom blobs, they just provide top blobs of data for the next layers. |
||
"MemoryDataLayer takes " << num_data_blobs_ << " blobs as input."; | ||
CHECK_EQ(top->size(), num_data_blobs_) << | ||
"MemoryDataLayer takes " << num_data_blobs_ << " blobs as output."; | ||
for (int i = 0; i < num_data_blobs_; ++i) { | ||
CHECK_EQ(bottom[i]->channels(), datum_dims_[i].channels()); | ||
CHECK_EQ(bottom[i]->height(), datum_dims_[i].height()); | ||
CHECK_EQ(bottom[i]->width(), datum_dims_[i].width()); | ||
(*top)[i]->Reshape(bottom[i]->num(), datum_dims_[i].channels(), | ||
datum_dims_[i].height(), datum_dims_[i].width()); | ||
} | ||
} | ||
|
||
template <typename Dtype> | ||
void MemoryDataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom, | ||
vector<Blob<Dtype>*>* top) { | ||
for (int i = 0; i < num_data_blobs_; ++i) { | ||
memcpy((*top)[i]->mutable_cpu_data(), bottom[i]->cpu_data(), | ||
sizeof(Dtype) * bottom[i]->count()); | ||
} | ||
} | ||
|
||
template <typename Dtype> | ||
void MemoryDataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, | ||
vector<Blob<Dtype>*>* top) { | ||
for (int i = 0; i < num_data_blobs_; ++i) { | ||
CUDA_CHECK(cudaMemcpy((*top)[i]->mutable_gpu_data(), bottom[i]->gpu_data(), | ||
sizeof(Dtype) * bottom[i]->count(), cudaMemcpyDefault | ||
/**< Default based unified virtual address space */)); | ||
} | ||
} | ||
|
||
template <typename Dtype> | ||
Dtype MemoryDataLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top, | ||
const bool propagate_down, vector<Blob<Dtype>*>* bottom) { | ||
return Dtype(0.); | ||
} | ||
|
||
template <typename Dtype> | ||
Dtype MemoryDataLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, | ||
const bool propagate_down, vector<Blob<Dtype>*>* bottom) { | ||
return Dtype(0.); | ||
} | ||
|
||
INSTANTIATE_CLASS(MemoryDataLayer); | ||
|
||
} // namespace caffe |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -83,31 +83,44 @@ TYPED_TEST(DataLayerTest, TestRead) { | |
EXPECT_EQ(this->blob_top_label_->height(), 1); | ||
EXPECT_EQ(this->blob_top_label_->width(), 1); | ||
|
||
// Go through the data 100 times | ||
for (int iter = 0; iter < 100; ++iter) { | ||
layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); | ||
for (int i = 0; i < 5; ++i) { | ||
EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); | ||
} | ||
for (int i = 0; i < 5; ++i) { | ||
for (int j = 0; j < 24; ++j) { | ||
EXPECT_EQ(i, this->blob_top_data_->cpu_data()[i * 24 + j]) | ||
<< "debug: i " << i << " j " << j; | ||
Caffe::Brew modes[] = {Caffe::CPU, Caffe::GPU}; | ||
for (int n_mode = 0; n_mode < 2; ++n_mode) { | ||
Caffe::set_mode(modes[n_mode]); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why do you modify test_data_layer in this PR? That change should be made in a separate PR. |
||
for (int iter = 0; iter < 100; ++iter) { | ||
layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); | ||
for (int i = 0; i < 5; ++i) { | ||
EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); | ||
} | ||
for (int i = 0; i < 5; ++i) { | ||
for (int j = 0; j < 24; ++j) { | ||
EXPECT_EQ(i, this->blob_top_data_->cpu_data()[i * 24 + j]) | ||
<< "debug: i " << i << " j " << j; | ||
} | ||
} | ||
} | ||
} | ||
} | ||
|
||
// Same test, in GPU mode. | ||
Caffe::set_mode(Caffe::GPU); | ||
for (int iter = 0; iter < 100; ++iter) { | ||
layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); | ||
for (int i = 0; i < 5; ++i) { | ||
EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); | ||
} | ||
for (int i = 0; i < 5; ++i) { | ||
for (int j = 0; j < 24; ++j) { | ||
EXPECT_EQ(i, this->blob_top_data_->cpu_data()[i * 24 + j]) | ||
<< "debug: i " << i << " j " << j; | ||
TYPED_TEST(DataLayerTest, TestSetDataAndLabel) { | ||
LayerParameter param; | ||
param.set_batchsize(5); | ||
param.set_source(this->filename); | ||
DataLayer<TypeParam> layer(param); | ||
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); | ||
|
||
Caffe::Brew modes[] = {Caffe::CPU, Caffe::GPU}; | ||
for (int n_mode = 0; n_mode < 2; ++n_mode) { | ||
Caffe::set_mode(modes[n_mode]); | ||
for (int iter = 0; iter < 100; ++iter) { | ||
layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); | ||
for (int i = 0; i < 5; ++i) { | ||
EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); | ||
} | ||
for (int i = 0; i < 5; ++i) { | ||
for (int j = 0; j < 24; ++j) { | ||
EXPECT_EQ(i, this->blob_top_data_->cpu_data()[i * 24 + j]) | ||
<< "debug: i " << i << " j " << j; | ||
} | ||
} | ||
} | ||
} | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
If you remove the input part of the prototxt, then you will need to update the Python and MATLAB wrappers as well; otherwise they will not work.