Fixed FlattenLayer Backward_cpu/gpu have no return value
kloudkl committed Jan 11, 2014
1 parent 1ff7241 · commit 842a435
Showing 3 changed files with 18 additions and 8 deletions.
2 changes: 2 additions & 0 deletions src/caffe/layers/flatten_layer.cpp
@@ -43,6 +43,7 @@ Dtype FlattenLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
caffe_copy(count_, top_diff, bottom_diff);
return Dtype(0);
}


@@ -52,6 +53,7 @@ Dtype FlattenLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
caffe_gpu_copy(count_, top_diff, bottom_diff);
return Dtype(0);
}

INSTANTIATE_CLASS(FlattenLayer);
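
For context, Backward_cpu and Backward_gpu here are declared to return a Dtype, which the gradient checker accumulates as "any additional loss from the layer"; before this commit both methods fell off the end without returning, which is undefined behavior for a value-returning function in C++. FlattenLayer only reshapes data, so its backward pass copies the top diff through unchanged and contributes zero loss. A minimal self-contained sketch of the same bug class and fix (plain C++, not Caffe code; all names are illustrative):

#include <cstdio>
#include <vector>

// Stand-in for the backward pass of a reshape-style layer: it copies the
// incoming gradient through unchanged and reports how much loss the layer
// itself adds to the objective.
float backward_pass(const std::vector<float>& top_diff,
                    std::vector<float>* bottom_diff) {
  *bottom_diff = top_diff;  // the gradient of a flatten/reshape is the identity
  // Before the fix, the Caffe methods simply ended here. Reaching the end of
  // a non-void function without a return is undefined behavior, and a caller
  // that sums the returned loss contributions would read garbage.
  return 0.0f;  // the layer adds nothing to the objective
}

int main() {
  std::vector<float> top = {1.0f, 2.0f, 3.0f};
  std::vector<float> bottom(top.size(), 0.0f);
  float extra_loss = backward_pass(top, &bottom);
  std::printf("extra loss = %f, bottom_diff[0] = %f\n", extra_loss, bottom[0]);
  return 0;
}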
3 changes: 3 additions & 0 deletions src/caffe/test/test_flatten_layer.cpp
@@ -22,6 +22,7 @@ class FlattenLayerTest : public ::testing::Test {
FlattenLayerTest()
: blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
blob_top_(new Blob<Dtype>()) {
Caffe::set_random_seed(1701);
// fill the values
FillerParameter filler_param;
GaussianFiller<Dtype> filler(filler_param);
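
The fixture seeds Caffe's RNG with the fixed constant 1701 before the GaussianFiller is used to fill the bottom blob, so every run of the test sees the same data and any failure is reproducible. A standalone illustration of the same idea using the standard library rather than Caffe's filler (names are illustrative):

#include <cassert>
#include <cstddef>
#include <random>
#include <vector>

// Deterministic test data: a fixed seed makes the Gaussian draws identical
// from run to run.
std::vector<float> fill_gaussian(unsigned seed, std::size_t n) {
  std::mt19937 gen(seed);
  std::normal_distribution<float> dist(0.0f, 1.0f);
  std::vector<float> data(n);
  for (float& v : data) v = dist(gen);
  return data;
}

int main() {
  const unsigned kSeed = 1701;                // same constant the fixture uses
  const std::size_t kCount = 2 * 3 * 6 * 5;   // elements in the bottom blob
  assert(fill_gaussian(kSeed, kCount) == fill_gaussian(kSeed, kCount));
  return 0;
}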
@@ -72,6 +73,8 @@ TYPED_TEST(FlattenLayerTest, TestGPU) {
for (int c = 0; c < 3 * 6 * 5; ++c) {
EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0),
this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5));
EXPECT_EQ(this->blob_top_->data_at(1, c, 0, 0),
this->blob_bottom_->data_at(1, c / (6 * 5), (c / 5) % 6, c % 5));
}
}
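
The loop above checks that element c along the flattened channel axis of the top blob equals element (c / (6 * 5), (c / 5) % 6, c % 5) of the bottom blob, for both items in the batch. The following standalone check (not part of the test suite) confirms that this index arithmetic inverts row-major flattening of a 3 x 6 x 5 volume:

#include <cassert>

int main() {
  const int channels = 3, height = 6, width = 5;
  for (int c = 0; c < channels * height * width; ++c) {
    // Decompose the flat index exactly as the test does.
    const int ch = c / (height * width);
    const int h = (c / width) % height;
    const int w = c % width;
    // Recomposing the row-major offset from (ch, h, w) must round-trip to c.
    assert(ch * height * width + h * width + w == c);
  }
  return 0;
}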

21 changes: 13 additions & 8 deletions src/caffe/test/test_gradient_check_util.hpp
@@ -82,11 +82,11 @@ void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
blobs_to_check.push_back(bottom[check_bottom]);
}
// go through the bottom and parameter blobs
// LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
// LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
for (int blobid = 0; blobid < blobs_to_check.size(); ++blobid) {
Blob<Dtype>* current_blob = blobs_to_check[blobid];
// LOG(ERROR) << "Blob " << blobid << ": checking " << current_blob->count()
// << " parameters.";
// LOG(ERROR) << "Blob " << blobid << ": checking " << current_blob->count()
// << " parameters.";
// go through the values
for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
// First, obtain the original data
@@ -96,25 +96,28 @@ void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
// Get any additional loss from the layer
computed_objective += layer.Backward(top, true, &bottom);
Dtype computed_gradient = current_blob->cpu_diff()[feat_id];

// compute score by adding stepsize
current_blob->mutable_cpu_data()[feat_id] += stepsize_;
Caffe::set_random_seed(seed_);
layer.Forward(bottom, &top);
Dtype positive_objective = GetObjAndGradient(top, top_id, top_data_id);
positive_objective += layer.Backward(top, true, &bottom);

// compute score by subtracting stepsize
current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2;
Caffe::set_random_seed(seed_);
layer.Forward(bottom, &top);
Dtype negative_objective = GetObjAndGradient(top, top_id, top_data_id);
negative_objective += layer.Backward(top, true, &bottom);

// Recover stepsize
current_blob->mutable_cpu_data()[feat_id] += stepsize_;
Dtype estimated_gradient = (positive_objective - negative_objective) /
stepsize_ / 2.;
Dtype feature = current_blob->cpu_data()[feat_id];
// LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
// << current_blob->cpu_diff()[feat_id];
// LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
// << current_blob->cpu_diff()[feat_id];
if (kink_ - kink_range_ > feature || feature > kink_ + kink_range_) {
// We check relative accuracy, but for too small values, we threshold
// the scale factor by 1.
@@ -126,10 +129,12 @@ void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
EXPECT_LT(computed_gradient, estimated_gradient + threshold_ * scale)
<< "debug: (top_id, top_data_id, blob_id, feat_id)="
<< top_id << "," << top_data_id << "," << blobid << "," << feat_id;
// LOG(ERROR) << "computed gradient: " << computed_gradient
// << " estimated_gradient: " << estimated_gradient
// << " positive_objective: " << positive_objective
// << " negative_objective: " << negative_objective;
}
// LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
// LOG(ERROR) << "computed gradient: " << computed_gradient
// << " estimated_gradient: " << estimated_gradient;
// LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id]
}
}
}
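
The checker above perturbs one value by ±stepsize_, calls Caffe::set_random_seed(seed_) before each forward pass so any randomness in the layer repeats identically across the two evaluations, and compares the analytic gradient from Backward against the central-difference estimate (positive_objective - negative_objective) / (2 * stepsize_), scaling the tolerance by the larger gradient magnitude but never by less than 1. A standalone sketch of the same numerical comparison on f(x) = x * x (illustrative values, not Caffe code):

#include <algorithm>
#include <cassert>
#include <cmath>

int main() {
  const double h = 1e-2;            // plays the role of stepsize_
  const double threshold = 1e-4;    // plays the role of threshold_
  const double x = 0.7;
  const double computed = 2.0 * x;  // analytic d/dx of x^2, as Backward would report
  const double positive_objective = (x + h) * (x + h);
  const double negative_objective = (x - h) * (x - h);
  const double estimated = (positive_objective - negative_objective) / (2.0 * h);
  // Relative accuracy, but the scale factor is thresholded at 1 so that very
  // small gradients are effectively compared absolutely.
  const double scale = std::max(
      std::max(std::fabs(computed), std::fabs(estimated)), 1.0);
  assert(std::fabs(computed - estimated) <= threshold * scale);
  return 0;
}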