diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp
index d9865ce485b..87be0f8e742 100644
--- a/include/caffe/data_layers.hpp
+++ b/include/caffe/data_layers.hpp
@@ -28,7 +28,15 @@ class HDF5OutputLayer : public Layer<Dtype> {
   explicit HDF5OutputLayer(const LayerParameter& param);
   virtual ~HDF5OutputLayer();
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
+      vector<Blob<Dtype>*>* top) {}
+
+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_HDF5_OUTPUT;
+  }
+  // TODO: no limit on the number of blobs
+  virtual inline int ExactNumBottomBlobs() const { return 2; }
+  virtual inline int ExactNumTopBlobs() const { return 0; }
+
   inline std::string file_name() const { return file_name_; }

  protected:
@@ -58,6 +66,12 @@ class HDF5DataLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_HDF5_DATA;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 0; }
+  virtual inline int ExactNumTopBlobs() const { return 2; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -96,6 +110,13 @@ class DataLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_DATA;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 0; }
+  virtual inline int MinTopBlobs() const { return 1; }
+  virtual inline int MaxTopBlobs() const { return 2; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -141,6 +162,12 @@ class ImageDataLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_IMAGE_DATA;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 0; }
+  virtual inline int ExactNumTopBlobs() const { return 2; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -171,6 +198,48 @@ class ImageDataLayer : public Layer<Dtype> {
   Caffe::Phase phase_;
 };

+/* MemoryDataLayer
+*/
+template <typename Dtype>
+class MemoryDataLayer : public Layer<Dtype> {
+ public:
+  explicit MemoryDataLayer(const LayerParameter& param)
+      : Layer<Dtype>(param) {}
+  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+
+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_MEMORY_DATA;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 0; }
+  virtual inline int ExactNumTopBlobs() const { return 2; }
+
+  // Reset should accept const pointers, but can't, because the memory
+  // will be given to Blob, which is mutable
+  void Reset(Dtype* data, Dtype* label, int n);
+  int datum_channels() { return datum_channels_; }
+  int datum_height() { return datum_height_; }
+  int datum_width() { return datum_width_; }
+  int batch_size() { return batch_size_; }
+
+ protected:
+  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
+  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
+
+  Dtype* data_;
+  Dtype* labels_;
+  int datum_channels_;
+  int datum_height_;
+  int datum_width_;
+  int datum_size_;
+  int batch_size_;
+  int n_;
+  int pos_;
+};
+
 // This function is used to create a pthread that prefetches the window data.
 template <typename Dtype>
@@ -188,6 +257,12 @@ class WindowDataLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_WINDOW_DATA;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 0; }
+  virtual inline int ExactNumTopBlobs() const { return 2; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
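Usage sketch (not part of the patch): the MemoryDataLayer above is driven entirely through Reset(). A minimal sketch of how caller code would feed it, assuming the net's first layer was configured as a MEMORY_DATA layer; `net`, `data`, `labels`, and `num` are illustrative names, not symbols from this diff.

// Sketch: feeding in-memory buffers to a MemoryDataLayer<float>.
#include "caffe/data_layers.hpp"
#include "caffe/net.hpp"

void FeedFromMemory(caffe::Net<float>* net,
                    float* data, float* labels, int num) {
  // Assumes layer 0 of the net is the MEMORY_DATA layer.
  caffe::MemoryDataLayer<float>* md_layer =
      static_cast<caffe::MemoryDataLayer<float>*>(net->layers()[0].get());
  // Reset hands the raw pointers to Blobs (see the class comment above),
  // so the buffers must outlive every forward pass that reads them.
  // `data` holds num * channels * height * width values; `labels` holds
  // num values.
  md_layer->Reset(data, labels, num);
  // Each forward pass now consumes the next batch_size() items.
}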
diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp
index 14bba63594b..af3d5441b34 100644
--- a/include/caffe/layer.hpp
+++ b/include/caffe/layer.hpp
@@ -3,11 +3,14 @@
 #ifndef CAFFE_LAYER_H_
 #define CAFFE_LAYER_H_

+#include <string>
 #include <vector>
+
 #include "caffe/blob.hpp"
 #include "caffe/common.hpp"
 #include "caffe/proto/caffe.pb.h"

+using std::string;
 using std::vector;

 namespace caffe {
@@ -30,9 +33,12 @@ class Layer {
     }
   }
   virtual ~Layer() {}
-  // SetUp: your function should implement this.
+  // SetUp: your function should implement this, and call Layer::SetUp for
+  // common SetUp functionality.
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top) = 0;
+      vector<Blob<Dtype>*>* top) {
+    CheckBlobCounts(bottom, *top);
+  }

   // Forward and backward wrappers. You should implement the cpu and
   // gpu specific implementations instead, and should not change these
@@ -53,6 +59,31 @@ class Layer {
   // Writes the layer parameter to a protocol buffer
   virtual void ToProto(LayerParameter* param, bool write_diff = false);

+  // Returns the layer type as an enum value.
+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_NONE;
+  }
+
+  // Returns the layer type name.
+  virtual inline const string& type_name() const {
+    return LayerParameter_LayerType_Name(type());
+  }
+
+  // These methods can be overridden to declare that this layer type expects
+  // a certain number of blobs as input and output.
+  //
+  // ExactNum{Bottom,Top}Blobs return a non-negative number to require an
+  // exact number of bottom/top blobs; the Min/Max versions return a
+  // non-negative number to require a minimum and/or maximum number of blobs.
+  // If Exact is specified, neither Min nor Max should be specified, and vice
+  // versa. These methods may not rely on SetUp having been called.
+  virtual inline int ExactNumBottomBlobs() const { return -1; }
+  virtual inline int MinBottomBlobs() const { return -1; }
+  virtual inline int MaxBottomBlobs() const { return -1; }
+  virtual inline int ExactNumTopBlobs() const { return -1; }
+  virtual inline int MinTopBlobs() const { return -1; }
+  virtual inline int MaxTopBlobs() const { return -1; }
+
  protected:
   // The protobuf that stores the layer parameters
   LayerParameter layer_param_;
@@ -82,6 +113,43 @@ class Layer {
     Backward_cpu(top, propagate_down, bottom);
   }

+  // CheckBlobCounts: called by the parent Layer's SetUp to check that the
+  // number of bottom and top Blobs provided as input match the expected
+  // numbers specified by the {ExactNum,Min,Max}{Bottom,Top}Blobs() functions.
+  virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
+                               const vector<Blob<Dtype>*>& top) {
+    if (ExactNumBottomBlobs() >= 0) {
+      CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
+          << type_name() << " Layer takes " << ExactNumBottomBlobs()
+          << " bottom blob(s) as input.";
+    }
+    if (MinBottomBlobs() >= 0) {
+      CHECK_LE(MinBottomBlobs(), bottom.size())
+          << type_name() << " Layer takes at least " << MinBottomBlobs()
+          << " bottom blob(s) as input.";
+    }
+    if (MaxBottomBlobs() >= 0) {
+      CHECK_GE(MaxBottomBlobs(), bottom.size())
+          << type_name() << " Layer takes at most " << MaxBottomBlobs()
+          << " bottom blob(s) as input.";
+    }
+    if (ExactNumTopBlobs() >= 0) {
+      CHECK_EQ(ExactNumTopBlobs(), top.size())
+          << type_name() << " Layer produces " << ExactNumTopBlobs()
+          << " top blob(s) as output.";
+    }
+    if (MinTopBlobs() >= 0) {
+      CHECK_LE(MinTopBlobs(), top.size())
+          << type_name() << " Layer produces at least " << MinTopBlobs()
+          << " top blob(s) as output.";
+    }
+    if (MaxTopBlobs() >= 0) {
+      CHECK_GE(MaxTopBlobs(), top.size())
+          << type_name() << " Layer produces at most " << MaxTopBlobs()
+          << " top blob(s) as output.";
+    }
+  }
+
   DISABLE_COPY_AND_ASSIGN(Layer);
 };  // class Layer
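Illustrative sketch (not part of the patch) of the subclass contract the new Layer::SetUp establishes: declare the expected blob counts, then delegate to the base class at the top of your own SetUp. `ElementwiseMaxLayer` is a made-up name; the pattern is exactly the one applied to each layer in the hunks below.

// Hypothetical layer showing the intended subclass pattern.
template <typename Dtype>
class ElementwiseMaxLayer : public Layer<Dtype> {
 public:
  explicit ElementwiseMaxLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
    // First statement of every override: the base class runs
    // CheckBlobCounts against the declarations below.
    Layer<Dtype>::SetUp(bottom, top);
    // ... layer-specific shape checks and top blob reshaping ...
  }
  // Declare counts instead of hand-writing CHECKs: at least two bottoms,
  // exactly one top. Exact and Min/Max are mutually exclusive per side.
  virtual inline int MinBottomBlobs() const { return 2; }
  virtual inline int ExactNumTopBlobs() const { return 1; }
};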
diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp
index cc798c499d3..381bf0f4f8f 100644
--- a/include/caffe/loss_layers.hpp
+++ b/include/caffe/loss_layers.hpp
@@ -35,6 +35,9 @@ class LossLayer : public Layer<Dtype> {
       const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top);
   virtual void FurtherSetUp(
       const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {}
+
+  virtual inline int ExactNumBottomBlobs() const { return 2; }
+  virtual inline int ExactNumTopBlobs() const { return 0; }
 };

 /* SigmoidCrossEntropyLossLayer
@@ -49,6 +52,10 @@ class SigmoidCrossEntropyLossLayer : public LossLayer<Dtype> {
   virtual void FurtherSetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_SIGMOID_CROSS_ENTROPY_LOSS;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -81,6 +88,10 @@ class EuclideanLossLayer : public LossLayer<Dtype> {
   virtual void FurtherSetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_EUCLIDEAN_LOSS;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -100,6 +111,10 @@ class InfogainLossLayer : public LossLayer<Dtype> {
   virtual void FurtherSetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_INFOGAIN_LOSS;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -117,6 +132,10 @@ class HingeLossLayer : public LossLayer<Dtype> {
   explicit HingeLossLayer(const LayerParameter& param)
       : LossLayer<Dtype>(param) {}

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_HINGE_LOSS;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -134,6 +153,10 @@ class MultinomialLogisticLossLayer : public LossLayer<Dtype> {
   virtual void FurtherSetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -153,6 +176,10 @@ class AccuracyLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_ACCURACY;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp
index 5118e8a5225..ed664df7570 100644
--- a/include/caffe/neuron_layers.hpp
+++ b/include/caffe/neuron_layers.hpp
@@ -33,6 +33,12 @@ class NeuronLayer : public Layer<Dtype> {
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+
+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_NONE;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 1; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
 };

 /* BNLLLayer
@@ -48,6 +54,10 @@ class BNLLLayer : public NeuronLayer<Dtype> {
   explicit BNLLLayer(const LayerParameter& param)
       : NeuronLayer<Dtype>(param) {}

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_BNLL;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -77,6 +87,10 @@ class DropoutLayer : public NeuronLayer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_DROPOUT;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -107,6 +121,10 @@ class PowerLayer : public NeuronLayer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_POWER;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -138,6 +156,10 @@ class ReLULayer : public NeuronLayer<Dtype> {
   explicit ReLULayer(const LayerParameter& param)
       : NeuronLayer<Dtype>(param) {}

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_RELU;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -167,6 +189,10 @@ class SigmoidLayer : public NeuronLayer<Dtype> {
   explicit SigmoidLayer(const LayerParameter& param)
       : NeuronLayer<Dtype>(param) {}

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_SIGMOID;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -191,6 +217,10 @@ class TanHLayer : public NeuronLayer<Dtype> {
   explicit TanHLayer(const LayerParameter& param)
       : NeuronLayer<Dtype>(param) {}

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_TANH;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -220,6 +250,10 @@ class ThresholdLayer : public NeuronLayer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_THRESHOLD;
+  }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp
index a43f7f98c09..eb067bb624e 100644
--- a/include/caffe/vision_layers.hpp
+++ b/include/caffe/vision_layers.hpp
@@ -34,6 +34,12 @@ class ArgMaxLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_ARGMAX;
+  }
+  virtual inline int MinBottomBlobs() const { return 1; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -56,6 +62,12 @@ class ConcatLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_CONCAT;
+  }
+  virtual inline int MinBottomBlobs() const { return 2; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -85,6 +97,12 @@ class ConvolutionLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_CONVOLUTION;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 1; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -123,6 +141,12 @@ class EltwiseLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_ELTWISE;
+  }
+  virtual inline int MinBottomBlobs() const { return 2; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -147,6 +171,12 @@ class FlattenLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_FLATTEN;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 1; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -170,6 +200,12 @@ class Im2colLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_IM2COL;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 1; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -198,6 +234,12 @@ class InnerProductLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_INNER_PRODUCT;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 1; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -230,6 +272,12 @@ class LRNLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_LRN;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 1; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -285,42 +333,6 @@ class LRNLayer : public Layer<Dtype> {
   vector<Blob<Dtype>*> product_bottom_vec_;
 };

-/* PoolingLayer
-*/
-template <typename Dtype>
-class MemoryDataLayer : public Layer<Dtype> {
- public:
-  explicit MemoryDataLayer(const LayerParameter& param)
-      : Layer<Dtype>(param) {}
-  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  // Reset should accept const pointers, but can't, because the memory
-  // will be given to Blob, which is mutable
-  void Reset(Dtype* data, Dtype* label, int n);
-  int datum_channels() { return datum_channels_; }
-  int datum_height() { return datum_height_; }
-  int datum_width() { return datum_width_; }
-  int batch_size() { return batch_size_; }
-
- protected:
-  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
-  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-      const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
-
-  Dtype* data_;
-  Dtype* labels_;
-  int datum_channels_;
-  int datum_height_;
-  int datum_width_;
-  int datum_size_;
-  int batch_size_;
-  int n_;
-  int pos_;
-};
-
 /* PoolingLayer
 */
 template <typename Dtype>
@@ -331,6 +343,13 @@ class PoolingLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_POOLING;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 1; }
+  virtual inline int MinTopBlobs() const { return 1; }
+  virtual inline int MaxTopBlobs() const { return max_top_blobs_; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -341,6 +360,7 @@ class PoolingLayer : public Layer<Dtype> {
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);

+  int max_top_blobs_;
   int kernel_size_;
   int stride_;
   int pad_;
@@ -363,6 +383,12 @@ class SoftmaxLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_SOFTMAX;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 1; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -395,6 +421,12 @@ class SoftmaxWithLossLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_SOFTMAX_LOSS;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 2; }
+  virtual inline int ExactNumTopBlobs() const { return 0; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -423,6 +455,12 @@ class SplitLayer : public Layer<Dtype> {
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);

+  virtual inline LayerParameter_LayerType type() const {
+    return LayerParameter_LayerType_SPLIT;
+  }
+  virtual inline int ExactNumBottomBlobs() const { return 1; }
+  virtual inline int MinTopBlobs() const { return 1; }
+
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
diff --git a/src/caffe/layers/accuracy_layer.cpp b/src/caffe/layers/accuracy_layer.cpp
index 3e671704465..fbc943eaf2d 100644
--- a/src/caffe/layers/accuracy_layer.cpp
+++ b/src/caffe/layers/accuracy_layer.cpp
@@ -17,8 +17,7 @@ namespace caffe {
 template <typename Dtype>
 void AccuracyLayer<Dtype>::SetUp(
     const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 2) << "Accuracy Layer takes two blobs as input.";
-  CHECK_EQ(top->size(), 1) << "Accuracy Layer takes 1 output.";
+  Layer<Dtype>::SetUp(bottom, top);
   CHECK_EQ(bottom[0]->num(), bottom[1]->num())
       << "The data and label should have the same number.";
   CHECK_EQ(bottom[1]->channels(), 1);
diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp
index e04588d4baa..70ccfff5a36 100644
--- a/src/caffe/layers/argmax_layer.cpp
+++ b/src/caffe/layers/argmax_layer.cpp
@@ -12,8 +12,7 @@ namespace caffe {
 template <typename Dtype>
 void ArgMaxLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 1) << "ArgMaxLayer Layer takes 1 input.";
-  CHECK_EQ(top->size(), 1) << "ArgMaxLayer Layer takes 1 output.";
+  Layer<Dtype>::SetUp(bottom, top);
   out_max_val_ = this->layer_param_.argmax_param().out_max_val();
   if (out_max_val_) {
     // Produces max_ind and max_val
diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp
index 8036bdab675..4541ee74215 100644
--- a/src/caffe/layers/concat_layer.cpp
+++ b/src/caffe/layers/concat_layer.cpp
@@ -11,11 +11,7 @@ namespace caffe {
 template <typename Dtype>
 void ConcatLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_GT(bottom.size(), 1) <<
-    "ConcatLayer takes at least two blobs as input.";
-  CHECK_EQ(top->size(), 1) <<
-    "ConcatLayer takes a single blob as output.";
-
+  Layer<Dtype>::SetUp(bottom, top);
   concat_dim_ = this->layer_param_.concat_param().concat_dim();
   CHECK_GE(concat_dim_, 0) << "concat_dim should be >= 0";
diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp
index 55966b54bde..880b83afccb 100644
--- a/src/caffe/layers/conv_layer.cpp
+++ b/src/caffe/layers/conv_layer.cpp
@@ -13,8 +13,7 @@ namespace caffe {
 template <typename Dtype>
 void ConvolutionLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 1) << "Conv Layer takes a single blob as input.";
-  CHECK_EQ(top->size(), 1) << "Conv Layer takes a single blob as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   kernel_size_ = this->layer_param_.convolution_param().kernel_size();
   stride_ = this->layer_param_.convolution_param().stride();
   group_ = this->layer_param_.convolution_param().group();
diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp
index 6d7392a6b2a..348753e5db9 100644
--- a/src/caffe/layers/data_layer.cpp
+++ b/src/caffe/layers/data_layer.cpp
@@ -129,9 +129,7 @@ DataLayer<Dtype>::~DataLayer() {
 template <typename Dtype>
 void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 0) << "Data Layer takes no input blobs.";
-  CHECK_GE(top->size(), 1) << "Data Layer takes at least one blob as output.";
-  CHECK_LE(top->size(), 2) << "Data Layer takes at most two blobs as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   if (top->size() == 1) {
     output_labels_ = false;
   } else {
diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp
index 5d05124c4c8..5e5d760c993 100644
--- a/src/caffe/layers/eltwise_layer.cpp
+++ b/src/caffe/layers/eltwise_layer.cpp
@@ -11,10 +11,7 @@ namespace caffe {
 template <typename Dtype>
 void EltwiseLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_GE(bottom.size(), 2) <<
-      "Eltwise Layer takes at least 2 blobs as input.";
-  CHECK_EQ(top->size(), 1) <<
-      "Eltwise Layer takes a single blob as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   CHECK(this->layer_param().eltwise_param().coeff_size() == 0
       || this->layer_param().eltwise_param().coeff_size() == bottom.size())
       << "Eltwise Layer takes one coefficient per bottom blob.";
diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp
index e954030d260..95f4859f8d9 100644
--- a/src/caffe/layers/flatten_layer.cpp
+++ b/src/caffe/layers/flatten_layer.cpp
@@ -11,8 +11,7 @@ namespace caffe {
 template <typename Dtype>
 void FlattenLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 1) << "Flatten Layer takes a single blob as input.";
-  CHECK_EQ(top->size(), 1) << "Flatten Layer takes a single blob as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   int channels_out = bottom[0]->channels() * bottom[0]->height()
       * bottom[0]->width();
   (*top)[0]->Reshape(bottom[0]->num(), channels_out, 1, 1);
diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp
index cff4f7c7318..d5c64f05696 100644
--- a/src/caffe/layers/hdf5_data_layer.cpp
+++ b/src/caffe/layers/hdf5_data_layer.cpp
@@ -52,9 +52,7 @@ void HDF5DataLayer<Dtype>::LoadHDF5FileData(const char* filename) {
 template <typename Dtype>
 void HDF5DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 0) << "HDF5DataLayer takes no input blobs.";
-  CHECK_EQ(top->size(), 2) << "HDF5DataLayer takes two blobs as output.";
-
+  Layer<Dtype>::SetUp(bottom, top);
   // Read the source to parse the filenames.
   const string& source = this->layer_param_.hdf5_data_param().source();
   LOG(INFO) << "Loading filename from " << source;
diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp
index e491697e17c..0961b9b73a3 100644
--- a/src/caffe/layers/hdf5_output_layer.cpp
+++ b/src/caffe/layers/hdf5_output_layer.cpp
@@ -41,14 +41,6 @@ void HDF5OutputLayer<Dtype>::SaveBlobs() {
   LOG(INFO) << "Successfully saved " << data_blob_.num() << " rows";
 }

-template <typename Dtype>
-void HDF5OutputLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top) {
-  // TODO: no limit on the number of blobs
-  CHECK_EQ(bottom.size(), 2) << "HDF5OutputLayer takes two blobs as input.";
-  CHECK_EQ(top->size(), 0) << "HDF5OutputLayer takes no output blobs.";
-}
-
 template <typename Dtype>
 Dtype HDF5OutputLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp
index 749ea3c2d6a..f0c26c9a1cb 100644
--- a/src/caffe/layers/im2col_layer.cpp
+++ b/src/caffe/layers/im2col_layer.cpp
@@ -12,8 +12,7 @@ namespace caffe {
 template <typename Dtype>
 void Im2colLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 1) << "Im2col Layer takes a single blob as input.";
-  CHECK_EQ(top->size(), 1) << "Im2col Layer takes a single blob as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   kernel_size_ = this->layer_param_.convolution_param().kernel_size();
   stride_ = this->layer_param_.convolution_param().stride();
   pad_ = this->layer_param_.convolution_param().pad();
diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp
index ed064d0608d..9a79e72fedc 100644
--- a/src/caffe/layers/image_data_layer.cpp
+++ b/src/caffe/layers/image_data_layer.cpp
@@ -140,8 +140,7 @@ ImageDataLayer<Dtype>::~ImageDataLayer() {
 template <typename Dtype>
 void ImageDataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 0) << "Input Layer takes no input blobs.";
-  CHECK_EQ(top->size(), 2) << "Input Layer takes two blobs as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   const int new_height = this->layer_param_.image_data_param().new_height();
   const int new_width = this->layer_param_.image_data_param().new_height();
   CHECK((new_height == 0 && new_width == 0) ||
diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp
index c60261e9486..971254c9c45 100644
--- a/src/caffe/layers/inner_product_layer.cpp
+++ b/src/caffe/layers/inner_product_layer.cpp
@@ -14,8 +14,7 @@ namespace caffe {
 template <typename Dtype>
 void InnerProductLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 1) << "IP Layer takes a single blob as input.";
-  CHECK_EQ(top->size(), 1) << "IP Layer takes a single blob as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   const int num_output = this->layer_param_.inner_product_param().num_output();
   bias_term_ = this->layer_param_.inner_product_param().bias_term();
   // Figure out the dimensions
diff --git a/src/caffe/layers/loss_layer.cpp b/src/caffe/layers/loss_layer.cpp
index 1efb6235f98..14ea975ad0d 100644
--- a/src/caffe/layers/loss_layer.cpp
+++ b/src/caffe/layers/loss_layer.cpp
@@ -17,8 +17,7 @@ namespace caffe {
 template <typename Dtype>
 void LossLayer<Dtype>::SetUp(
     const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 2) << "Loss Layer takes two blobs as input.";
-  CHECK_EQ(top->size(), 0) << "Loss Layer takes no output.";
+  Layer<Dtype>::SetUp(bottom, top);
   CHECK_EQ(bottom[0]->num(), bottom[1]->num())
       << "The data and label should have the same number.";
   FurtherSetUp(bottom, top);
diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp
index 6f7af75957a..071e7198544 100644
--- a/src/caffe/layers/lrn_layer.cpp
+++ b/src/caffe/layers/lrn_layer.cpp
@@ -11,10 +11,7 @@ namespace caffe {
 template <typename Dtype>
 void LRNLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 1) <<
-    "Local Response Normalization Layer takes a single blob as input.";
-  CHECK_EQ(top->size(), 1) <<
-    "Local Response Normalization Layer takes a single blob as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   num_ = bottom[0]->num();
   channels_ = bottom[0]->channels();
   height_ = bottom[0]->height();
diff --git a/src/caffe/layers/memory_data_layer.cpp b/src/caffe/layers/memory_data_layer.cpp
index 60bce27b8c9..15eedb317e3 100644
--- a/src/caffe/layers/memory_data_layer.cpp
+++ b/src/caffe/layers/memory_data_layer.cpp
@@ -10,8 +10,7 @@ namespace caffe {
 template <typename Dtype>
 void MemoryDataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 0) << "Memory Data Layer takes no blobs as input.";
-  CHECK_EQ(top->size(), 2) << "Memory Data Layer takes two blobs as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   batch_size_ = this->layer_param_.memory_data_param().batch_size();
   datum_channels_ = this->layer_param_.memory_data_param().channels();
   datum_height_ = this->layer_param_.memory_data_param().height();
diff --git a/src/caffe/layers/neuron_layer.cpp b/src/caffe/layers/neuron_layer.cpp
index e9dbd0eb75c..1b8fcecd482 100644
--- a/src/caffe/layers/neuron_layer.cpp
+++ b/src/caffe/layers/neuron_layer.cpp
@@ -10,8 +10,7 @@ namespace caffe {
 template <typename Dtype>
 void NeuronLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 1) << "Neuron Layer takes a single blob as input.";
-  CHECK_EQ(top->size(), 1) << "Neuron Layer takes a single blob as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   // NeuronLayer allows in-place computations. If the computation is not
   // in-place, we will need to initialize the top blob.
   if ((*top)[0] != bottom[0]) {
diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp
index 929d7dfaa02..8f5f82d6ff7 100644
--- a/src/caffe/layers/pooling_layer.cpp
+++ b/src/caffe/layers/pooling_layer.cpp
@@ -18,16 +18,16 @@ namespace caffe {
 template <typename Dtype>
 void PoolingLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 1) << "PoolingLayer takes a single blob as input.";
+  // Set the max number of top blobs before calling base Layer::SetUp.
+  // If doing MAX pooling, we can optionally output an extra top Blob
+  // for the mask. Otherwise, we only have one top Blob.
   if (this->layer_param_.pooling_param().pool() ==
       PoolingParameter_PoolMethod_MAX) {
-    CHECK_GE(top->size(), 1)
-        << "MaxPoolingLayer takes at least one blob as output.";
-    CHECK_LE(top->size(), 2)
-        << "MaxPoolingLayer takes at most two blobs as output.";
+    max_top_blobs_ = 2;
   } else {
-    CHECK_EQ(top->size(), 1) << "PoolingLayer takes a single blob as output.";
+    max_top_blobs_ = 1;
   }
+  Layer<Dtype>::SetUp(bottom, top);
   kernel_size_ = this->layer_param_.pooling_param().kernel_size();
   stride_ = this->layer_param_.pooling_param().stride();
   pad_ = this->layer_param_.pooling_param().pad();
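One ordering subtlety worth spelling out (annotation, not part of the patch): PoolingLayer is the only layer here whose blob-count bound is computed at runtime, so the assignment must precede the delegation, as the hunk above does. Condensed restatement, with `use_max_pooling` as an illustrative stand-in for the pool-method test:

// The bound must be written before the base-class call, because
// Layer<Dtype>::SetUp runs CheckBlobCounts immediately, and
// CheckBlobCounts reads the virtual MaxTopBlobs() override.
max_top_blobs_ = use_max_pooling ? 2 : 1;  // optional mask top for MAX
Layer<Dtype>::SetUp(bottom, top);          // CheckBlobCounts sees the bound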
diff --git a/src/caffe/layers/softmax_layer.cpp b/src/caffe/layers/softmax_layer.cpp
index e9983608e94..dbe16da2343 100644
--- a/src/caffe/layers/softmax_layer.cpp
+++ b/src/caffe/layers/softmax_layer.cpp
@@ -14,8 +14,7 @@ namespace caffe {
 template <typename Dtype>
 void SoftmaxLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 1) << "Softmax Layer takes a single blob as input.";
-  CHECK_EQ(top->size(), 1) << "Softmax Layer takes a single blob as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
       bottom[0]->height(), bottom[0]->width());
   sum_multiplier_.Reshape(1, bottom[0]->channels(),
diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp
index fecd7a520df..ef6eebabadd 100644
--- a/src/caffe/layers/softmax_loss_layer.cpp
+++ b/src/caffe/layers/softmax_loss_layer.cpp
@@ -15,8 +15,7 @@ namespace caffe {
 template <typename Dtype>
 void SoftmaxWithLossLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 2) << "SoftmaxLoss Layer takes two blobs as input.";
-  CHECK_EQ(top->size(), 0) << "SoftmaxLoss Layer takes no blob as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   softmax_bottom_vec_.clear();
   softmax_bottom_vec_.push_back(bottom[0]);
   softmax_top_vec_.push_back(&prob_);
diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp
index aa2b6f6a308..2f99ca1842c 100644
--- a/src/caffe/layers/split_layer.cpp
+++ b/src/caffe/layers/split_layer.cpp
@@ -11,8 +11,7 @@ namespace caffe {
 template <typename Dtype>
 void SplitLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_EQ(bottom.size(), 1) << "Split Layer takes a single blob as input.";
-  CHECK_GE(top->size(), 1) << "Split Layer takes at least one blob as output.";
+  Layer<Dtype>::SetUp(bottom, top);
   count_ = bottom[0]->count();
   for (int i = 0; i < top->size(); ++i) {
     // Allow the 0th top blob to be 'in-place', but no others.
diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp
index 862c0347082..e08bed7d56f 100644
--- a/src/caffe/layers/window_data_layer.cpp
+++ b/src/caffe/layers/window_data_layer.cpp
@@ -258,14 +258,12 @@ WindowDataLayer<Dtype>::~WindowDataLayer() {
 template <typename Dtype>
 void WindowDataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
+  Layer<Dtype>::SetUp(bottom, top);
   // SetUp runs through the window_file and creates two structures
   // that hold windows: one for foreground (object) windows and one
   // for background (non-object) windows. We use an overlap threshold
   // to decide which is which.
-  CHECK_EQ(bottom.size(), 0) << "Window data Layer takes no input blobs.";
-  CHECK_EQ(top->size(), 2) << "Window data Layer prodcues two blobs as output.";
-
   // window_file format
   // repeated:
   //    # image_index