# Darknet-converted detection network (YOLO-style, 416x416 input, two 255-ch heads).
# Conv blocks follow the converter pattern: Convolution (no bias) -> BatchNorm
# (inference stats) -> Scale (learned gamma/beta) -> leaky ReLU (slope 0.1),
# with BN/Scale/ReLU applied in place on the conv top.
# Single-bottom Concat layers are pass-through "route" layers from Darknet.
name: "Darknet2Caffe"
input: "data"
input_dim: 1
input_dim: 3
input_dim: 416
input_dim: 416

# --- Stem: 416 -> 208 -> 104 ---
layer { name: "layer1-conv" type: "Convolution" bottom: "data" top: "layer1-conv" convolution_param { num_output: 32 kernel_size: 3 pad: 1 stride: 2 bias_term: false } }
layer { name: "layer1-bn" type: "BatchNorm" bottom: "layer1-conv" top: "layer1-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer1-scale" type: "Scale" bottom: "layer1-conv" top: "layer1-conv" scale_param { bias_term: true } }
layer { name: "layer1-act" type: "ReLU" bottom: "layer1-conv" top: "layer1-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer2-conv" type: "Convolution" bottom: "layer1-conv" top: "layer2-conv" convolution_param { num_output: 64 kernel_size: 3 pad: 1 stride: 2 bias_term: false } }
layer { name: "layer2-bn" type: "BatchNorm" bottom: "layer2-conv" top: "layer2-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer2-scale" type: "Scale" bottom: "layer2-conv" top: "layer2-conv" scale_param { bias_term: true } }
layer { name: "layer2-act" type: "ReLU" bottom: "layer2-conv" top: "layer2-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer3-conv" type: "Convolution" bottom: "layer2-conv" top: "layer3-conv" convolution_param { num_output: 64 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer3-bn" type: "BatchNorm" bottom: "layer3-conv" top: "layer3-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer3-scale" type: "Scale" bottom: "layer3-conv" top: "layer3-conv" scale_param { bias_term: true } }
layer { name: "layer3-act" type: "ReLU" bottom: "layer3-conv" top: "layer3-conv" relu_param { negative_slope: 0.1 } }

# --- Stage 1 cross-stage branch (from layer2-conv, 104x104) ---
layer { name: "layer300-conv" type: "Convolution" bottom: "layer2-conv" top: "layer300-conv" convolution_param { num_output: 32 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer300-bn" type: "BatchNorm" bottom: "layer300-conv" top: "layer300-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer300-scale" type: "Scale" bottom: "layer300-conv" top: "layer300-conv" scale_param { bias_term: true } }
layer { name: "layer300-act" type: "ReLU" bottom: "layer300-conv" top: "layer300-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer4-route" type: "Concat" bottom: "layer300-conv" top: "layer4-route" }
layer { name: "layer5-conv" type: "Convolution" bottom: "layer4-route" top: "layer5-conv" convolution_param { num_output: 32 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer5-bn" type: "BatchNorm" bottom: "layer5-conv" top: "layer5-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer5-scale" type: "Scale" bottom: "layer5-conv" top: "layer5-conv" scale_param { bias_term: true } }
layer { name: "layer5-act" type: "ReLU" bottom: "layer5-conv" top: "layer5-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer6-conv" type: "Convolution" bottom: "layer5-conv" top: "layer6-conv" convolution_param { num_output: 32 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer6-bn" type: "BatchNorm" bottom: "layer6-conv" top: "layer6-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer6-scale" type: "Scale" bottom: "layer6-conv" top: "layer6-conv" scale_param { bias_term: true } }
layer { name: "layer6-act" type: "ReLU" bottom: "layer6-conv" top: "layer6-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer7-route" type: "Concat" bottom: "layer6-conv" bottom: "layer5-conv" top: "layer7-route" }
layer { name: "layer8-conv" type: "Convolution" bottom: "layer7-route" top: "layer8-conv" convolution_param { num_output: 64 kernel_size: 1 pad: 0 stride: 1 bias_term: false } }
layer { name: "layer8-bn" type: "BatchNorm" bottom: "layer8-conv" top: "layer8-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer8-scale" type: "Scale" bottom: "layer8-conv" top: "layer8-conv" scale_param { bias_term: true } }
layer { name: "layer8-act" type: "ReLU" bottom: "layer8-conv" top: "layer8-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer9-route" type: "Concat" bottom: "layer3-conv" bottom: "layer8-conv" top: "layer9-route" }
layer { name: "layer10-maxpool" type: "Pooling" bottom: "layer9-route" top: "layer10-maxpool" pooling_param { pool: MAX kernel_size: 2 stride: 2 pad: 0 } }

# --- Stage 2 (52x52) ---
layer { name: "layer11-conv" type: "Convolution" bottom: "layer10-maxpool" top: "layer11-conv" convolution_param { num_output: 128 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer11-bn" type: "BatchNorm" bottom: "layer11-conv" top: "layer11-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer11-scale" type: "Scale" bottom: "layer11-conv" top: "layer11-conv" scale_param { bias_term: true } }
layer { name: "layer11-act" type: "ReLU" bottom: "layer11-conv" top: "layer11-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer110-conv" type: "Convolution" bottom: "layer10-maxpool" top: "layer110-conv" convolution_param { num_output: 64 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer110-bn" type: "BatchNorm" bottom: "layer110-conv" top: "layer110-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer110-scale" type: "Scale" bottom: "layer110-conv" top: "layer110-conv" scale_param { bias_term: true } }
layer { name: "layer110-act" type: "ReLU" bottom: "layer110-conv" top: "layer110-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer12-route" type: "Concat" bottom: "layer110-conv" top: "layer12-route" }
layer { name: "layer13-conv" type: "Convolution" bottom: "layer12-route" top: "layer13-conv" convolution_param { num_output: 64 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer13-bn" type: "BatchNorm" bottom: "layer13-conv" top: "layer13-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer13-scale" type: "Scale" bottom: "layer13-conv" top: "layer13-conv" scale_param { bias_term: true } }
layer { name: "layer13-act" type: "ReLU" bottom: "layer13-conv" top: "layer13-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer14-conv" type: "Convolution" bottom: "layer13-conv" top: "layer14-conv" convolution_param { num_output: 64 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer14-bn" type: "BatchNorm" bottom: "layer14-conv" top: "layer14-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer14-scale" type: "Scale" bottom: "layer14-conv" top: "layer14-conv" scale_param { bias_term: true } }
layer { name: "layer14-act" type: "ReLU" bottom: "layer14-conv" top: "layer14-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer15-route" type: "Concat" bottom: "layer14-conv" bottom: "layer13-conv" top: "layer15-route" }
layer { name: "layer16-conv" type: "Convolution" bottom: "layer15-route" top: "layer16-conv" convolution_param { num_output: 128 kernel_size: 1 pad: 0 stride: 1 bias_term: false } }
layer { name: "layer16-bn" type: "BatchNorm" bottom: "layer16-conv" top: "layer16-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer16-scale" type: "Scale" bottom: "layer16-conv" top: "layer16-conv" scale_param { bias_term: true } }
layer { name: "layer16-act" type: "ReLU" bottom: "layer16-conv" top: "layer16-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer17-route" type: "Concat" bottom: "layer11-conv" bottom: "layer16-conv" top: "layer17-route" }
layer { name: "layer18-maxpool" type: "Pooling" bottom: "layer17-route" top: "layer18-maxpool" pooling_param { pool: MAX kernel_size: 2 stride: 2 pad: 0 } }

# --- Stage 3 (26x26) ---
layer { name: "layer19-conv" type: "Convolution" bottom: "layer18-maxpool" top: "layer19-conv" convolution_param { num_output: 256 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer19-bn" type: "BatchNorm" bottom: "layer19-conv" top: "layer19-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer19-scale" type: "Scale" bottom: "layer19-conv" top: "layer19-conv" scale_param { bias_term: true } }
layer { name: "layer19-act" type: "ReLU" bottom: "layer19-conv" top: "layer19-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer190-conv" type: "Convolution" bottom: "layer18-maxpool" top: "layer190-conv" convolution_param { num_output: 128 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer190-bn" type: "BatchNorm" bottom: "layer190-conv" top: "layer190-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer190-scale" type: "Scale" bottom: "layer190-conv" top: "layer190-conv" scale_param { bias_term: true } }
layer { name: "layer190-act" type: "ReLU" bottom: "layer190-conv" top: "layer190-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer20-route" type: "Concat" bottom: "layer190-conv" top: "layer20-route" }
layer { name: "layer21-conv" type: "Convolution" bottom: "layer20-route" top: "layer21-conv" convolution_param { num_output: 128 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer21-bn" type: "BatchNorm" bottom: "layer21-conv" top: "layer21-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer21-scale" type: "Scale" bottom: "layer21-conv" top: "layer21-conv" scale_param { bias_term: true } }
layer { name: "layer21-act" type: "ReLU" bottom: "layer21-conv" top: "layer21-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer22-conv" type: "Convolution" bottom: "layer21-conv" top: "layer22-conv" convolution_param { num_output: 128 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer22-bn" type: "BatchNorm" bottom: "layer22-conv" top: "layer22-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer22-scale" type: "Scale" bottom: "layer22-conv" top: "layer22-conv" scale_param { bias_term: true } }
layer { name: "layer22-act" type: "ReLU" bottom: "layer22-conv" top: "layer22-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer23-route" type: "Concat" bottom: "layer22-conv" bottom: "layer21-conv" top: "layer23-route" }
layer { name: "layer24-conv" type: "Convolution" bottom: "layer23-route" top: "layer24-conv" convolution_param { num_output: 256 kernel_size: 1 pad: 0 stride: 1 bias_term: false } }
layer { name: "layer24-bn" type: "BatchNorm" bottom: "layer24-conv" top: "layer24-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer24-scale" type: "Scale" bottom: "layer24-conv" top: "layer24-conv" scale_param { bias_term: true } }
layer { name: "layer24-act" type: "ReLU" bottom: "layer24-conv" top: "layer24-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer25-route" type: "Concat" bottom: "layer19-conv" bottom: "layer24-conv" top: "layer25-route" }
layer { name: "layer26-maxpool" type: "Pooling" bottom: "layer25-route" top: "layer26-maxpool" pooling_param { pool: MAX kernel_size: 2 stride: 2 pad: 0 } }

# --- Head trunk (13x13) and first detection head ---
layer { name: "layer27-conv" type: "Convolution" bottom: "layer26-maxpool" top: "layer27-conv" convolution_param { num_output: 512 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer27-bn" type: "BatchNorm" bottom: "layer27-conv" top: "layer27-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer27-scale" type: "Scale" bottom: "layer27-conv" top: "layer27-conv" scale_param { bias_term: true } }
layer { name: "layer27-act" type: "ReLU" bottom: "layer27-conv" top: "layer27-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer28-conv" type: "Convolution" bottom: "layer27-conv" top: "layer28-conv" convolution_param { num_output: 256 kernel_size: 1 pad: 0 stride: 1 bias_term: false } }
layer { name: "layer28-bn" type: "BatchNorm" bottom: "layer28-conv" top: "layer28-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer28-scale" type: "Scale" bottom: "layer28-conv" top: "layer28-conv" scale_param { bias_term: true } }
layer { name: "layer28-act" type: "ReLU" bottom: "layer28-conv" top: "layer28-conv" relu_param { negative_slope: 0.1 } }
layer { name: "layer29-conv" type: "Convolution" bottom: "layer28-conv" top: "layer29-conv" convolution_param { num_output: 512 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer29-bn" type: "BatchNorm" bottom: "layer29-conv" top: "layer29-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer29-scale" type: "Scale" bottom: "layer29-conv" top: "layer29-conv" scale_param { bias_term: true } }
layer { name: "layer29-act" type: "ReLU" bottom: "layer29-conv" top: "layer29-conv" relu_param { negative_slope: 0.1 } }
# Detection output 1 (13x13): linear 1x1 conv WITH bias, no BN/activation.
layer { name: "layer30-conv" type: "Convolution" bottom: "layer29-conv" top: "layer30-conv" convolution_param { num_output: 255 kernel_size: 1 pad: 0 stride: 1 bias_term: true } }

# --- Upsample path and second detection head (26x26) ---
layer { name: "layer32-route" type: "Concat" bottom: "layer28-conv" top: "layer32-route" }
layer { name: "layer33-conv" type: "Convolution" bottom: "layer32-route" top: "layer33-conv" convolution_param { num_output: 128 kernel_size: 1 pad: 0 stride: 1 bias_term: false } }
layer { name: "layer33-bn" type: "BatchNorm" bottom: "layer33-conv" top: "layer33-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer33-scale" type: "Scale" bottom: "layer33-conv" top: "layer33-conv" scale_param { bias_term: true } }
layer { name: "layer33-act" type: "ReLU" bottom: "layer33-conv" top: "layer33-conv" relu_param { negative_slope: 0.1 } }
# "Upsample" is a custom layer provided by the darknet2caffe runtime, not stock Caffe.
layer { name: "layer34-upsample" type: "Upsample" bottom: "layer33-conv" top: "layer34-upsample" upsample_param { scale: 2 } }
layer { name: "layer35-route" type: "Concat" bottom: "layer34-upsample" bottom: "layer23-route" top: "layer35-route" }
layer { name: "layer36-conv" type: "Convolution" bottom: "layer35-route" top: "layer36-conv" convolution_param { num_output: 256 kernel_size: 3 pad: 1 stride: 1 bias_term: false } }
layer { name: "layer36-bn" type: "BatchNorm" bottom: "layer36-conv" top: "layer36-conv" batch_norm_param { use_global_stats: true } }
layer { name: "layer36-scale" type: "Scale" bottom: "layer36-conv" top: "layer36-conv" scale_param { bias_term: true } }
layer { name: "layer36-act" type: "ReLU" bottom: "layer36-conv" top: "layer36-conv" relu_param { negative_slope: 0.1 } }
# Detection output 2 (26x26): linear 1x1 conv WITH bias, no BN/activation.
layer { name: "layer37-conv" type: "Convolution" bottom: "layer36-conv" top: "layer37-conv" convolution_param { num_output: 255 kernel_size: 1 pad: 0 stride: 1 bias_term: true } }