
How to understand the upsample layers of SegNet

I noticed that SegNet contains upsample layers. Its own images are 480 × 360; when I try to use my images (565 × 584), I get the following error:

I0929 03:58:06.238135 22750 net.cpp:368] upsample4 -> pool4_D 
I0929 03:58:06.238142 22750 net.cpp:120] Setting up upsample4 
F0929 03:58:06.238164 22750 upsample_layer.cpp:63] Check failed: bottom[0]->height() == bottom[1]->height() (38 vs. 37) 

The layer is defined as follows:

layer { 
    name: "upsample4" 
    type: "Upsample" 
    bottom: "conv5_1_D" 
    top: "pool4_D" 
    bottom: "pool4_mask" 
    upsample_param { 
    scale: 2 
    upsample_w: 60 
    upsample_h: 45 
    } 
} 

I think I should change upsample_w and upsample_h, but I don't know the exact values. Can anybody tell me the relationship between scale, upsample_w, upsample_h and the image size, or how to calculate them?

The full definition of the network (segnet_train.prototxt):

name: "VGG_ILSVRC_16_layer" 
layer { 
    name: "data" 
    type: "DenseImageData" 
    top: "data" 
    top: "label" 
    dense_image_data_param { 
    source: "/home/zhaimo/SegNet/CamVid/mytrain.txt" # Change this to the absolute path to your data file 
    batch_size: 4    # Change this number to a batch size that will fit on your GPU 
    shuffle: true 
    } 
} 
layer { 
    bottom: "data" 
    top: "conv1_1" 
    name: "conv1_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 64 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv1_1" 
    top: "conv1_1" 
    name: "conv1_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv1_1" 
    top: "conv1_1" 
    name: "relu1_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv1_1" 
    top: "conv1_2" 
    name: "conv1_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 64 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv1_2" 
    top: "conv1_2" 
    name: "conv1_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv1_2" 
    top: "conv1_2" 
    name: "relu1_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv1_2" 
    top: "pool1" 
    top: "pool1_mask" 
    name: "pool1" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool1" 
    top: "conv2_1" 
    name: "conv2_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 128 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv2_1" 
    top: "conv2_1" 
    name: "conv2_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv2_1" 
    top: "conv2_1" 
    name: "relu2_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv2_1" 
    top: "conv2_2" 
    name: "conv2_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 128 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv2_2" 
    top: "conv2_2" 
    name: "conv2_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv2_2" 
    top: "conv2_2" 
    name: "relu2_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv2_2" 
    top: "pool2" 
    top: "pool2_mask" 
    name: "pool2" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool2" 
    top: "conv3_1" 
    name: "conv3_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv3_1" 
    top: "conv3_1" 
    name: "conv3_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv3_1" 
    top: "conv3_1" 
    name: "relu3_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv3_1" 
    top: "conv3_2" 
    name: "conv3_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv3_2" 
    top: "conv3_2" 
    name: "conv3_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv3_2" 
    top: "conv3_2" 
    name: "relu3_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv3_2" 
    top: "conv3_3" 
    name: "conv3_3" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv3_3" 
    top: "conv3_3" 
    name: "conv3_3_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv3_3" 
    top: "conv3_3" 
    name: "relu3_3" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv3_3" 
    top: "pool3" 
    top: "pool3_mask" 
    name: "pool3" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool3" 
    top: "conv4_1" 
    name: "conv4_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv4_1" 
    top: "conv4_1" 
    name: "conv4_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv4_1" 
    top: "conv4_1" 
    name: "relu4_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv4_1" 
    top: "conv4_2" 
    name: "conv4_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv4_2" 
    top: "conv4_2" 
    name: "conv4_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv4_2" 
    top: "conv4_2" 
    name: "relu4_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv4_2" 
    top: "conv4_3" 
    name: "conv4_3" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv4_3" 
    top: "conv4_3" 
    name: "conv4_3_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv4_3" 
    top: "conv4_3" 
    name: "relu4_3" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv4_3" 
    top: "pool4" 
    top: "pool4_mask" 
    name: "pool4" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    bottom: "pool4" 
    top: "conv5_1" 
    name: "conv5_1" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv5_1" 
    top: "conv5_1" 
    name: "conv5_1_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv5_1" 
    top: "conv5_1" 
    name: "relu5_1" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv5_1" 
    top: "conv5_2" 
    name: "conv5_2" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv5_2" 
    top: "conv5_2" 
    name: "conv5_2_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv5_2" 
    top: "conv5_2" 
    name: "relu5_2" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv5_2" 
    top: "conv5_3" 
    name: "conv5_3" 
    type: "Convolution" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    weight_filler { 
     type: "msra" 
    } 
    bias_filler { 
     type: "constant" 
    } 
    num_output: 512 
    pad: 1 
    kernel_size: 3 
    } 
} 
layer { 
    bottom: "conv5_3" 
    top: "conv5_3" 
    name: "conv5_3_bn" 
    type: "BN" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 1 
    decay_mult: 0 
    } 
    bn_param { 
    scale_filler { 
     type: "constant" 
     value: 1 
    } 
    shift_filler { 
     type: "constant" 
     value: 0.001 
    } 
} 
} 
layer { 
    bottom: "conv5_3" 
    top: "conv5_3" 
    name: "relu5_3" 
    type: "ReLU" 
} 
layer { 
    bottom: "conv5_3" 
    top: "pool5" 
    top: "pool5_mask" 
    name: "pool5" 
    type: "Pooling" 
    pooling_param { 
    pool: MAX 
    kernel_size: 2 
    stride: 2 
    } 
} 
layer { 
    name: "upsample5" 
    type: "Upsample" 
    bottom: "pool5" 
    top: "pool5_D" 
    bottom: "pool5_mask" 
    upsample_param { 
    scale: 2 
    upsample_w: 30 
    upsample_h: 23 
    } 
} 
... (the rest is omitted)

The error you get is because the shape of 'conv5_1_D' does not match the shape of 'pool4_mask': they have different heights. – Shai
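As a worked example, assuming the input height is 584 and Caffe's ceil-based pooling output size (out = ceil((in - 2) / 2) + 1 for a 2×2, stride-2 pool), the encoder heights are 584 → 292 → 146 → 73 → 37 → 19. 'pool4_mask' therefore has height 37, while simply doubling the pool5-stage height (2 × 19) gives 38, which would produce exactly the "38 vs 37" mismatch in the log. upsample_h has to be set to the height the matching pooling layer actually produced (37 here), not just scale times the decoder input.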

Answer


You should change upsample_w and upsample_h. Each pooling layer halves the spatial size of your image, so count how many pooling layers come before each upsample layer and compute upsample_w and upsample_h from your image size accordingly (see the sketch below).
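A minimal sketch of that calculation, assuming Caffe's ceil-based pooling output formula (out = ceil((in + 2*pad - kernel) / stride) + 1, here with kernel 2, stride 2, no padding) and a 565 × 584 input; the helper names are illustrative, not part of SegNet or Caffe:

    import math

    def pool_out(size, kernel=2, stride=2, pad=0):
        # Caffe pooling output size: ceil((in + 2*pad - kernel) / stride) + 1
        return int(math.ceil((size + 2 * pad - kernel) / float(stride))) + 1

    def encoder_sizes(w, h, num_pools=5):
        # Spatial size after each 2x2, stride-2 max-pooling stage of the encoder.
        sizes = [(w, h)]
        for _ in range(num_pools):
            w, h = pool_out(w), pool_out(h)
            sizes.append((w, h))
        return sizes

    sizes = encoder_sizes(565, 584)  # assumed input: width 565, height 584
    for i, (w, h) in enumerate(sizes):
        label = "input" if i == 0 else "after pool%d" % i
        print("%s: %d x %d" % (label, w, h))

    # Each upsampleN must restore the size produced by pool(N-1):
    # upsample5 -> size after pool4, upsample4 -> size after pool3, ..., upsample1 -> input size.
    for n in range(5, 0, -1):
        w, h = sizes[n - 1]
        print("upsample%d: upsample_w: %d  upsample_h: %d" % (n, w, h))

Under these assumptions it suggests, for example, upsample_w: 36, upsample_h: 37 for upsample5 (the size pool4 produced) and upsample_w: 71, upsample_h: 73 for upsample4.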