
I notice that there are upsample layers in SegNet. Its own images are 480*360, but when I try to use my images (565*584), I get the following error:

I0929 03:58:06.238135 22750 net.cpp:368] upsample4 -> pool4_D
I0929 03:58:06.238142 22750 net.cpp:120] Setting up upsample4
F0929 03:58:06.238164 22750 upsample_layer.cpp:63] Check failed: bottom[0]->height() == bottom[1]->height() (38 vs. 37) 

here is the definition:

layer {
  name: "upsample4"
  type: "Upsample"
  bottom: "conv5_1_D"
  top: "pool4_D"
  bottom: "pool4_mask"
  upsample_param {
    scale: 2
    upsample_w: 60
    upsample_h: 45
  }
}

I think I should change upsample_w and upsample_h, but I don't know the exact values. Can anybody tell me the relationship between scale, upsample_w, upsample_h and the image size, or how to calculate them?

The whole definition of the net (segnet_train.prototxt):

name: "VGG_ILSVRC_16_layer"
layer {
  name: "data"
  type: "DenseImageData"
  top: "data"
  top: "label"
  dense_image_data_param {
    source: "/home/zhaimo/SegNet/CamVid/mytrain.txt"    # Change this to the absolute path to your data file
    batch_size: 4               # Change this number to a batch size that will fit on your GPU
    shuffle: true
  }
}
layer {
  bottom: "data"
  top: "conv1_1"
  name: "conv1_1"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 64
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv1_1"
  top: "conv1_1"
  name: "conv1_1_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv1_1"
  top: "conv1_1"
  name: "relu1_1"
  type: "ReLU"
}
layer {
  bottom: "conv1_1"
  top: "conv1_2"
  name: "conv1_2"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 64
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv1_2"
  top: "conv1_2"
  name: "conv1_2_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv1_2"
  top: "conv1_2"
  name: "relu1_2"
  type: "ReLU"
}
layer {
  bottom: "conv1_2"
  top: "pool1"
  top: "pool1_mask"
  name: "pool1"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  bottom: "pool1"
  top: "conv2_1"
  name: "conv2_1"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 128
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv2_1"
  top: "conv2_1"
  name: "conv2_1_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv2_1"
  top: "conv2_1"
  name: "relu2_1"
  type: "ReLU"
}
layer {
  bottom: "conv2_1"
  top: "conv2_2"
  name: "conv2_2"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 128
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv2_2"
  top: "conv2_2"
  name: "conv2_2_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv2_2"
  top: "conv2_2"
  name: "relu2_2"
  type: "ReLU"
}
layer {
  bottom: "conv2_2"
  top: "pool2"
  top: "pool2_mask"
  name: "pool2"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  bottom: "pool2"
  top: "conv3_1"
  name: "conv3_1"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 256
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv3_1"
  top: "conv3_1"
  name: "conv3_1_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv3_1"
  top: "conv3_1"
  name: "relu3_1"
  type: "ReLU"
}
layer {
  bottom: "conv3_1"
  top: "conv3_2"
  name: "conv3_2"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 256
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv3_2"
  top: "conv3_2"
  name: "conv3_2_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv3_2"
  top: "conv3_2"
  name: "relu3_2"
  type: "ReLU"
}
layer {
  bottom: "conv3_2"
  top: "conv3_3"
  name: "conv3_3"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 256
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv3_3"
  top: "conv3_3"
  name: "conv3_3_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv3_3"
  top: "conv3_3"
  name: "relu3_3"
  type: "ReLU"
}
layer {
  bottom: "conv3_3"
  top: "pool3"
  top: "pool3_mask"
  name: "pool3"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  bottom: "pool3"
  top: "conv4_1"
  name: "conv4_1"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 512
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv4_1"
  top: "conv4_1"
  name: "conv4_1_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv4_1"
  top: "conv4_1"
  name: "relu4_1"
  type: "ReLU"
}
layer {
  bottom: "conv4_1"
  top: "conv4_2"
  name: "conv4_2"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 512
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv4_2"
  top: "conv4_2"
  name: "conv4_2_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv4_2"
  top: "conv4_2"
  name: "relu4_2"
  type: "ReLU"
}
layer {
  bottom: "conv4_2"
  top: "conv4_3"
  name: "conv4_3"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 512
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv4_3"
  top: "conv4_3"
  name: "conv4_3_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv4_3"
  top: "conv4_3"
  name: "relu4_3"
  type: "ReLU"
}
layer {
  bottom: "conv4_3"
  top: "pool4"
  top: "pool4_mask"
  name: "pool4"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  bottom: "pool4"
  top: "conv5_1"
  name: "conv5_1"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 512
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv5_1"
  top: "conv5_1"
  name: "conv5_1_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv5_1"
  top: "conv5_1"
  name: "relu5_1"
  type: "ReLU"
}
layer {
  bottom: "conv5_1"
  top: "conv5_2"
  name: "conv5_2"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 512
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv5_2"
  top: "conv5_2"
  name: "conv5_2_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv5_2"
  top: "conv5_2"
  name: "relu5_2"
  type: "ReLU"
}
layer {
  bottom: "conv5_2"
  top: "conv5_3"
  name: "conv5_3"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 512
    pad: 1
    kernel_size: 3
  }
}
layer {
  bottom: "conv5_3"
  top: "conv5_3"
  name: "conv5_3_bn"
  type: "BN"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 0
  }
  bn_param {
    scale_filler {
      type: "constant"
      value: 1
    }
    shift_filler {
      type: "constant"
      value: 0.001
    }
 }
}
layer {
  bottom: "conv5_3"
  top: "conv5_3"
  name: "relu5_3"
  type: "ReLU"
}
layer {
  bottom: "conv5_3"
  top: "pool5"
  top: "pool5_mask"
  name: "pool5"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "upsample5"
  type: "Upsample"
  bottom: "pool5"
  top: "pool5_D"
  bottom: "pool5_mask"
  upsample_param {
    scale: 2
    upsample_w: 30
    upsample_h: 23
  }
}
... (the rest is omitted)
StalkerMuse
  • the error you get is because the `shape` of `conv5_1_D` is **different** than the `shape` of `pool4_mask`: they have different `height`. – Shai Sep 29 '16 at 12:42

1 Answer


You should change upsample_w and upsample_h. Every pooling layer decreases your image size by a factor of 2, so count how many pooling layers you have and then calculate the upsample sizes from the size of your images. A sketch of that calculation is below.
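
As a minimal sketch (assuming Caffe's default ceil-mode pooling with kernel 2, stride 2 and no padding, as in this prototxt): each upsampleN layer should set upsample_w/upsample_h to the width/height of the blob that entered poolN, i.e. the size remaining after pool N-1, so its output lines up with the corresponding pool mask on the encoder side.

import math

def pooled(size, kernel=2, stride=2, pad=0):
    # Caffe pooling output size in its default (ceil) mode:
    # out = ceil((in + 2*pad - kernel) / stride) + 1
    return int(math.ceil((size + 2 * pad - kernel) / float(stride))) + 1

w, h = 565, 584                  # the question's image size (width x height)

sizes = [(w, h)]                 # sizes[n] = spatial size after pool n (index 0 = input)
for _ in range(5):               # SegNet's encoder has 5 max-pooling layers
    w, h = pooled(w), pooled(h)
    sizes.append((w, h))

# upsampleN should restore the size that poolN downsampled from,
# i.e. the size after pool(N-1)
for n in range(5, 0, -1):
    up_w, up_h = sizes[n - 1]
    print("upsample%d: upsample_w: %d  upsample_h: %d" % (n, up_w, up_h))

For a 565*584 (width*height) input this gives upsample5: 36/37, upsample4: 71/73, upsample3: 142/146, upsample2: 283/292 and upsample1: 565/584 (w/h). Wherever an odd size was ceil-halved on the way down, scale: 2 alone overshoots by one pixel, so those upsample layers need explicit upsample_w/upsample_h. The 38 vs. 37 in your log fits this: pool4_mask has height 37, while the decoder blob reaches upsample4 at 2 * 19 = 38, i.e. pool5's height simply doubled instead of being restored to 37.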

Z.Kal