# Caffe network definition: predicts ab color channels from the L (lightness) channel.
name: "LtoAB"
# Input: single-channel L image, shape 1x1x224x224 (N x C x H x W).
layer {
  name: "data_l"
  type: "Input"
  top: "data_l"
  input_param {
    shape { dim: 1 dim: 1 dim: 224 dim: 224 }
  }
}
# Stage 1: two 3x3 convolutions (64 outputs); the second downsamples by stride 2.
layer {
  name: "bw_conv1_1"
  type: "Convolution"
  bottom: "data_l"
  top: "conv1_1"
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
  }
}
layer {
  name: "relu1_1"
  type: "ReLU"
  bottom: "conv1_1"
  top: "conv1_1"
}
layer {
  name: "conv1_2"
  type: "Convolution"
  bottom: "conv1_1"
  top: "conv1_2"
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "relu1_2"
  type: "ReLU"
  bottom: "conv1_2"
  top: "conv1_2"
}
# BatchNorm with all lr_mult/decay_mult set to 0: parameters are frozen at inference.
layer {
  name: "conv1_2norm"
  type: "BatchNorm"
  bottom: "conv1_2"
  top: "conv1_2norm"
  batch_norm_param { }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
# Stage 2: two 3x3 convolutions (128 outputs); the second downsamples by stride 2.
layer {
  name: "conv2_1"
  type: "Convolution"
  bottom: "conv1_2norm"
  top: "conv2_1"
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
  }
}
layer {
  name: "relu2_1"
  type: "ReLU"
  bottom: "conv2_1"
  top: "conv2_1"
}
layer {
  name: "conv2_2"
  type: "Convolution"
  bottom: "conv2_1"
  top: "conv2_2"
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "relu2_2"
  type: "ReLU"
  bottom: "conv2_2"
  top: "conv2_2"
}
# Frozen BatchNorm (all param multipliers 0).
layer {
  name: "conv2_2norm"
  type: "BatchNorm"
  bottom: "conv2_2"
  top: "conv2_2norm"
  batch_norm_param { }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
# Stage 3: three 3x3 convolutions (256 outputs); the third downsamples by stride 2.
layer {
  name: "conv3_1"
  type: "Convolution"
  bottom: "conv2_2norm"
  top: "conv3_1"
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
  }
}
layer {
  name: "relu3_1"
  type: "ReLU"
  bottom: "conv3_1"
  top: "conv3_1"
}
layer {
  name: "conv3_2"
  type: "Convolution"
  bottom: "conv3_1"
  top: "conv3_2"
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
  }
}
layer {
  name: "relu3_2"
  type: "ReLU"
  bottom: "conv3_2"
  top: "conv3_2"
}
layer {
  name: "conv3_3"
  type: "Convolution"
  bottom: "conv3_2"
  top: "conv3_3"
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "relu3_3"
  type: "ReLU"
  bottom: "conv3_3"
  top: "conv3_3"
}
# Frozen BatchNorm (all param multipliers 0).
layer {
  name: "conv3_3norm"
  type: "BatchNorm"
  bottom: "conv3_3"
  top: "conv3_3norm"
  batch_norm_param { }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
# Stage 4: three 3x3 convolutions (512 outputs), stride 1, dilation 1 — spatial size preserved.
layer {
  name: "conv4_1"
  type: "Convolution"
  bottom: "conv3_3norm"
  top: "conv4_1"
  convolution_param {
    num_output: 512
    kernel_size: 3
    stride: 1
    pad: 1
    dilation: 1
  }
}
layer {
  name: "relu4_1"
  type: "ReLU"
  bottom: "conv4_1"
  top: "conv4_1"
}
layer {
  name: "conv4_2"
  type: "Convolution"
  bottom: "conv4_1"
  top: "conv4_2"
  convolution_param {
    num_output: 512
    kernel_size: 3
    stride: 1
    pad: 1
    dilation: 1
  }
}
layer {
  name: "relu4_2"
  type: "ReLU"
  bottom: "conv4_2"
  top: "conv4_2"
}
layer {
  name: "conv4_3"
  type: "Convolution"
  bottom: "conv4_2"
  top: "conv4_3"
  convolution_param {
    num_output: 512
    kernel_size: 3
    stride: 1
    pad: 1
    dilation: 1
  }
}
layer {
  name: "relu4_3"
  type: "ReLU"
  bottom: "conv4_3"
  top: "conv4_3"
}
# Frozen BatchNorm (all param multipliers 0).
layer {
  name: "conv4_3norm"
  type: "BatchNorm"
  bottom: "conv4_3"
  top: "conv4_3norm"
  batch_norm_param { }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
# Stage 5: three dilated 3x3 convolutions (512 outputs, dilation 2, pad 2) — enlarges the
# receptive field without further downsampling.
layer {
  name: "conv5_1"
  type: "Convolution"
  bottom: "conv4_3norm"
  top: "conv5_1"
  convolution_param {
    num_output: 512
    kernel_size: 3
    stride: 1
    pad: 2
    dilation: 2
  }
}
layer {
  name: "relu5_1"
  type: "ReLU"
  bottom: "conv5_1"
  top: "conv5_1"
}
layer {
  name: "conv5_2"
  type: "Convolution"
  bottom: "conv5_1"
  top: "conv5_2"
  convolution_param {
    num_output: 512
    kernel_size: 3
    stride: 1
    pad: 2
    dilation: 2
  }
}
layer {
  name: "relu5_2"
  type: "ReLU"
  bottom: "conv5_2"
  top: "conv5_2"
}
layer {
  name: "conv5_3"
  type: "Convolution"
  bottom: "conv5_2"
  top: "conv5_3"
  convolution_param {
    num_output: 512
    kernel_size: 3
    stride: 1
    pad: 2
    dilation: 2
  }
}
layer {
  name: "relu5_3"
  type: "ReLU"
  bottom: "conv5_3"
  top: "conv5_3"
}
# Frozen BatchNorm (all param multipliers 0).
layer {
  name: "conv5_3norm"
  type: "BatchNorm"
  bottom: "conv5_3"
  top: "conv5_3norm"
  batch_norm_param { }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
# Stage 6: three dilated 3x3 convolutions (512 outputs, dilation 2, pad 2).
layer {
  name: "conv6_1"
  type: "Convolution"
  bottom: "conv5_3norm"
  top: "conv6_1"
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 2
    dilation: 2
  }
}
layer {
  name: "relu6_1"
  type: "ReLU"
  bottom: "conv6_1"
  top: "conv6_1"
}
layer {
  name: "conv6_2"
  type: "Convolution"
  bottom: "conv6_1"
  top: "conv6_2"
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 2
    dilation: 2
  }
}
layer {
  name: "relu6_2"
  type: "ReLU"
  bottom: "conv6_2"
  top: "conv6_2"
}
layer {
  name: "conv6_3"
  type: "Convolution"
  bottom: "conv6_2"
  top: "conv6_3"
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 2
    dilation: 2
  }
}
layer {
  name: "relu6_3"
  type: "ReLU"
  bottom: "conv6_3"
  top: "conv6_3"
}
# Frozen BatchNorm (all param multipliers 0).
layer {
  name: "conv6_3norm"
  type: "BatchNorm"
  bottom: "conv6_3"
  top: "conv6_3norm"
  batch_norm_param { }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
# Stage 7: three 3x3 convolutions (512 outputs), back to dilation 1.
layer {
  name: "conv7_1"
  type: "Convolution"
  bottom: "conv6_3norm"
  top: "conv7_1"
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    dilation: 1
  }
}
layer {
  name: "relu7_1"
  type: "ReLU"
  bottom: "conv7_1"
  top: "conv7_1"
}
layer {
  name: "conv7_2"
  type: "Convolution"
  bottom: "conv7_1"
  top: "conv7_2"
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    dilation: 1
  }
}
layer {
  name: "relu7_2"
  type: "ReLU"
  bottom: "conv7_2"
  top: "conv7_2"
}
layer {
  name: "conv7_3"
  type: "Convolution"
  bottom: "conv7_2"
  top: "conv7_3"
  convolution_param {
    num_output: 512
    kernel_size: 3
    pad: 1
    dilation: 1
  }
}
layer {
  name: "relu7_3"
  type: "ReLU"
  bottom: "conv7_3"
  top: "conv7_3"
}
# Frozen BatchNorm (all param multipliers 0).
layer {
  name: "conv7_3norm"
  type: "BatchNorm"
  bottom: "conv7_3"
  top: "conv7_3norm"
  batch_norm_param { }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
  param { lr_mult: 0 decay_mult: 0 }
}
# Stage 8: 4x4 Deconvolution with stride 2 upsamples by 2x, followed by two 3x3
# convolutions (256 outputs each).
layer {
  name: "conv8_1"
  type: "Deconvolution"
  bottom: "conv7_3norm"
  top: "conv8_1"
  convolution_param {
    num_output: 256
    kernel_size: 4
    pad: 1
    dilation: 1
    stride: 2
  }
}
layer {
  name: "relu8_1"
  type: "ReLU"
  bottom: "conv8_1"
  top: "conv8_1"
}
layer {
  name: "conv8_2"
  type: "Convolution"
  bottom: "conv8_1"
  top: "conv8_2"
  convolution_param {
    num_output: 256
    kernel_size: 3
    pad: 1
    dilation: 1
  }
}
layer {
  name: "relu8_2"
  type: "ReLU"
  bottom: "conv8_2"
  top: "conv8_2"
}
layer {
  name: "conv8_3"
  type: "Convolution"
  bottom: "conv8_2"
  top: "conv8_3"
  convolution_param {
    num_output: 256
    kernel_size: 3
    pad: 1
    dilation: 1
  }
}
layer {
  name: "relu8_3"
  type: "ReLU"
  bottom: "conv8_3"
  top: "conv8_3"
}
# Output head: 1x1 convolution to 313 channels (one per quantized ab bin), constant
# rescale of the logits, softmax over bins, then a 1x1 convolution to the 2 ab channels.
layer {
  name: "conv8_313"
  type: "Convolution"
  bottom: "conv8_3"
  top: "conv8_313"
  convolution_param {
    num_output: 313
    kernel_size: 1
    stride: 1
    dilation: 1
  }
}
# Multiplies every logit by the constant 2.606 before the softmax (bias_term disabled).
# NOTE(review): 2.606 ~= 1/0.38, which looks like a softmax temperature — confirm against
# the training recipe.
layer {
  name: "conv8_313_rh"
  type: "Scale"
  bottom: "conv8_313"
  top: "conv8_313_rh"
  scale_param {
    bias_term: false
    filler { type: "constant" value: 2.606 }
  }
}
layer {
  name: "class8_313_rh"
  type: "Softmax"
  bottom: "conv8_313_rh"
  top: "class8_313_rh"
}
# 1x1 convolution mapping the 313-bin distribution to the 2 ab output channels.
layer {
  name: "class8_ab"
  type: "Convolution"
  bottom: "class8_313_rh"
  top: "class8_ab"
  convolution_param {
    num_output: 2
    kernel_size: 1
    stride: 1
    dilation: 1
  }
}
# Silence suppresses the unused top so the net has no dangling output blob warning;
# downstream code reads "class8_ab" directly.
layer {
  name: "Silence"
  type: "Silence"
  bottom: "class8_ab"
}