diff --git a/Prj-Linux/lpr/model/SegmentationFree.prototxt b/Prj-Linux/lpr/model/SegmentationFree.prototxt
deleted file mode 100644
index 24fd8e2..0000000
--- a/Prj-Linux/lpr/model/SegmentationFree.prototxt
+++ /dev/null
@@ -1,318 +0,0 @@
-input: "data"
-input_dim: 1
-input_dim: 3
-input_dim: 160
-input_dim: 40
-layer {
-  name: "conv0"
-  type: "Convolution"
-  bottom: "data"
-  top: "conv0"
-  convolution_param {
-    num_output: 32
-    bias_term: true
-    pad_h: 1
-    pad_w: 1
-    kernel_h: 3
-    kernel_w: 3
-    stride_h: 1
-    stride_w: 1
-  }
-}
-layer {
-  name: "bn0"
-  type: "BatchNorm"
-  bottom: "conv0"
-  top: "bn0"
-  batch_norm_param {
-    moving_average_fraction: 0.99
-    eps: 0.001
-  }
-}
-layer {
-  name: "bn0_scale"
-  type: "Scale"
-  bottom: "bn0"
-  top: "bn0"
-  scale_param {
-    bias_term: true
-  }
-}
-layer {
-  name: "relu0"
-  type: "ReLU"
-  bottom: "bn0"
-  top: "bn0"
-}
-layer {
-  name: "pool0"
-  type: "Pooling"
-  bottom: "bn0"
-  top: "pool0"
-  pooling_param {
-    pool: MAX
-    kernel_h: 2
-    kernel_w: 2
-    stride_h: 2
-    stride_w: 2
-    pad_h: 0
-    pad_w: 0
-  }
-}
-layer {
-  name: "conv1"
-  type: "Convolution"
-  bottom: "pool0"
-  top: "conv1"
-  convolution_param {
-    num_output: 64
-    bias_term: true
-    pad_h: 1
-    pad_w: 1
-    kernel_h: 3
-    kernel_w: 3
-    stride_h: 1
-    stride_w: 1
-  }
-}
-layer {
-  name: "bn1"
-  type: "BatchNorm"
-  bottom: "conv1"
-  top: "bn1"
-  batch_norm_param {
-    moving_average_fraction: 0.99
-    eps: 0.001
-  }
-}
-layer {
-  name: "bn1_scale"
-  type: "Scale"
-  bottom: "bn1"
-  top: "bn1"
-  scale_param {
-    bias_term: true
-  }
-}
-layer {
-  name: "relu1"
-  type: "ReLU"
-  bottom: "bn1"
-  top: "bn1"
-}
-layer {
-  name: "pool1"
-  type: "Pooling"
-  bottom: "bn1"
-  top: "pool1"
-  pooling_param {
-    pool: MAX
-    kernel_h: 2
-    kernel_w: 2
-    stride_h: 2
-    stride_w: 2
-    pad_h: 0
-    pad_w: 0
-  }
-}
-layer {
-  name: "conv2"
-  type: "Convolution"
-  bottom: "pool1"
-  top: "conv2"
-  convolution_param {
-    num_output: 128
-    bias_term: true
-    pad_h: 1
-    pad_w: 1
-    kernel_h: 3
-    kernel_w: 3
-    stride_h: 1
-    stride_w: 1
-  }
-}
-layer {
-  name: "bn2"
-  type: "BatchNorm"
-  bottom: "conv2"
-  top: "bn2"
-  batch_norm_param {
-    moving_average_fraction: 0.99
-    eps: 0.001
-  }
-}
-layer {
-  name: "bn2_scale"
-  type: "Scale"
-  bottom: "bn2"
-  top: "bn2"
-  scale_param {
-    bias_term: true
-  }
-}
-layer {
-  name: "relu2"
-  type: "ReLU"
-  bottom: "bn2"
-  top: "bn2"
-}
-layer {
-  name: "pool2"
-  type: "Pooling"
-  bottom: "bn2"
-  top: "pool2"
-  pooling_param {
-    pool: MAX
-    kernel_h: 2
-    kernel_w: 2
-    stride_h: 2
-    stride_w: 2
-    pad_h: 0
-    pad_w: 0
-  }
-}
-layer {
-  name: "conv_512_15"
-  type: "Convolution"
-  bottom: "pool2"
-  top: "conv_512_15"
-  convolution_param {
-    num_output: 512
-    bias_term: true
-    pad_h: 0
-    pad_w: 0
-    kernel_h: 1
-    kernel_w: 5
-    stride_h: 1
-    stride_w: 1
-  }
-}
-layer {
-  name: "batch_normalization_1"
-  type: "BatchNorm"
-  bottom: "conv_512_15"
-  top: "batch_normalization_1"
-  batch_norm_param {
-    moving_average_fraction: 0.99
-    eps: 0.001
-  }
-}
-layer {
-  name: "batch_normalization_1_scale"
-  type: "Scale"
-  bottom: "batch_normalization_1"
-  top: "batch_normalization_1"
-  scale_param {
-    bias_term: true
-  }
-}
-layer {
-  name: "activation_1"
-  type: "ReLU"
-  bottom: "batch_normalization_1"
-  top: "batch_normalization_1"
-}
-layer {
-  name: "conv_512_51"
-  type: "Convolution"
-  bottom: "batch_normalization_1"
-  top: "conv_512_51"
-  convolution_param {
-    num_output: 512
-    bias_term: true
-    pad_h: 0
-    pad_w: 0
-    kernel_h: 5
-    kernel_w: 1
-    stride_h: 1
-    stride_w: 1
-  }
-}
-layer {
-  name: "batch_normalization_2"
-  type: "BatchNorm"
-  bottom: "conv_512_51"
-  top: "batch_normalization_2"
-  batch_norm_param {
-    moving_average_fraction: 0.99
-    eps: 0.001
-  }
-}
-layer {
-  name: "batch_normalization_2_scale"
-  type: "Scale"
-  bottom: "batch_normalization_2"
-  top: "batch_normalization_2"
-  scale_param {
-    bias_term: true
-  }
-}
-layer {
-  name: "activation_2"
-  type: "ReLU"
-  bottom: "batch_normalization_2"
-  top: "batch_normalization_2"
-}
-layer {
-  name: "conv_1024_11"
-  type: "Convolution"
-  bottom: "batch_normalization_2"
-  top: "conv_1024_11"
-  convolution_param {
-    num_output: 1024
-    bias_term: true
-    pad_h: 0
-    pad_w: 0
-    kernel_h: 1
-    kernel_w: 1
-    stride_h: 1
-    stride_w: 1
-  }
-}
-layer {
-  name: "batch_normalization_3"
-  type: "BatchNorm"
-  bottom: "conv_1024_11"
-  top: "batch_normalization_3"
-  batch_norm_param {
-    moving_average_fraction: 0.99
-    eps: 0.001
-  }
-}
-layer {
-  name: "batch_normalization_3_scale"
-  type: "Scale"
-  bottom: "batch_normalization_3"
-  top: "batch_normalization_3"
-  scale_param {
-    bias_term: true
-  }
-}
-layer {
-  name: "activation_3"
-  type: "ReLU"
-  bottom: "batch_normalization_3"
-  top: "batch_normalization_3"
-}
-layer {
-  name: "conv_class_11"
-  type: "Convolution"
-  bottom: "batch_normalization_3"
-  top: "conv_class_11"
-  convolution_param {
-    num_output: 84
-    bias_term: true
-    pad_h: 0
-    pad_w: 0
-    kernel_h: 1
-    kernel_w: 1
-    stride_h: 1
-    stride_w: 1
-  }
-}
-layer {
-  name: "prob"
-  type: "Softmax"
-  bottom: "conv_class_11"
-  top: "prob"
-}