@@ -1,123 +0,0 @@
input: "data"
input_dim: 1
input_dim: 1
input_dim: 30
input_dim: 14
layer {
  name: "conv2d_1"
  type: "Convolution"
  bottom: "data"
  top: "conv2d_1"
  convolution_param {
    num_output: 32
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_1"
  type: "ReLU"
  bottom: "conv2d_1"
  top: "activation_1"
}
layer {
  name: "max_pooling2d_1"
  type: "Pooling"
  bottom: "activation_1"
  top: "max_pooling2d_1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2d_2"
  type: "Convolution"
  bottom: "max_pooling2d_1"
  top: "conv2d_2"
  convolution_param {
    num_output: 64
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_2"
  type: "ReLU"
  bottom: "conv2d_2"
  top: "activation_2"
}
layer {
  name: "max_pooling2d_2"
  type: "Pooling"
  bottom: "activation_2"
  top: "max_pooling2d_2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2d_3"
  type: "Convolution"
  bottom: "max_pooling2d_2"
  top: "conv2d_3"
  convolution_param {
    num_output: 128
    bias_term: true
    pad: 0
    kernel_size: 2
    stride: 1
  }
}
layer {
  name: "activation_3"
  type: "ReLU"
  bottom: "conv2d_3"
  top: "activation_3"
}
layer {
  name: "flatten_1"
  type: "Flatten"
  bottom: "activation_3"
  top: "flatten_1"
}
layer {
  name: "dense_1"
  type: "InnerProduct"
  bottom: "flatten_1"
  top: "dense_1"
  inner_product_param {
    num_output: 256
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "dense_1"
  top: "relu2"
}
layer {
  name: "dense2"
  type: "InnerProduct"
  bottom: "relu2"
  top: "dense2"
  inner_product_param {
    num_output: 65
  }
}
layer {
  name: "prob"
  type: "Softmax"
  bottom: "dense2"
  top: "prob"
}
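
The hunk above removes a 65-way character classifier that takes a 1x1x30x14 grayscale crop (N, C, H, W). A minimal sketch of driving it with OpenCV's DNN module, assuming the paired CharacterRecognization.caffemodel from the model list below and a hypothetical char.png crop:

# Sketch: run the 65-class character classifier via OpenCV DNN.
# File paths, the input crop, and the 1/255 scaling are assumptions
# for illustration, not taken from this diff.
import cv2
import numpy as np

net = cv2.dnn.readNetFromCaffe("CharacterRecognization.prototxt",
                               "CharacterRecognization.caffemodel")
crop = cv2.imread("char.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
# The net declares a 1x1x30x14 blob; blobFromImage takes size as (W, H).
blob = cv2.dnn.blobFromImage(crop, scalefactor=1.0 / 255, size=(14, 30))
net.setInput(blob)
prob = net.forward("prob")            # shape (1, 65)
print("predicted class:", int(np.argmax(prob)))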
@@ -1,95 +0,0 @@
input: "data"
input_dim: 1
input_dim: 3
input_dim: 16
input_dim: 66
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 10
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "max_pooling2d_3"
  type: "Pooling"
  bottom: "conv1"
  top: "max_pooling2d_3"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "max_pooling2d_3"
  top: "conv2"
  convolution_param {
    num_output: 16
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "conv2"
  top: "conv3"
  convolution_param {
    num_output: 32
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "flatten_2"
  type: "Flatten"
  bottom: "conv3"
  top: "flatten_2"
}
layer {
  name: "dense"
  type: "InnerProduct"
  bottom: "flatten_2"
  top: "dense"
  inner_product_param {
    num_output: 2
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "dense"
  top: "dense"
}
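
This fine-mapping hunk ends in a 2-unit InnerProduct with an in-place ReLU and no softmax, i.e. a regressor rather than a classifier. A short trace of the shape arithmetic, mirroring the layer parameters above, shows what the flatten layer feeds it:

# Sketch: valid-convolution output size is (dim + 2*pad - kernel) / stride + 1.
# Tracing the 16x66 input through the layers above:
def conv_out(h, w, k, s=1, p=0):
    return ((h + 2 * p - k) // s + 1, (w + 2 * p - k) // s + 1)

h, w = 16, 66
h, w = conv_out(h, w, 3)         # conv1:            14 x 64
h, w = conv_out(h, w, 2, s=2)    # max_pooling2d_3:   7 x 32
h, w = conv_out(h, w, 3)         # conv2:             5 x 30
h, w = conv_out(h, w, 3)         # conv3:             3 x 28
print(h, w, 32 * h * w)          # flatten_2 feeds 32*3*28 = 2688 values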
@@ -0,0 +1,11 @@
Place the following files from the /Prj-Linux/lpr/model directory into this directory:
cascade.xml
CharacterRecognization.caffemodel
CharacterRecognization.prototxt
HorizonalFinemapping.caffemodel
HorizonalFinemapping.prototxt
SegmenationFree-Inception.caffemodel
SegmenationFree-Inception.prototxt
Segmentation.caffemodel
Segmentation.prototxt
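
A small sketch, assuming the working directory is the target model directory, to verify the copy succeeded before any of the nets are loaded:

# Sketch: sanity-check that the model files listed above are in place.
import os

REQUIRED = [
    "cascade.xml",
    "CharacterRecognization.caffemodel", "CharacterRecognization.prototxt",
    "HorizonalFinemapping.caffemodel", "HorizonalFinemapping.prototxt",
    "SegmenationFree-Inception.caffemodel", "SegmenationFree-Inception.prototxt",
    "Segmentation.caffemodel", "Segmentation.prototxt",
]
missing = [f for f in REQUIRED if not os.path.isfile(f)]
if missing:
    raise FileNotFoundError("missing model files: " + ", ".join(missing))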
@@ -1,114 +0,0 @@
input: "data"
input_dim: 1
input_dim: 1
input_dim: 22
input_dim: 22
layer {
  name: "conv2d_12"
  type: "Convolution"
  bottom: "data"
  top: "conv2d_12"
  convolution_param {
    num_output: 16
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_18"
  type: "ReLU"
  bottom: "conv2d_12"
  top: "activation_18"
}
layer {
  name: "max_pooling2d_10"
  type: "Pooling"
  bottom: "activation_18"
  top: "max_pooling2d_10"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2d_13"
  type: "Convolution"
  bottom: "max_pooling2d_10"
  top: "conv2d_13"
  convolution_param {
    num_output: 16
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_19"
  type: "ReLU"
  bottom: "conv2d_13"
  top: "activation_19"
}
layer {
  name: "max_pooling2d_11"
  type: "Pooling"
  bottom: "activation_19"
  top: "max_pooling2d_11"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "flatten_6"
  type: "Flatten"
  bottom: "max_pooling2d_11"
  top: "flatten_6"
}
layer {
  name: "dense_9"
  type: "InnerProduct"
  bottom: "flatten_6"
  top: "dense_9"
  inner_product_param {
    num_output: 256
  }
}
layer {
  name: "dropout_9"
  type: "Dropout"
  bottom: "dense_9"
  top: "dropout_9"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "activation_20"
  type: "ReLU"
  bottom: "dropout_9"
  top: "activation_20"
}
layer {
  name: "dense_10"
  type: "InnerProduct"
  bottom: "activation_20"
  top: "dense_10"
  inner_product_param {
    num_output: 3
  }
}
layer {
  name: "prob"
  type: "Softmax"
  bottom: "dense_10"
  top: "prob"
}
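
The segmentation hunk above is a 3-class scorer over 22x22 grayscale windows; its Dropout layer is a pass-through at inference time. A minimal sketch with OpenCV's DNN module, where the paths, the window image, and the 1/255 scaling are assumptions:

# Sketch: score one 22x22 grayscale window with the 3-class segmentation net.
import cv2

net = cv2.dnn.readNetFromCaffe("Segmentation.prototxt",
                               "Segmentation.caffemodel")
window = cv2.imread("window.png", cv2.IMREAD_GRAYSCALE)  # hypothetical crop
blob = cv2.dnn.blobFromImage(window, scalefactor=1.0 / 255, size=(22, 22))
net.setInput(blob)
print(net.forward("prob"))  # shape (1, 3): one probability per class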
@@ -1,318 +0,0 @@
input: "data"
input_dim: 1
input_dim: 3
input_dim: 160
input_dim: 40
layer {
  name: "conv0"
  type: "Convolution"
  bottom: "data"
  top: "conv0"
  convolution_param {
    num_output: 32
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn0"
  type: "BatchNorm"
  bottom: "conv0"
  top: "bn0"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn0_scale"
  type: "Scale"
  bottom: "bn0"
  top: "bn0"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu0"
  type: "ReLU"
  bottom: "bn0"
  top: "bn0"
}
layer {
  name: "pool0"
  type: "Pooling"
  bottom: "bn0"
  top: "pool0"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "pool0"
  top: "conv1"
  convolution_param {
    num_output: 64
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "bn1"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn1_scale"
  type: "Scale"
  bottom: "bn1"
  top: "bn1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "bn1"
  top: "bn1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "bn1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param {
    num_output: 128
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn2"
  type: "BatchNorm"
  bottom: "conv2"
  top: "bn2"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn2_scale"
  type: "Scale"
  bottom: "bn2"
  top: "bn2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "bn2"
  top: "bn2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "bn2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
layer {
  name: "conv_512_15"
  type: "Convolution"
  bottom: "pool2"
  top: "conv_512_15"
  convolution_param {
    num_output: 512
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 5
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_1"
  type: "BatchNorm"
  bottom: "conv_512_15"
  top: "batch_normalization_1"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_1_scale"
  type: "Scale"
  bottom: "batch_normalization_1"
  top: "batch_normalization_1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_1"
  type: "ReLU"
  bottom: "batch_normalization_1"
  top: "batch_normalization_1"
}
layer {
  name: "conv_512_51"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv_512_51"
  convolution_param {
    num_output: 512
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 5
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_2"
  type: "BatchNorm"
  bottom: "conv_512_51"
  top: "batch_normalization_2"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_2_scale"
  type: "Scale"
  bottom: "batch_normalization_2"
  top: "batch_normalization_2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_2"
  type: "ReLU"
  bottom: "batch_normalization_2"
  top: "batch_normalization_2"
}
layer {
  name: "conv_1024_11"
  type: "Convolution"
  bottom: "batch_normalization_2"
  top: "conv_1024_11"
  convolution_param {
    num_output: 1024
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_3"
  type: "BatchNorm"
  bottom: "conv_1024_11"
  top: "batch_normalization_3"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_3_scale"
  type: "Scale"
  bottom: "batch_normalization_3"
  top: "batch_normalization_3"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_3"
  type: "ReLU"
  bottom: "batch_normalization_3"
  top: "batch_normalization_3"
}
layer {
  name: "conv_class_11"
  type: "Convolution"
  bottom: "batch_normalization_3"
  top: "conv_class_11"
  convolution_param {
    num_output: 84
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "prob"
  type: "Softmax"
  bottom: "conv_class_11"
  top: "prob"
}
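
The segmentation-free hunk is fully convolutional: with the declared 1x3x160x40 input, the three 2x2 poolings followed by the 1x5, 5x1, and 1x1 convolutions reduce conv_class_11's output to a (1, 84, 16, 1) map, and Caffe's Softmax (default axis 1) then gives an 84-way distribution at each of the 16 positions along the plate. Maps like this are typically decoded greedily, CTC-style. A sketch, under the assumptions that index 0 is the blank class and that the character table is supplied elsewhere:

# Sketch: greedy CTC-style decode of the (1, 84, 16, 1) probability map.
import numpy as np

def greedy_decode(prob_map, blank=0):
    seq = prob_map[0, :, :, 0].argmax(axis=0)   # best class per position
    out, prev = [], None
    for c in seq:
        if c != prev and c != blank:            # merge repeats, drop blanks
            out.append(int(c))
        prev = c
    return out

prob_map = np.random.rand(1, 84, 16, 1)  # stand-in for net.forward("prob")
print(greedy_decode(prob_map))           # indices into the character table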
@@ -1,123 +0,0 @@
input: "data"
input_dim: 1
input_dim: 1
input_dim: 30
input_dim: 14
layer {
  name: "conv2d_1"
  type: "Convolution"
  bottom: "data"
  top: "conv2d_1"
  convolution_param {
    num_output: 32
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_1"
  type: "ReLU"
  bottom: "conv2d_1"
  top: "activation_1"
}
layer {
  name: "max_pooling2d_1"
  type: "Pooling"
  bottom: "activation_1"
  top: "max_pooling2d_1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2d_2"
  type: "Convolution"
  bottom: "max_pooling2d_1"
  top: "conv2d_2"
  convolution_param {
    num_output: 64
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_2"
  type: "ReLU"
  bottom: "conv2d_2"
  top: "activation_2"
}
layer {
  name: "max_pooling2d_2"
  type: "Pooling"
  bottom: "activation_2"
  top: "max_pooling2d_2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2d_3"
  type: "Convolution"
  bottom: "max_pooling2d_2"
  top: "conv2d_3"
  convolution_param {
    num_output: 128
    bias_term: true
    pad: 0
    kernel_size: 2
    stride: 1
  }
}
layer {
  name: "activation_3"
  type: "ReLU"
  bottom: "conv2d_3"
  top: "activation_3"
}
layer {
  name: "flatten_1"
  type: "Flatten"
  bottom: "activation_3"
  top: "flatten_1"
}
layer {
  name: "dense_1"
  type: "InnerProduct"
  bottom: "flatten_1"
  top: "dense_1"
  inner_product_param {
    num_output: 256
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "dense_1"
  top: "relu2"
}
layer {
  name: "dense2"
  type: "InnerProduct"
  bottom: "relu2"
  top: "dense2"
  inner_product_param {
    num_output: 65
  }
}
layer {
  name: "prob"
  type: "Softmax"
  bottom: "dense2"
  top: "prob"
}
@@ -1,95 +0,0 @@
input: "data"
input_dim: 1
input_dim: 3
input_dim: 16
input_dim: 66
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 10
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "max_pooling2d_3"
  type: "Pooling"
  bottom: "conv1"
  top: "max_pooling2d_3"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "max_pooling2d_3"
  top: "conv2"
  convolution_param {
    num_output: 16
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "conv2"
  top: "conv3"
  convolution_param {
    num_output: 32
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "flatten_2"
  type: "Flatten"
  bottom: "conv3"
  top: "flatten_2"
}
layer {
  name: "dense"
  type: "InnerProduct"
  bottom: "flatten_2"
  top: "dense"
  inner_product_param {
    num_output: 2
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "dense"
  top: "dense"
}
@@ -0,0 +1,9 @@
Place the following files from the /Prj-Linux/lpr/model directory into this directory:
cascade.xml
CharacterRecognization.caffemodel
CharacterRecognization.prototxt
HorizonalFinemapping.caffemodel
HorizonalFinemapping.prototxt
Segmentation.caffemodel
Segmentation.prototxt
@@ -1,114 +0,0 @@
input: "data"
input_dim: 1
input_dim: 1
input_dim: 22
input_dim: 22
layer {
  name: "conv2d_12"
  type: "Convolution"
  bottom: "data"
  top: "conv2d_12"
  convolution_param {
    num_output: 16
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_18"
  type: "ReLU"
  bottom: "conv2d_12"
  top: "activation_18"
}
layer {
  name: "max_pooling2d_10"
  type: "Pooling"
  bottom: "activation_18"
  top: "max_pooling2d_10"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2d_13"
  type: "Convolution"
  bottom: "max_pooling2d_10"
  top: "conv2d_13"
  convolution_param {
    num_output: 16
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_19"
  type: "ReLU"
  bottom: "conv2d_13"
  top: "activation_19"
}
layer {
  name: "max_pooling2d_11"
  type: "Pooling"
  bottom: "activation_19"
  top: "max_pooling2d_11"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "flatten_6"
  type: "Flatten"
  bottom: "max_pooling2d_11"
  top: "flatten_6"
}
layer {
  name: "dense_9"
  type: "InnerProduct"
  bottom: "flatten_6"
  top: "dense_9"
  inner_product_param {
    num_output: 256
  }
}
layer {
  name: "dropout_9"
  type: "Dropout"
  bottom: "dense_9"
  top: "dropout_9"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "activation_20"
  type: "ReLU"
  bottom: "dropout_9"
  top: "activation_20"
}
layer {
  name: "dense_10"
  type: "InnerProduct"
  bottom: "activation_20"
  top: "dense_10"
  inner_product_param {
    num_output: 3
  }
}
layer {
  name: "prob"
  type: "Softmax"
  bottom: "dense_10"
  top: "prob"
}