models

pull/4/head
jackyu1127 committed 4 years ago · commit 0b3386c456
8 changed files with 786 additions and 0 deletions
1. BIN      Prj-Linux/hyperlpr/model/CharacterRecognization.caffemodel
2. +123 -0  Prj-Linux/hyperlpr/model/CharacterRecognization.prototxt
3. BIN      Prj-Linux/hyperlpr/model/HorizonalFinemapping.caffemodel
4. +95 -0   Prj-Linux/hyperlpr/model/HorizonalFinemapping.prototxt
5. BIN      Prj-Linux/hyperlpr/model/SegmenationFree-Inception.caffemodel
6. +454 -0  Prj-Linux/hyperlpr/model/SegmenationFree-Inception.prototxt
7. BIN      Prj-Linux/hyperlpr/model/Segmentation.caffemodel
8. +114 -0  Prj-Linux/hyperlpr/model/Segmentation.prototxt

BIN  Prj-Linux/hyperlpr/model/CharacterRecognization.caffemodel

+123 -0  Prj-Linux/hyperlpr/model/CharacterRecognization.prototxt

@@ -0,0 +1,123 @@
input: "data"
input_dim: 1
input_dim: 1
input_dim: 30
input_dim: 14
layer {
  name: "conv2d_1"
  type: "Convolution"
  bottom: "data"
  top: "conv2d_1"
  convolution_param {
    num_output: 32
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_1"
  type: "ReLU"
  bottom: "conv2d_1"
  top: "activation_1"
}
layer {
  name: "max_pooling2d_1"
  type: "Pooling"
  bottom: "activation_1"
  top: "max_pooling2d_1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2d_2"
  type: "Convolution"
  bottom: "max_pooling2d_1"
  top: "conv2d_2"
  convolution_param {
    num_output: 64
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_2"
  type: "ReLU"
  bottom: "conv2d_2"
  top: "activation_2"
}
layer {
  name: "max_pooling2d_2"
  type: "Pooling"
  bottom: "activation_2"
  top: "max_pooling2d_2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2d_3"
  type: "Convolution"
  bottom: "max_pooling2d_2"
  top: "conv2d_3"
  convolution_param {
    num_output: 128
    bias_term: true
    pad: 0
    kernel_size: 2
    stride: 1
  }
}
layer {
  name: "activation_3"
  type: "ReLU"
  bottom: "conv2d_3"
  top: "activation_3"
}
layer {
  name: "flatten_1"
  type: "Flatten"
  bottom: "activation_3"
  top: "flatten_1"
}
layer {
  name: "dense_1"
  type: "InnerProduct"
  bottom: "flatten_1"
  top: "dense_1"
  inner_product_param {
    num_output: 256
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "dense_1"
  top: "relu2"
}
layer {
  name: "dense2"
  type: "InnerProduct"
  bottom: "relu2"
  top: "dense2"
  inner_product_param {
    num_output: 65
  }
}

layer {
  name: "prob"
  type: "Softmax"
  bottom: "dense2"
  top: "prob"
}
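This prototxt is a small stacked CNN for single-character recognition: three conv+ReLU stages (32, 64, 128 filters) with two max-pooling layers, a 256-unit fully connected layer, and a softmax over 65 classes (the plate character set). A minimal sketch of running it with OpenCV's DNN module; the 30x14 grayscale input follows the input_dim block (N=1, C=1, H=30, W=14), and the [0,1] pixel scaling and the "char.png" crop are assumptions, not part of this commit:

import cv2
import numpy as np

# Load the deploy prototxt plus weights (paths from this commit).
net = cv2.dnn.readNetFromCaffe(
    "Prj-Linux/hyperlpr/model/CharacterRecognization.prototxt",
    "Prj-Linux/hyperlpr/model/CharacterRecognization.caffemodel")

char_img = cv2.imread("char.png", cv2.IMREAD_GRAYSCALE)  # hypothetical character crop
# size is (width, height); scaling to [0,1] is an assumed preprocessing step
blob = cv2.dnn.blobFromImage(char_img, scalefactor=1.0 / 255, size=(14, 30))
net.setInput(blob)
prob = net.forward("prob")   # shape (1, 65)
cls = int(np.argmax(prob))   # index into the 65-way character table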

BIN  Prj-Linux/hyperlpr/model/HorizonalFinemapping.caffemodel

+95 -0  Prj-Linux/hyperlpr/model/HorizonalFinemapping.prototxt

@@ -0,0 +1,95 @@
input: "data"
input_dim: 1
input_dim: 3
input_dim: 16
input_dim: 66
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 10
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "max_pooling2d_3"
  type: "Pooling"
  bottom: "conv1"
  top: "max_pooling2d_3"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "max_pooling2d_3"
  top: "conv2"
  convolution_param {
    num_output: 16
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "conv2"
  top: "conv3"
  convolution_param {
    num_output: 32
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "flatten_2"
  type: "Flatten"
  bottom: "conv3"
  top: "flatten_2"
}
layer {
  name: "dense"
  type: "InnerProduct"
  bottom: "flatten_2"
  top: "dense"
  inner_product_param {
    num_output: 2
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "dense"
  top: "dense"
}
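This net takes a 66x16 color strip of the coarse plate region and ends in a 2-unit InnerProduct followed by ReLU, i.e. it regresses two non-negative values, presumably the left and right horizontal boundaries used to fine-crop the plate. A hedged sketch along the same lines (the [0,1] scaling and the boundary interpretation of the two outputs are assumptions):

import cv2

net = cv2.dnn.readNetFromCaffe(
    "Prj-Linux/hyperlpr/model/HorizonalFinemapping.prototxt",
    "Prj-Linux/hyperlpr/model/HorizonalFinemapping.caffemodel")

plate = cv2.imread("plate.png")         # hypothetical BGR plate crop
resized = cv2.resize(plate, (66, 16))   # (W, H) per the input_dim block
blob = cv2.dnn.blobFromImage(resized, scalefactor=1.0 / 255)
net.setInput(blob)
left, right = net.forward("dense")[0]   # two regressed boundary values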

BIN  Prj-Linux/hyperlpr/model/SegmenationFree-Inception.caffemodel

+454 -0  Prj-Linux/hyperlpr/model/SegmenationFree-Inception.prototxt

@@ -0,0 +1,454 @@
input: "data"
input_dim: 1
input_dim: 3
input_dim: 160
input_dim: 40
layer {
  name: "conv0"
  type: "Convolution"
  bottom: "data"
  top: "conv0"
  convolution_param {
    num_output: 32
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn0"
  type: "BatchNorm"
  bottom: "conv0"
  top: "bn0"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn0_scale"
  type: "Scale"
  bottom: "bn0"
  top: "bn0"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu0"
  type: "ReLU"
  bottom: "bn0"
  top: "bn0"
}
layer {
  name: "pool0"
  type: "Pooling"
  bottom: "bn0"
  top: "pool0"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "pool0"
  top: "conv1"
  convolution_param {
    num_output: 64
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "bn1"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn1_scale"
  type: "Scale"
  bottom: "bn1"
  top: "bn1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "bn1"
  top: "bn1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "bn1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param {
    num_output: 128
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn2"
  type: "BatchNorm"
  bottom: "conv2"
  top: "bn2"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn2_scale"
  type: "Scale"
  bottom: "bn2"
  top: "bn2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "bn2"
  top: "bn2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "bn2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}
layer {
  name: "conv2d_1"
  type: "Convolution"
  bottom: "pool2"
  top: "conv2d_1"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 5
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_1"
  type: "BatchNorm"
  bottom: "conv2d_1"
  top: "batch_normalization_1"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_1_scale"
  type: "Scale"
  bottom: "batch_normalization_1"
  top: "batch_normalization_1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_1"
  type: "ReLU"
  bottom: "batch_normalization_1"
  top: "batch_normalization_1"
}
layer {
  name: "conv2d_2"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_2"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 3
    pad_w: 0
    kernel_h: 7
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "conv2d_3"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_3"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 2
    pad_w: 0
    kernel_h: 5
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "conv2d_4"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_4"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "conv2d_5"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv2d_5"
  convolution_param {
    num_output: 256
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_2"
  type: "BatchNorm"
  bottom: "conv2d_2"
  top: "batch_normalization_2"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_2_scale"
  type: "Scale"
  bottom: "batch_normalization_2"
  top: "batch_normalization_2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "batch_normalization_3"
  type: "BatchNorm"
  bottom: "conv2d_3"
  top: "batch_normalization_3"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_3_scale"
  type: "Scale"
  bottom: "batch_normalization_3"
  top: "batch_normalization_3"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "batch_normalization_4"
  type: "BatchNorm"
  bottom: "conv2d_4"
  top: "batch_normalization_4"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_4_scale"
  type: "Scale"
  bottom: "batch_normalization_4"
  top: "batch_normalization_4"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "batch_normalization_5"
  type: "BatchNorm"
  bottom: "conv2d_5"
  top: "batch_normalization_5"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_5_scale"
  type: "Scale"
  bottom: "batch_normalization_5"
  top: "batch_normalization_5"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_2"
  type: "ReLU"
  bottom: "batch_normalization_2"
  top: "batch_normalization_2"
}
layer {
  name: "activation_3"
  type: "ReLU"
  bottom: "batch_normalization_3"
  top: "batch_normalization_3"
}
layer {
  name: "activation_4"
  type: "ReLU"
  bottom: "batch_normalization_4"
  top: "batch_normalization_4"
}
layer {
  name: "activation_5"
  type: "ReLU"
  bottom: "batch_normalization_5"
  top: "batch_normalization_5"
}
layer {
  name: "concatenate_1"
  type: "Concat"
  bottom: "batch_normalization_2"
  bottom: "batch_normalization_3"
  bottom: "batch_normalization_4"
  bottom: "batch_normalization_5"
  top: "concatenate_1"
  concat_param {
    axis: 1
  }
}
layer {
  name: "conv_1024_11"
  type: "Convolution"
  bottom: "concatenate_1"
  top: "conv_1024_11"
  convolution_param {
    num_output: 1024
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_6"
  type: "BatchNorm"
  bottom: "conv_1024_11"
  top: "batch_normalization_6"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_6_scale"
  type: "Scale"
  bottom: "batch_normalization_6"
  top: "batch_normalization_6"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_6"
  type: "ReLU"
  bottom: "batch_normalization_6"
  top: "batch_normalization_6"
}
layer {
  name: "conv_class_11"
  type: "Convolution"
  bottom: "batch_normalization_6"
  top: "conv_class_11"
  convolution_param {
    num_output: 84
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "prob"
  type: "Softmax"
  bottom: "conv_class_11"
  top: "prob"
}
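The segmentation-free net is fully convolutional: three conv/BN/pool stages, an Inception-style block concatenating 7x1, 5x1, 3x1, and 1x1 branches, a 1x1 conv up to 1024 channels, and a final 1x1 conv to 84 classes with a per-position softmax (Caffe's Softmax defaults to axis 1, the channel axis). With the declared input and three stride-2 poolings followed by the 1x5 conv, the prob blob is a sequence of 20 positions over 84 classes, suited to CTC-style greedy decoding: argmax per position, collapse repeats, drop the blank. A rough sketch of that decode; treating the last class index as the blank, the [0,1] scaling, and "plate.png" are all assumptions:

import cv2
import numpy as np

net = cv2.dnn.readNetFromCaffe(
    "Prj-Linux/hyperlpr/model/SegmenationFree-Inception.prototxt",
    "Prj-Linux/hyperlpr/model/SegmenationFree-Inception.caffemodel")

plate = cv2.imread("plate.png")  # hypothetical plate crop
# size is (width, height) = (40, 160), matching the input_dim block
blob = cv2.dnn.blobFromImage(plate, scalefactor=1.0 / 255, size=(40, 160))
net.setInput(blob)
prob = net.forward("prob")       # shape (1, 84, 20, 1)

seq = prob[0, :, :, 0].argmax(axis=0)  # best class at each of the 20 positions
BLANK = 83                             # assumed CTC blank index
decoded, prev = [], -1
for c in seq:
    if c != prev and c != BLANK:       # collapse repeats, skip the blank
        decoded.append(int(c))
    prev = c
# 'decoded' indexes into an 84-entry plate-character table (not part of this commit)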


BIN  Prj-Linux/hyperlpr/model/Segmentation.caffemodel

+114 -0  Prj-Linux/hyperlpr/model/Segmentation.prototxt

@@ -0,0 +1,114 @@
input: "data"
input_dim: 1
input_dim: 1
input_dim: 22
input_dim: 22
layer {
  name: "conv2d_12"
  type: "Convolution"
  bottom: "data"
  top: "conv2d_12"
  convolution_param {
    num_output: 16
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_18"
  type: "ReLU"
  bottom: "conv2d_12"
  top: "activation_18"
}
layer {
  name: "max_pooling2d_10"
  type: "Pooling"
  bottom: "activation_18"
  top: "max_pooling2d_10"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "conv2d_13"
  type: "Convolution"
  bottom: "max_pooling2d_10"
  top: "conv2d_13"
  convolution_param {
    num_output: 16
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "activation_19"
  type: "ReLU"
  bottom: "conv2d_13"
  top: "activation_19"
}
layer {
  name: "max_pooling2d_11"
  type: "Pooling"
  bottom: "activation_19"
  top: "max_pooling2d_11"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
layer {
  name: "flatten_6"
  type: "Flatten"
  bottom: "max_pooling2d_11"
  top: "flatten_6"
}
layer {
  name: "dense_9"
  type: "InnerProduct"
  bottom: "flatten_6"
  top: "dense_9"
  inner_product_param {
    num_output: 256
  }
}
layer {
  name: "dropout_9"
  type: "Dropout"
  bottom: "dense_9"
  top: "dropout_9"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "activation_20"
  type: "ReLU"
  bottom: "dropout_9"
  top: "activation_20"
}
layer {
  name: "dense_10"
  type: "InnerProduct"
  bottom: "activation_20"
  top: "dense_10"
  inner_product_param {
    num_output: 3
  }
}

layer {
  name: "prob"
  type: "Softmax"
  bottom: "dense_10"
  top: "prob"
}
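The last net is a tiny patch classifier: 22x22 grayscale windows go through two conv/pool stages, a 256-unit dense layer with dropout (a no-op at inference), and a softmax over 3 classes, presumably used to score candidate character cuts during sliding-window segmentation. A sketch under the same assumptions as the examples above (the class meanings and preprocessing are not specified by this commit):

import cv2
import numpy as np

net = cv2.dnn.readNetFromCaffe(
    "Prj-Linux/hyperlpr/model/Segmentation.prototxt",
    "Prj-Linux/hyperlpr/model/Segmentation.caffemodel")

patch = cv2.imread("patch.png", cv2.IMREAD_GRAYSCALE)  # hypothetical 22x22 window
blob = cv2.dnn.blobFromImage(patch, scalefactor=1.0 / 255, size=(22, 22))
net.setInput(blob)
scores = net.forward("prob")[0]  # 3-way softmax
label = int(np.argmax(scores))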
