
add e2e model.

pull/1/MERGE
jackyu 6 years ago
commit 3b955ff1e5
4 changed files with 148 additions and 9 deletions
  1. README.md (+9 -5)
  2. hyperlpr/e2e.py (+64 -0)
  3. hyperlpr/e2emodel.py (+35 -0)
  4. hyperlpr/pipline.py (+40 -4)

README.md (+9 -5)

@@ -8,15 +8,18 @@ HyperLPR is a deep-learning-based implementation of Chinese license plate recognition; compared with…


### Updates

+ Added an end-to-end sequence recognition model; recognition accuracy is greatly improved.
+ The added end-to-end model can recognize new-energy plates, driving-school (coach) plates, and white police plates.
+ Updated the Visual Studio project for the Windows version. (2017.11.15)

+ Added a C++ version; currently only standard blue plates are supported (depends on OpenCV 3.3).
+ Added a simple Android implementation (Snapdragon 835, *720*x*1280*, 200 ms).

### Features

+ Fast: recognition takes <=140 ms on 720p input with a single-core Intel 2.2 GHz CPU (MacBook Pro 2015).
+ End-to-end plate recognition, so no character segmentation is needed.
+ High accuracy: 0-error rate of 81.75% and 1-error rate of 94.1% on the EasyPR dataset.
+ Lightweight: the whole code base is under 1k lines.


@@ -81,13 +84,14 @@ sudo make -j


- [x] Single-row blue plates
- [x] Single-row yellow plates
- [ ] New-energy plates
- [x] New-energy plates
- [x] White police plates
- [x] Embassy / Hong Kong & Macau plates
- [x] Driving-school (coach) plates
- [ ] Double-row yellow plates
- [ ] Double-row armed-police plates
- [ ] Double-row military plates
- [ ] Agricultural vehicle plates
- [ ] White police plates
- [ ] Embassy / Hong Kong & Macau plates
- [ ] Civil aviation plates
- [ ] Personalized plates




hyperlpr/e2e.py (+64 -0)

@@ -0,0 +1,64 @@
#coding=utf-8
from keras import backend as K
from keras.models import load_model
from keras.layers import *
from captcha.image import ImageCaptcha
import numpy as np
import random
import string
import cv2
import e2emodel as model

# character table: province abbreviations, digits, letters (I and O excluded)
# and special plate characters; index len(chars) is reserved for the CTC blank.
chars = [u"京", u"沪", u"津", u"渝", u"冀", u"晋", u"蒙", u"辽", u"吉", u"黑", u"苏", u"浙", u"皖", u"闽", u"赣", u"鲁", u"豫", u"鄂", u"湘", u"粤", u"桂",
         u"琼", u"川", u"贵", u"云", u"藏", u"陕", u"甘", u"青", u"宁", u"新", u"0", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"A",
         u"B", u"C", u"D", u"E", u"F", u"G", u"H", u"J", u"K", u"L", u"M", u"N", u"P", u"Q", u"R", u"S", u"T", u"U", u"V", u"W", u"X",
         u"Y", u"Z", u"港", u"学", u"使", u"警", u"澳", u"挂", u"军", u"北", u"南", u"广", u"沈", u"兰", u"成", u"济", u"海", u"民", u"航", u"空"]

pred_model = model.construct_model("./model/ocr_plate_all_w_rnn_2.h5")
import time

# greedy CTC decode: take the argmax class of each output frame, skip the
# blank class and collapse consecutive repeats; returns (text, mean confidence).
def fastdecode(y_pred):
    results = ""
    confidence = 0.0
    table_pred = y_pred.reshape(-1, len(chars)+1)
    res = table_pred.argmax(axis=1)
    for i,one in enumerate(res):
        if one<len(chars) and (i==0 or (one!=res[i-1])):
            results += chars[one]
            confidence += table_pred[i][one]
    confidence /= len(results)
    return results,confidence

# recognize a single plate crop (BGR image): resize to 160x40, put the width
# on the time axis, run the model, drop the first two frames and decode.
def recognizeOne(src):
    # x_tempx = cv2.imread(src)
    x_tempx = src
    # x_tempx = cv2.bitwise_not(x_tempx)
    x_temp = cv2.resize(x_tempx,(160,40))
    x_temp = x_temp.transpose(1, 0, 2)
    t0 = time.time()
    y_pred = pred_model.predict(np.array([x_temp]))
    y_pred = y_pred[:,2:,:]
    # plt.imshow(y_pred.reshape(16,66))
    # plt.show()
    #
    # cv2.imshow("x_temp",x_tempx)
    # cv2.waitKey(0)
    return fastdecode(y_pred)

#
#
# import os
#
# path = "/Users/yujinke/PycharmProjects/HyperLPR_Python_web/cache/finemapping"
# for filename in os.listdir(path):
#     if filename.endswith(".png") or filename.endswith(".jpg") or filename.endswith(".bmp"):
#         x = os.path.join(path,filename)
#         recognizeOne(x)
#         # print time.time() - t0
#
#         # cv2.imshow("x",x)
#         # cv2.waitKey()
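
fastdecode is a plain greedy CTC decode: argmax class per output frame, skip the blank class (index len(chars)), collapse consecutive repeats. The toy prediction below illustrates that behaviour; it is an illustrative sketch rather than code from this commit, and importing e2e assumes the weight file under ./model is present, since the module loads the OCR model at import time.

# Toy check of the greedy CTC decode in e2e.fastdecode (illustrative sketch).
import numpy as np
import e2e   # note: loads ./model/ocr_plate_all_w_rnn_2.h5 as a side effect

C = len(e2e.chars) + 1                       # one extra class for the CTC blank
frames = [e2e.chars.index(u"京"), e2e.chars.index(u"京"), C - 1,
          e2e.chars.index(u"A"), e2e.chars.index(u"A"), e2e.chars.index(u"8")]
y = np.zeros((1, len(frames), C), dtype=np.float32)
for t, k in enumerate(frames):
    y[0, t, k] = 1.0                         # one-hot score per output frame
print(e2e.fastdecode(y))                     # repeats collapsed, blank dropped -> (u"京A8", 1.0)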

hyperlpr/e2emodel.py (+35 -0)

@@ -0,0 +1,35 @@

from keras import backend as K
from keras.models import *
from keras.layers import *
from captcha.image import ImageCaptcha
import e2e


# CTC loss wrapper used at training time; the first two output frames are
# dropped before computing the cost, matching the slicing in e2e.recognizeOne.
def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    y_pred = y_pred[:, 2:, :]
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)


# build the fully convolutional recognition network and load trained weights;
# the input is a 40-pixel-high BGR strip whose width acts as the time axis.
def construct_model(model_path):
    input_tensor = Input((None, 40, 3))
    x = input_tensor
    base_conv = 32

    # three conv / batch-norm / ReLU / 2x2 max-pool stages (32, 64, 128 filters)
    for i in range(3):
        x = Conv2D(base_conv * (2 ** (i)), (3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(256, (5, 5))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(1024, (1, 1))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # per-frame class scores: len(chars) characters plus one CTC blank
    x = Conv2D(len(e2e.chars)+1, (1, 1))(x)
    x = Activation('softmax')(x)
    base_model = Model(inputs=input_tensor, outputs=x)
    base_model.load_weights(model_path)
    return base_model
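
For the 160x40 crops that e2e.recognizeOne feeds into this network, the sequence length can be read off the layer shapes: the three 2x2 max-pools divide the width by 8, the unpadded 5x5 convolution trims 4 more columns, and the same arithmetic collapses the 40-pixel height to a single row, so the output is a 1-D sequence of class scores. The arithmetic below is an illustrative sketch, not code from the commit.

# Sequence-length arithmetic for a plate crop resized to 160x40 (illustrative sketch).
width = 160                  # plate width is the "time" axis of Input((None, 40, 3))
width = width // 2 ** 3      # three 2x2 max-pool stages            -> 20
width = width - (5 - 1)      # one valid (unpadded) 5x5 convolution -> 16
decoded = width - 2          # recognizeOne drops the first two frames -> 14 frames decoded,
                             # each a softmax over len(e2e.chars) + 1 classes
print("%d %d" % (width, decoded))   # 16 14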

hyperlpr/pipline.py (+40 -4)

@@ -22,7 +22,7 @@ sys.setdefaultencoding("utf-8")


fontC = ImageFont.truetype("./Font/platech.ttf", 14, 0);


import e2e
# find the left and right edges of the plate


def find_edge(image):
@@ -91,7 +91,7 @@ def horizontalSegmentation(image):
# draw the bounding box and the label text
def drawRectBox(image,rect,addText):
cv2.rectangle(image, (int(rect[0]), int(rect[1])), (int(rect[0] + rect[2]), int(rect[1] + rect[3])), (0,0, 255), 2,cv2.LINE_AA)
cv2.rectangle(image, (int(rect[0]-1), int(rect[1])-16), (int(rect[0] + 80), int(rect[1])), (0, 0, 255), -1,
cv2.rectangle(image, (int(rect[0]-1), int(rect[1])-16), (int(rect[0] + 115), int(rect[1])), (0, 0, 255), -1,
cv2.LINE_AA)


img = Image.fromarray(image)
@@ -129,7 +129,7 @@ def RecognizePlateJson(image):




ptype = td.SimplePredict(plate)
if ptype>0 and ptype<5:
if ptype>0 and ptype<4:
plate = cv2.bitwise_not(plate)
# demo = verticalEdgeDetection(plate)


@@ -137,7 +137,7 @@ def RecognizePlateJson(image):
image_rgb = fv.finemappingVertical(image_rgb)
cache.verticalMappingToFolder(image_rgb)
# print time.time() - t1,"校正"
print "e2e:",e2e.recognizeOne(image_rgb)[0]
image_gray = cv2.cvtColor(image_rgb,cv2.COLOR_BGR2GRAY)




@@ -184,6 +184,40 @@ def RecognizePlateJson(image):






def SimpleRecognizePlateByE2E(image):
    t0 = time.time()
    images = detect.detectPlateRough(image,image.shape[0],top_bottom_padding_rate=0.1)
    res_set = []
    for j,plate in enumerate(images):
        plate, rect, origin_plate = plate
        # plate = cv2.cvtColor(plate, cv2.COLOR_RGB2GRAY)
        plate = cv2.resize(plate,(136,36*2))
        # first pass: end-to-end recognition on the rough detection crop
        res,confidence = e2e.recognizeOne(origin_plate)
        print "res",res

        t1 = time.time()
        ptype = td.SimplePredict(plate)
        if ptype>0 and ptype<5:
            # pass
            plate = cv2.bitwise_not(plate)
        image_rgb = fm.findContoursAndDrawBoundingBox(plate)
        image_rgb = fv.finemappingVertical(image_rgb)
        image_rgb = fv.finemappingVertical(image_rgb)
        cache.verticalMappingToFolder(image_rgb)
        cv2.imwrite("./"+str(j)+".jpg",image_rgb)
        # second pass: recognize again on the fine-mapped crop
        res,confidence = e2e.recognizeOne(image_rgb)
        print res,confidence
        res_set.append([[],res,confidence])

        if confidence>0.7:
            image = drawRectBox(image, rect, res+" "+str(round(confidence,3)))
    return image,res_set








def SimpleRecognizePlate(image):
t0 = time.time()
@@ -200,8 +234,10 @@ def SimpleRecognizePlate(image):
plate = cv2.bitwise_not(plate)


image_rgb = fm.findContoursAndDrawBoundingBox(plate)

image_rgb = fv.finemappingVertical(image_rgb)
cache.verticalMappingToFolder(image_rgb)
print "e2e:", e2e.recognizeOne(image_rgb)
image_gray = cv2.cvtColor(image_rgb,cv2.COLOR_RGB2GRAY)


# image_gray = horizontalSegmentation(image_gray)
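
With the new SimpleRecognizePlateByE2E entry point, the end-to-end path can be exercised roughly as follows. This is a minimal usage sketch, not part of the commit: it assumes the detection and OCR model files that pipline.py and e2e.py load are present under ./model, and "demo.jpg" is a hypothetical input image.

# Minimal usage sketch for the new end-to-end pipeline (not part of the commit).
import cv2
import pipline

image = cv2.imread("demo.jpg")                      # hypothetical test image
annotated, res_set = pipline.SimpleRecognizePlateByE2E(image)
for _, plate_str, conf in res_set:                  # entries are [[], text, confidence]
    print("%s %.3f" % (plate_str, conf))
cv2.imwrite("demo_out.jpg", annotated)              # boxes are drawn only when confidence > 0.7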

