diff --git a/.vscode/settings.json b/.vscode/settings.json
index 301fa29..d36c539 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -5,6 +5,7 @@
     "iconify",
     "intlify",
     "mingcute",
+    "onnx",
    "pinia",
     "pnpm",
     "pyinstaller",
diff --git a/app/api/api.py b/app/api/api.py
index b7e67ae..aa843a3 100644
--- a/app/api/api.py
+++ b/app/api/api.py
@@ -1,9 +1,118 @@
+from api.model.model import PicoDet
+import base64
+import time
+import cv2
+import json
+import os
+import sys
+
+
+def getFile(ruleFile):
+    '''Resolve ruleFile against the app directory, whether frozen (PyInstaller) or run from source.'''
+    if getattr(sys, 'frozen', False):
+        absPath = os.path.dirname(os.path.abspath(sys.executable))
+    elif __file__:
+        absPath = os.path.dirname(os.path.abspath(__file__))
+    else:
+        absPath = ''
+    return os.path.join(absPath, ruleFile)
+
+
+def get_settings_status_name(data, settings, setting_name):
+    # Find the entry named setting_name, then return the name of its first active option
+    for setting in data[settings]:
+        if setting["name"] == setting_name:
+            names = list(map(lambda x: x["name"], filter(lambda x: x["status"], setting["status"])))
+            return names[0]
+
+
+def get_setting_status(data, settings, setting_name):
+    # Return the status value of the entry named setting_name
+    status = list(map(lambda x: x["status"], filter(lambda x: x["name"] == setting_name, data[settings])))
+    return status[0]
+
+
+def get_photo(cap):
+    # Grab a single frame from the already-opened camera
+    f, frame = cap.read()
+    return frame
+
+
+def get_photo_detect(cap, net):
+    '''Input: (camera capture, network model); output: detection result.'''
+    result = net.detect_img(get_photo(cap), show_result=False)
+    return result
+
+
+def get_photo_detect_img(cap, net):
+    '''Input: (camera capture, network model); output: [elapsed time, detection result, annotated image].'''
+    start = time.time()
+    [result, img] = net.detect_img(get_photo(cap), show_result=True)
+    end = time.time()
+    return [end - start, result, img]
+
+
+def mat2base64(frame):
+    # Encode an OpenCV image (numpy array) as a base64 JPEG string
+    image = cv2.imencode('.jpg', frame)[1]
+    return base64.b64encode(image).decode('utf-8')
+
+
+def get_photo_base64(cap):
+    return mat2base64(get_photo(cap))
+
+
 class API:
     '''Local API, called by the frontend JS'''
     window = None
+    net = None
+    cap = None
+    args = None
+
-    def getOwner(self):
+    def __init__(self):
+        with open(getFile("config.json"), 'r', encoding='utf8') as fp:
+            self.args = json.load(fp)
+
+        self.net = PicoDet(
+            get_settings_status_name(self.args, "ModelSetting", "模型版本设置"),
+            self.args['classfile'],
+            prob_threshold=get_setting_status(self.args, "ModelSetting", 'confThreshold'),
+            iou_threshold=get_setting_status(self.args, "ModelSetting", 'nmsThreshold'))
+
+        # Open camera 0 once and keep the handle for later captures
+        self.cap = cv2.VideoCapture(0)
+
+    def getPrimaryImg(self):
         self.window.evaluate_js('getPythonData()')
-        return '我是Python'
+        return get_photo_base64(self.cap)
+
+    def getDetectImg(self):
+        [elapsed, result, img] = get_photo_detect_img(self.cap, self.net)
+        return [elapsed, result, mat2base64(img)]
+
+    def getIndexSetting(self):
+        toggle = self.args['toggle']
+        tip = self.args['tip']
+        control = self.args['control']
+        return [toggle, tip, control]
+
+    def getAdvanceSetting(self):
+        return [self.args['ControlSetting'], self.args['ModelSetting'],
+                self.args['otherSetting']]
+
+    def changeSetting(self, data):
+        self.args.update(data)
+        with open(getFile("config.json"), 'w', encoding='utf8') as fp:
+            json.dump(self.args, fp, ensure_ascii=False, indent=4)
+
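The `mat2base64`/`get_photo_base64` helpers above are what feed camera frames to the frontend: a frame is JPEG-encoded with `cv2.imencode` and then base64-encoded so JS can use it directly as an image source. A minimal round-trip sketch of that path, using a synthetic frame instead of a real camera so it runs anywhere:

```python
import base64

import cv2
import numpy as np

# Synthetic 240x320 BGR frame standing in for a cap.read() result
frame = np.zeros((240, 320, 3), dtype=np.uint8)

# Same encoding path as mat2base64: JPEG bytes, then base64 text
ok, buf = cv2.imencode('.jpg', frame)
assert ok
b64 = base64.b64encode(buf).decode('utf-8')

# Reverse it to confirm the payload decodes back to an image
raw = np.frombuffer(base64.b64decode(b64), dtype=np.uint8)
decoded = cv2.imdecode(raw, cv2.IMREAD_COLOR)
print(decoded.shape)  # (240, 320, 3)
```

On the JS side the string would typically be used as `data:image/jpeg;base64,<b64>`; that prefix is an assumption here, since the frontend half of the exchange isn't part of this diff.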
diff --git a/app/api/config.json b/app/api/config.json
new file mode 100644
index 0000000..d7dd5fe
--- /dev/null
+++ b/app/api/config.json
@@ -0,0 +1,111 @@
+{
+    "classfile": "coco_label.txt",
+    "img_fold": "./imgs",
+    "result_fold": "results",
+    "toggle": false,
+    "tip": false,
+    "control": [
+        {
+            "name": "嘴控",
+            "status": false
+        },
+        {
+            "name": "眼控",
+            "status": false
+        },
+        {
+            "name": "嘴/眼控",
+            "status": true
+        }
+    ],
+    "ControlSetting": [
+        {
+            "name": "控制设置",
+            "status": [
+                {
+                    "name": "开/关/开",
+                    "status": true
+                },
+                {
+                    "name": "关/开/关",
+                    "status": false
+                },
+                {
+                    "name": "闭上持续2S",
+                    "status": false
+                },
+                {
+                    "name": "张开持续2S",
+                    "status": false
+                }
+            ],
+            "description": "控制设置"
+        },
+        {
+            "name": "控制功能",
+            "status": [
+                {
+                    "name": "悬停鼠标",
+                    "status": true
+                },
+                {
+                    "name": "单击按钮",
+                    "status": false
+                }
+            ]
+        }
+    ],
+    "ModelSetting": [
+        {
+            "name": "模型版本设置",
+            "status": [
+                {
+                    "name": "model_1.0.onnx",
+                    "status": true
+                }
+            ],
+            "description": "目前仅有官方提供模型,可通过个性化设置进行修改",
+            "link": "/Personalization"
+        },
+        {
+            "name": "置信度",
+            "status": 0.5
+        },
+        {
+            "name": "confThreshold",
+            "status": 0.5
+        },
+        {
+            "name": "nmsThreshold",
+            "status": 0.6
+        },
+        {
+            "name": "图片载入方式",
+            "status": [
+                {
+                    "name": "不裁剪",
+                    "status": true
+                },
+                {
+                    "name": "居中裁剪成224*224",
+                    "status": false
+                },
+                {
+                    "name": "居中裁剪成320*320",
+                    "status": false
+                }
+            ]
+        }
+    ],
+    "otherSetting": [
+        {
+            "name": "系统摄像头选择",
+            "status": [
+                {
+                    "name": "摄像头0",
+                    "status": true
+                }
+            ]
+        }
+    ]
+}
\ No newline at end of file
diff --git a/app/api/model/coco_label.txt b/app/api/model/coco_label.txt
new file mode 100644
index 0000000..0bfce16
--- /dev/null
+++ b/app/api/model/coco_label.txt
@@ -0,0 +1,4 @@
+closed_eye
+closed_mouth
+open_eye
+open_mouth
\ No newline at end of file
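The two lookup helpers in api.py treat each entry of this file as either a scalar setting (`"confThreshold": 0.5`) or a radio-button group, i.e. a `status` list in which exactly one option is `true`. A sketch of both lookups against a trimmed copy of the `ModelSetting` block (simplified from the filter/map versions above, same behavior):

```python
# Trimmed copy of config.json's ModelSetting block, for illustration only
config = {
    "ModelSetting": [
        {"name": "模型版本设置",
         "status": [{"name": "model_1.0.onnx", "status": True}]},
        {"name": "confThreshold", "status": 0.5},
    ]
}

def get_settings_status_name(data, settings, setting_name):
    # Radio group: return the name of the option whose status is true
    for setting in data[settings]:
        if setting["name"] == setting_name:
            return next(s["name"] for s in setting["status"] if s["status"])

def get_setting_status(data, settings, setting_name):
    # Scalar: return the status value itself
    return next(s["status"] for s in data[settings] if s["name"] == setting_name)

print(get_settings_status_name(config, "ModelSetting", "模型版本设置"))  # model_1.0.onnx
print(get_setting_status(config, "ModelSetting", "confThreshold"))       # 0.5
```

Both helpers assume the entry exists and has an active option; a missing name falls through to `None` or raises, so the config file and the lookup keys in api.py have to stay in sync.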
diff --git a/app/api/model/model.py b/app/api/model/model.py
new file mode 100644
index 0000000..c756a43
--- /dev/null
+++ b/app/api/model/model.py
@@ -0,0 +1,218 @@
+import cv2
+import numpy as np
+import onnxruntime as ort
+from pathlib import Path
+from tqdm import tqdm
+import sys
+import os
+
+
+def getFile(ruleFile):
+    '''Resolve ruleFile against the app directory, whether frozen (PyInstaller) or run from source.'''
+    if getattr(sys, 'frozen', False):
+        absPath = os.path.dirname(os.path.abspath(sys.executable))
+    elif __file__:
+        absPath = os.path.dirname(os.path.abspath(__file__))
+    else:
+        absPath = ''
+    return os.path.join(absPath, ruleFile)
+
+
+class PicoDet():
+    def __init__(self,
+                 model_pb_path,
+                 label_path,
+                 prob_threshold=0.4,
+                 iou_threshold=0.3):
+        with open(getFile(label_path), 'r') as f:
+            self.classes = list(map(lambda x: x.strip(), f.readlines()))
+        self.num_classes = len(self.classes)
+        self.prob_threshold = prob_threshold
+        self.iou_threshold = iou_threshold
+        self.mean = np.array(
+            [103.53, 116.28, 123.675], dtype=np.float32).reshape(1, 1, 3)
+        self.std = np.array(
+            [57.375, 57.12, 58.395], dtype=np.float32).reshape(1, 1, 3)
+        so = ort.SessionOptions()
+        so.log_severity_level = 3
+        self.net = ort.InferenceSession(getFile(model_pb_path), so)
+        inputs_name = [a.name for a in self.net.get_inputs()]
+        inputs_shape = {
+            k: v.shape
+            for k, v in zip(inputs_name, self.net.get_inputs())
+        }
+        self.input_shape = inputs_shape['image'][2:]
+
+    def _normalize(self, img):
+        img = img.astype(np.float32)
+        img = (img / 255.0 - self.mean / 255.0) / (self.std / 255.0)
+        return img
+
+    def resize_image(self, srcimg, keep_ratio=False):
+        top, left, newh, neww = 0, 0, self.input_shape[0], self.input_shape[1]
+        origin_shape = srcimg.shape[:2]
+        im_scale_y = newh / float(origin_shape[0])
+        im_scale_x = neww / float(origin_shape[1])
+        img_shape = np.array([
+            [float(self.input_shape[0]), float(self.input_shape[1])]
+        ]).astype('float32')
+        scale_factor = np.array([[im_scale_y, im_scale_x]]).astype('float32')
+
+        if keep_ratio and srcimg.shape[0] != srcimg.shape[1]:
+            hw_scale = srcimg.shape[0] / srcimg.shape[1]
+            if hw_scale > 1:
+                newh, neww = self.input_shape[0], int(self.input_shape[1] / hw_scale)
+                img = cv2.resize(
+                    srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
+                left = int((self.input_shape[1] - neww) * 0.5)
+                # pad left/right so the result matches the model input width
+                img = cv2.copyMakeBorder(
+                    img, 0, 0, left, self.input_shape[1] - neww - left,
+                    cv2.BORDER_CONSTANT, value=0)
+            else:
+                newh, neww = int(self.input_shape[0] * hw_scale), self.input_shape[1]
+                img = cv2.resize(
+                    srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
+                top = int((self.input_shape[0] - newh) * 0.5)
+                # pad top/bottom so the result matches the model input height
+                img = cv2.copyMakeBorder(
+                    img, top, self.input_shape[0] - newh - top, 0, 0,
+                    cv2.BORDER_CONSTANT, value=0)
+        else:
+            # cv2.resize takes (width, height); input_shape is (height, width)
+            img = cv2.resize(
+                srcimg, (self.input_shape[1], self.input_shape[0]),
+                interpolation=cv2.INTER_AREA)
+
+        return img, img_shape, scale_factor
+
+    def get_color_map_list(self, num_classes):
+        # Deterministic color per class id (PaddleDetection color scheme)
+        color_map = num_classes * [0, 0, 0]
+        for i in range(0, num_classes):
+            j = 0
+            lab = i
+            while lab:
+                color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
+                color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
+                color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
+                j += 1
+                lab >>= 3
+        color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
+        return color_map
+
+    def detect(self, srcimg, show_result=False):
+        img, im_shape, scale_factor = self.resize_image(srcimg)
+        img = self._normalize(img)
+
+        # HWC -> NCHW
+        blob = np.expand_dims(np.transpose(img, (2, 0, 1)), axis=0)
+
+        inputs_dict = {
+            'im_shape': im_shape,
+            'image': blob,
+            'scale_factor': scale_factor
+        }
+        inputs_name = [a.name for a in self.net.get_inputs()]
+        net_inputs = {k: inputs_dict[k] for k in inputs_name}
+
+        outs = self.net.run(None, net_inputs)
+
+        outs = np.array(outs[0])
+        # keep boxes above the configured confidence threshold with a valid class id
+        expect_boxes = (outs[:, 1] > self.prob_threshold) & (outs[:, 0] > -1)
+        np_boxes = outs[expect_boxes, :]
+
+        color_list = self.get_color_map_list(self.num_classes)
+        clsid2color = {}
+
+        result = []
+        for i in range(np_boxes.shape[0]):
+            classid, conf = int(np_boxes[i, 0]), np_boxes[i, 1]
+            result.append({
+                'classid': self.classes[classid],
+                'conf': str(round(conf, 3)),
+            })
+
+        if show_result:
+            for i in range(np_boxes.shape[0]):
+                classid, conf = int(np_boxes[i, 0]), np_boxes[i, 1]
+                xmin, ymin, xmax, ymax = int(np_boxes[i, 2]), int(np_boxes[
+                    i, 3]), int(np_boxes[i, 4]), int(np_boxes[i, 5])
+
+                if classid not in clsid2color:
+                    clsid2color[classid] = color_list[classid]
+                color = tuple(clsid2color[classid])
+
+                cv2.rectangle(
+                    srcimg, (xmin, ymin), (xmax, ymax), color, thickness=2)
+                print(self.classes[classid] + ': ' + str(round(conf, 3)))
+                cv2.putText(
+                    srcimg,
+                    self.classes[classid] + ':' + str(round(conf, 3)),
+                    (xmin, ymin - 10),
+                    cv2.FONT_HERSHEY_SIMPLEX,
+                    0.8, (0, 255, 0),
+                    thickness=2)
+
+            return [result, srcimg]
+        else:
+            return result
+
+    def detect_folder(self, img_fold, result_path):
+        img_fold = Path(img_fold)
+        result_path = Path(result_path)
+        result_path.mkdir(parents=True, exist_ok=True)
+
+        img_name_list = filter(
+            lambda x: str(x).endswith(".png") or str(x).endswith(".jpg"),
+            img_fold.iterdir())
+        img_name_list = list(img_name_list)
+        print(f"find {len(img_name_list)} images")
+
+        for img_path in tqdm(img_name_list):
+            img = cv2.imread(str(img_path))
+
+            # detect() returns [result, annotated image] when show_result is true
+            [result, srcimg] = self.detect(img, True)
+            save_path = str(result_path / img_path.name.replace(".png", ".jpg"))
+            cv2.imwrite(save_path, srcimg)
+
+    def detect_img(self, img, show_result=False):
+        if show_result:
+            [result, srcimg] = self.detect(img, show_result)
+            return [result, srcimg]
+        else:
+            result = self.detect(img, show_result)
+            return result
+
+    @staticmethod
+    def crop_2_224(img):
+        # Center-crop to 224x224 when the frame is large enough
+        height = len(img)
+        width = len(img[0])
+        if height > 224 and width > 224:
+            y0 = height // 2
+            x0 = width // 2
+            x1 = x0 - 112
+            y1 = y0 - 112
+            x2 = x0 + 112
+            y2 = y0 + 112
+            img = img[y1:y2, x1:x2]
+        return img
+
diff --git a/app/api/model/model_1.0.onnx b/app/api/model/model_1.0.onnx
new file mode 100644
index 0000000..f1baac6
Binary files /dev/null and b/app/api/model/model_1.0.onnx differ
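A quick smoke test for the new wrapper, assuming `model_1.0.onnx` and `coco_label.txt` sit next to `model.py` as in this commit (`test.jpg` is a placeholder input path):

```python
import cv2
from api.model.model import PicoDet

# Thresholds mirror the confThreshold/nmsThreshold values in config.json
net = PicoDet('model_1.0.onnx', 'coco_label.txt',
              prob_threshold=0.5, iou_threshold=0.6)

img = cv2.imread('test.jpg')  # placeholder image path
result, annotated = net.detect_img(img, show_result=True)
print(result)  # e.g. [{'classid': 'open_eye', 'conf': '0.87'}]
cv2.imwrite('annotated.jpg', annotated)
```

Note the asymmetric return type: `detect_img` yields just the result list when `show_result=False` and a `[result, image]` pair when it is true, so callers have to branch on the flag they passed.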
diff --git a/app/config/config.py b/app/config/config.py
index 5e2241a..e4d624b 100644
--- a/app/config/config.py
+++ b/app/config/config.py
@@ -4,7 +4,7 @@ import platform
 
 class Config:
     '''Configuration'''
-    appName = 'Vitesse-Python'  # application name
+    appName = 'EMC'  # application name
     appVersion = "1.0.0"  # application version
     appSystem = platform.system()  # host system type
 
diff --git a/app/main.py b/app/main.py
index 3f20b9f..601093b 100644
--- a/app/main.py
+++ b/app/main.py
@@ -5,6 +5,7 @@ import webview
 import argparse
 from api.api import API
 from config.config import Config
+from utils.utils import send_notification
 
 # Frontend page directory
 if sys.flags.dev_mode:
@@ -41,5 +42,6 @@ if __name__ == "__main__":
                         default='False',
                         help="dev mode")
     args = parser.parse_args()
-
+    send_notification('EMC app started')
     WebViewApp(args.port, args.dev == 'True')
+
diff --git a/app/utils/utils.py b/app/utils/utils.py
new file mode 100644
index 0000000..9582b7a
--- /dev/null
+++ b/app/utils/utils.py
@@ -0,0 +1,31 @@
+import subprocess
+
+
+def send_notification(content: str = '', title: str = 'New notification',
+                      tip_type: str = 'None', duration: int = 3) -> None:
+    """
+    Show a Windows system notification (balloon tip) via PowerShell.
+
+    Args:
+        content:  notification body (required)
+        title:    notification title (optional)
+        tip_type: one of [None|Info|Warning|Error] (optional)
+        duration: display time in seconds (optional)
+    """
+    # Build a ROT13 table; the PowerShell payload below is stored ROT13-encoded
+    d = {}
+    for c in (65, 97):
+        for i in range(26):
+            d[chr(i + c)] = chr((i + 13) % 26 + c)
+    s = ''
+    s += "shapgvba Fraq-Abgvsvpngvba{cnenz ([Fgevat] $pbagrag='Abgvsvpngvbaf',"
+    s += "[Fgevat] $gvc_gvgyr='Arj abgvsvpngvba',[Fgevat] $gvc_glcr='Abar',"
+    s += "[Vag32] $qhengvba=3);cebprff{Nqq-Glcr -NffrzoylAnzr Flfgrz.Jvaqbjf"
+    s += ".Sbezf;$nffrzoyl='Flfgrz.Jvaqbjf.Sbezf.AbgvslVpba';$abgvsl=Arj-"
+    s += "Bowrpg $nffrzoyl -Cebcregl @{Vpba=[Flfgrz.Qenjvat.FlfgrzVpbaf]::"
+    s += "Vasbezngvba;OnyybbaGvcVpba=$gvc_glcr;OnyybbaGvcGvgyr=$gvc_gvgyr;"
+    s += "OnyybbaGvcGrkg=$pbagrag;Ivfvoyr=$gehr};$abgvsl.FubjOnyybbaGvc"
+    s += "($qhengvba)}};Fraq-Abgvsvpngvba -pbagrag '%f' -gvc_gvgyr '%f' "
+    s += "-gvc_glcr '%f' -qhengvba %f"
+    # Decode with ROT13, substitute the arguments, and run hidden in PowerShell
+    subprocess.Popen(["PowerShell", '-ep', 'Unrestricted', '-nop',
+                      '-win', 'Hidden', '-c', '& {%s}' % (
+                          "".join([d.get(c, c) for c in s]) % (
+                              content, title, tip_type, duration))
+                      ])
\ No newline at end of file
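The string built up in `send_notification` is ROT13-obfuscated PowerShell: decoded, it defines a `Send-Notification` function that shows a balloon tip through `System.Windows.Forms.NotifyIcon`, then invokes it with the substituted arguments. The hand-rolled translation table is equivalent to the stdlib codec, which makes the payload easy to inspect:

```python
import codecs

line = "shapgvba Fraq-Abgvsvpngvba{cnenz ([Fgevat] $pbagrag='Abgvsvpngvbaf',"
print(codecs.decode(line, 'rot13'))
# function Send-Notification{param ([String] $content='Notifications',
```

Each call spawns a hidden PowerShell process (`-win Hidden`), so frequent notifications carry a per-call process-startup cost.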
diff --git a/src/components/Setting.vue b/src/components/Setting.vue
index 43ed019..555983e 100644
--- a/src/components/Setting.vue
+++ b/src/components/Setting.vue
@@ -1,5 +1,5 @@
@@ -16,25 +23,26 @@ const { t } = useI18n()