From: @liuchongming74
Reviewed-by: @yelihua, @ouwenchang
Signed-off-by: @ouwenchang
tags/v1.2.0-rc1
@@ -486,10 +486,9 @@ def convert_to_froze_graph(keras_model: tf.python.keras.models.Model, model_name
 | UnKnownModelError | Fail to recognize model format | 0000001 | Generally, the given TensorFlow model or PyTorch model doesn't observe the standard |
 | ParamMissingError | Fail to get required conversion params | 0000002 | Mainly caused by missing `--shape`, `--input_nodes`, `--output_nodes` |
 | GraphInitFailError | Fail to trace the computational graph | 1000000 | Exception caused by 1000001~1000003 |
-| ModelNotSupportError | Fail to parse .pth/.pb file | 1000001 | Given `--input_nodes`, `--output_nodes don't match the input model; Meanwhile, the model file can not be loaded also can cause this error. |
+| ModelLoadingError | Fail to load the model | 1000001 | Given `--input_nodes`, `--output_nodes`, `--shape` don't match the input model; Meanwhile, the model file can not be loaded also can cause this error. |
 | TfRuntimeError | Fail to initialize the TF runtime | 1000002 | Resources required by TensorFlow are not available |
-| ModelLoadingError | Fail to load the model | 1000003 | Maybe cause by the wrong `--input_shape` value |
-| RuntimeIntegrityError | Fail to locate required third party dependency | 1000004 | Caused by required third party packages are not installed |
+| RuntimeIntegrityError | Fail to locate required third party dependency | 1000003 | Caused by required third party packages are not installed |
 | TreeCreateFailError | Fail to create code hierarchical tree | 2000000 | Mainly caused by usage of `torch.nn.functional.xxx`, `torch.xxx`, `torch.Tensor.xxx` in PyTorch |
 | NodeInputMissingError | Fail to get the input node info | 2000001 | Fail to get input node info |
 | TreeNodeInsertError | Fail to insert tree node | 2000002 | Mainly caused by wrong scope name |
@@ -502,10 +502,9 @@ def convert_to_froze_graph(keras_model: tf.python.keras.models.Model, model_name
 | UnKnownModelError | 识别网络模型对应的框架失败 | 0000001 | 通常为用户给定模型文件不符合TensorFlow或PyTorch标准。 |
 | ParamMissingError | 缺少转换所需参数 | 0000002 | 通常为`--shape`, `--input_nodes` , `--output_nodes`缺失导致 |
 | GraphInitFailError | 依据网络模型构建计算图失败 | 1000000 | 由1000001,1000002,1000003导致的计算图无法解析。 |
-| ModelNotSupportError | 解析.pth/.pb文件失败 | 1000001 | 给定的`--input_nodes`, `--output_nodes`与实际模型不符;<br />或模型文件存在问题导致模型无法加载。 |
+| ModelLoadingError | 模型加载失败 | 1000001 | 给定的`--input_nodes`, `--output_nodes`, `--shape`与实际模型不符;<br />或模型文件存在问题导致模型无法加载。 |
 | TfRuntimeError | TensorFlow库执行出错 | 1000002 | TensorFlow启动申请所需资源失败导致无法正常启动,<br />请检查系统资源(进程数、内存、显存占用、CPU占用)是否充足。 |
-| ModelLoadingError | 模型加载失败 | 1000003 | 可能由于用户给定网络输入尺寸错误导致模型无法加载。 |
-| RuntimeIntegrityError | 三方依赖库不完整 | 1000004 | MindConverter运行时所需的三方依赖库未安装。 |
+| RuntimeIntegrityError | 三方依赖库不完整 | 1000003 | MindConverter运行时所需的三方依赖库未安装。 |
 | TreeCreateFailError | 依据计算图构建模型树失败 | 2000000 | Tree用于生成最终代码结构,<br />通常由于PyTorch网络中存在`torch.nn.functional.xxx`, `torch.xxx`, `torch.Tensor.xxx`算子导致。 |
 | NodeInputMissingError | 网络节点输入信息丢失 | 2000001 | 节点的输入信息丢失。 |
 | TreeNodeInsertError | 树节点构建失败 | 2000002 | 由于scope name错误,无法找到该节点的父节点。 |
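For readers decoding the codes documented in the two tables above: they follow a category-plus-sub-code layout, where 0000001 and 0000002 are general errors, 1000000~1000003 belong to the graph-initialization family (GraphInitFailError), and 2000000~2000002 to the tree-construction family (TreeCreateFailError). Below is a small illustrative helper, not part of MindConverter, that splits a reported code under that assumption:

```python
# Illustrative helper only -- not part of MindConverter. It merely shows how
# the codes documented above decompose into a category base and a sub-code.
def split_error_code(code: int):
    """Split a 7-digit error code into (category_base, sub_code)."""
    base = (code // 1000000) * 1000000  # e.g. 1000003 -> 1000000 (graph-init family)
    sub = code - base                   # e.g. 1000003 -> 3 (RuntimeIntegrityError)
    return base, sub


assert split_error_code(1000001) == (1000000, 1)  # ModelLoadingError
assert split_error_code(1000003) == (1000000, 3)  # RuntimeIntegrityError
```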
@@ -253,10 +253,9 @@ class GraphInitError(MindConverterException):
     class ErrCode(Enum):
         """Define error code of GraphInitError."""
         UNKNOWN_ERROR = 0
-        MODEL_NOT_SUPPORT = 1
+        MODEL_LOADING_ERROR = 1
         TF_RUNTIME_ERROR = 2
-        INPUT_SHAPE_ERROR = 3
-        MI_RUNTIME_ERROR = 4
+        MI_RUNTIME_ERROR = 3

     BASE_ERROR_CODE = ConverterErrors.GRAPH_INIT_FAIL.value
     ERROR_CODE = ErrCode.UNKNOWN_ERROR.value
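The enum change above lines up with the renumbered documentation tables: the user-facing code appears to be BASE_ERROR_CODE plus the ErrCode value (the actual composition happens in MindConverterException, which this diff does not touch). A minimal sketch under that assumption, with GRAPH_INIT_FAIL assumed to be 1000000:

```python
# Minimal sketch, assuming the reported code is BASE_ERROR_CODE + ERROR_CODE.
# GRAPH_INIT_FAIL_BASE is an assumed stand-in for ConverterErrors.GRAPH_INIT_FAIL.value.
from enum import Enum

GRAPH_INIT_FAIL_BASE = 1000000


class ErrCode(Enum):
    """GraphInitError sub-codes after this change."""
    UNKNOWN_ERROR = 0
    MODEL_LOADING_ERROR = 1
    TF_RUNTIME_ERROR = 2
    MI_RUNTIME_ERROR = 3


# These sums match the documentation tables above.
assert GRAPH_INIT_FAIL_BASE + ErrCode.MODEL_LOADING_ERROR.value == 1000001
assert GRAPH_INIT_FAIL_BASE + ErrCode.MI_RUNTIME_ERROR.value == 1000003
```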
@@ -270,7 +269,6 @@ class GraphInitError(MindConverterException):
         """Raise from exceptions below."""
         except_source = (FileNotFoundError,
                          ModuleNotFoundError,
-                         ModelNotSupportError,
                          ModelLoadingError,
                          RuntimeIntegrityError,
                          TypeError,
@@ -336,13 +334,13 @@ class SourceFilesSaveError(MindConverterException):
         return except_source


-class ModelNotSupportError(GraphInitError):
+class ModelLoadingError(GraphInitError):
     """The model not support error."""
-    ERROR_CODE = GraphInitError.ErrCode.MODEL_NOT_SUPPORT.value
+    ERROR_CODE = GraphInitError.ErrCode.MODEL_LOADING_ERROR.value

     def __init__(self, msg):
-        super(ModelNotSupportError, self).__init__(msg=msg)
+        super(ModelLoadingError, self).__init__(msg=msg)

     @classmethod
     def raise_from(cls):
@@ -538,16 +536,3 @@ class GeneratorError(MindConverterException):
         """Raise from exceptions below."""
         except_source = (ValueError, TypeError, SyntaxError, cls)
         return except_source
-
-
-class ModelLoadingError(GraphInitError):
-    """Model loading fail."""
-    ERROR_CODE = GraphInitError.ErrCode.INPUT_SHAPE_ERROR.value
-
-    def __init__(self, msg):
-        super(ModelLoadingError, self).__init__(msg=msg)
-
-    @classmethod
-    def raise_from(cls):
-        """Define exception when model loading fail."""
-        return ValueError, cls
@@ -16,7 +16,7 @@
 from importlib import import_module
 from typing import Dict, NoReturn

-from mindinsight.mindconverter.common.exceptions import ModelNotSupportError
+from mindinsight.mindconverter.common.exceptions import ModelLoadingError
 from mindinsight.mindconverter.common.log import logger as log
 from mindinsight.mindconverter.graph_based_converter.third_party_graph.base import Graph
 from mindinsight.mindconverter.graph_based_converter.third_party_graph.input_node import InputNode
@@ -204,6 +204,6 @@ class OnnxGraph(Graph):
         onnx_inputs = [onnx_input.name for onnx_input in onnx_model.graph.input]
         for ipt in input_nodes:
             if ipt not in onnx_inputs:
-                raise ModelNotSupportError(f"input nodes({input_nodes}) is not "
-                                           f"in model inputs ({onnx_inputs}).")
+                raise ModelLoadingError(f"input nodes({input_nodes}) is not "
+                                        f"in model inputs ({onnx_inputs}).")
         return onnx_model
@@ -16,7 +16,7 @@
 import copy
 from importlib import import_module

-from mindinsight.mindconverter.common.exceptions import ModelNotSupportError
+from mindinsight.mindconverter.common.exceptions import ModelLoadingError
 from mindinsight.mindconverter.graph_based_converter.common.utils import fetch_output_from_onnx_model, build_feed_dict
@@ -92,7 +92,7 @@ class OnnxSimplify:
         self._constant_nodes = copy.deepcopy(const_nodes)

-    @ModelNotSupportError.check_except(
+    @ModelLoadingError.check_except(
         "Error occurs when loading model with given params, please check `--shape`, "
         "`--input_nodes`, `--output_nodes`, `--model_file` or runtime environment integrity."
     )
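`check_except` itself is defined on the exception base class and is untouched by this diff; it wraps the decorated method and re-raises the exceptions listed by `raise_from()` as the owning error with the given user-facing message. A simplified, hypothetical sketch of that pattern (names and details assumed, not copied from the real implementation):

```python
# Hypothetical sketch of the check_except pattern; DemoModelLoadingError and its
# behavior are assumptions for illustration, not MindConverter's implementation.
import functools


class DemoModelLoadingError(Exception):
    """Stand-in for ModelLoadingError."""

    @classmethod
    def raise_from(cls):
        # Low-level exceptions this error is allowed to wrap.
        return (ValueError, FileNotFoundError, cls)

    @classmethod
    def check_except(cls, msg):
        """Re-raise exceptions from raise_from() as this error, with `msg`."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except cls.raise_from() as err:
                    raise cls(f"{msg} Detail: {err}") from err
            return wrapper
        return decorator


@DemoModelLoadingError.check_except("Error occurs when loading model with given params.")
def load_model(path):
    raise FileNotFoundError(path)  # surfaces to the caller as DemoModelLoadingError
```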
@@ -19,14 +19,14 @@ from importlib import import_module
 from mindinsight.mindconverter.common.log import logger as log
 from mindinsight.mindconverter.graph_based_converter.third_party_graph.base import GraphParser
-from mindinsight.mindconverter.common.exceptions import ModelNotSupportError
+from mindinsight.mindconverter.common.exceptions import ModelLoadingError


 class PyTorchGraphParser(GraphParser):
     """Define pytorch graph parser."""

     @classmethod
-    @ModelNotSupportError.check_except(
+    @ModelLoadingError.check_except(
         "Error occurs when loading model with given params, please check `--shape`, "
         "`--input_nodes`, `--output_nodes`, `--model_file` or runtime environment integrity."
     )
@@ -40,7 +40,6 @@ class PyTorchGraphParser(GraphParser):
         Returns:
             object, torch model.
         """
         if not os.path.exists(model_path):
             error = FileNotFoundError("`model_path` must be assigned with "
                                       "an existed file path.")
@@ -52,7 +51,6 @@ class PyTorchGraphParser(GraphParser):
             onnx_model_sim = cls._convert_pytorch_graph_to_onnx(
                 model_path, sample_shape, opset_version=11)
             return onnx_model_sim
         except ModuleNotFoundError:
             error_msg = "Cannot find model scripts in system path, " \
                         "set `--project_path` to the path of model scripts folder correctly."
@@ -131,5 +129,5 @@ class PyTorchGraphParser(GraphParser):
                                               True, False)
             output_queue.put(proto)
-        except ModelNotSupportError.raise_from() as e:
+        except ModelLoadingError.raise_from() as e:
             output_queue.put(e)
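The `except ModelLoadingError.raise_from() as e:` clause above relies on `raise_from()` returning a tuple of exception classes; Python evaluates the expression after `except` at runtime and accepts either a class or a tuple of classes. A self-contained illustration with a hypothetical stand-in class:

```python
# Illustration of catching via a method that returns a tuple of exception types.
# DemoGraphError is a hypothetical stand-in, not MindConverter code.
class DemoGraphError(Exception):
    """Stand-in error class."""

    @classmethod
    def raise_from(cls):
        # A tuple of exception types is a valid `except` target.
        return (ValueError, cls)


try:
    raise ValueError("bad sample shape")
except DemoGraphError.raise_from() as e:
    print(f"caught via raise_from(): {e!r}")
```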
@@ -19,14 +19,14 @@ from importlib import import_module
 from mindinsight.mindconverter.common.log import logger as log
 from mindinsight.mindconverter.graph_based_converter.third_party_graph.base import GraphParser
-from mindinsight.mindconverter.common.exceptions import ModelNotSupportError
+from mindinsight.mindconverter.common.exceptions import ModelLoadingError


 class TFGraphParser(GraphParser):
     """Define TF graph parser."""

     @classmethod
-    @ModelNotSupportError.check_except(
+    @ModelLoadingError.check_except(
         "Error occurs when loading model with given params, please check `--shape`, "
         "`--input_nodes`, `--output_nodes`, `--model_file` or runtime environment integrity."
     )
@@ -56,9 +56,9 @@ class TFGraphParser(GraphParser):
         invalid_inputs = TFGraphParser.invalid_nodes_name(input_nodes)
         invalid_outputs = TFGraphParser.invalid_nodes_name(output_nodes)
         if invalid_inputs:
-            raise ModelNotSupportError(f"Invalid Input Node Name Found: {', '.join(invalid_inputs)}")
+            raise ModelLoadingError(f"Invalid Input Node Name Found: {', '.join(invalid_inputs)}")
         if invalid_outputs:
-            raise ModelNotSupportError(f"Invalid Output Node Name Found: {', '.join(invalid_outputs)}")
+            raise ModelLoadingError(f"Invalid Output Node Name Found: {', '.join(invalid_outputs)}")
         model = convert_tf_graph_to_onnx(model_path,
                                          model_inputs=input_nodes,