You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

utils.py 7.2 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Define common utils."""
  16. import os
  17. import stat
  18. from importlib import import_module
  19. from typing import List, Tuple, Mapping
  20. from mindinsight.mindconverter.common.exceptions import ScriptGenerationError, ReportGenerationError, UnknownModelError
  21. from mindinsight.mindconverter.common.log import logger as log
  22. from mindinsight.mindconverter.graph_based_converter.constant import SEPARATOR_IN_ONNX_OP, BINARY_HEADER_PYTORCH_BITS, \
  23. FrameworkType, BINARY_HEADER_PYTORCH_FILE, TENSORFLOW_MODEL_SUFFIX
  24. def is_converted(operation: str):
  25. """
  26. Whether convert successful.
  27. Args:
  28. operation (str): Operation name.
  29. Returns:
  30. bool, true or false.
  31. """
  32. return operation and SEPARATOR_IN_ONNX_OP not in operation
  33. def _add_outputs_of_onnx_model(model, output_nodes: List[str]):
  34. """
  35. Add output nodes of onnx model.
  36. Args:
  37. model (ModelProto): ONNX model.
  38. output_nodes (list[str]): Output nodes list.
  39. Returns:
  40. ModelProto, edited ONNX model.
  41. """
  42. onnx = import_module("onnx")
  43. for opt_name in output_nodes:
  44. intermediate_layer_value_info = onnx.helper.ValueInfoProto()
  45. intermediate_layer_value_info.name = opt_name
  46. model.graph.output.append(intermediate_layer_value_info)
  47. return model
  48. def fetch_output_from_onnx_model(model, feed_dict: dict, output_nodes: List[str]):
  49. """
  50. Fetch specific nodes output from onnx model.
  51. Notes:
  52. Only support to get output without batch dimension.
  53. Args:
  54. model (ModelProto): ONNX model.
  55. feed_dict (dict): Feed forward inputs.
  56. output_nodes (list[str]): Output nodes list.
  57. Returns:
  58. dict, nodes' output value.
  59. """
  60. if not isinstance(feed_dict, dict) or not isinstance(output_nodes, list):
  61. raise TypeError("`feed_dict` should be type of dict, and `output_nodes` "
  62. "should be type of List[str].")
  63. edit_model = _add_outputs_of_onnx_model(model, output_nodes)
  64. ort = import_module("onnxruntime")
  65. sess = ort.InferenceSession(path_or_bytes=bytes(edit_model.SerializeToString()))
  66. fetched_res = sess.run(output_names=output_nodes, input_feed=feed_dict)
  67. run_result = dict()
  68. for idx, opt in enumerate(output_nodes):
  69. run_result[opt] = fetched_res[idx]
  70. return run_result
def save_code_file_and_report(model_name: str, code_lines: Mapping[str, Tuple],
                              out_folder: str, report_folder: str):
    """
    Save generated code file and conversion report to disk.

    Args:
        model_name (str): Model name, used to build both output file names.
        code_lines (dict): Mapping of file name to a (code, report) tuple.
        out_folder (str): Output folder for the generated script.
        report_folder (str): Report output folder; falls back to `out_folder`
            when empty.

    Raises:
        ScriptGenerationError: If the code file already exists or writing fails.
        ReportGenerationError: If the report file already exists or writing fails.
    """
    # O_EXCL makes os.open fail atomically if the file appears between the
    # explicit existence check below and the actual open.
    flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
    modes = stat.S_IRUSR | stat.S_IWUSR
    modes_usr = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
    out_folder = os.path.realpath(out_folder)
    if not report_folder:
        report_folder = out_folder
    else:
        report_folder = os.path.realpath(report_folder)
    if not os.path.exists(out_folder):
        os.makedirs(out_folder, modes_usr)
    if not os.path.exists(report_folder):
        os.makedirs(report_folder, modes_usr)
    for file_name in code_lines:
        code, report = code_lines[file_name]
        # NOTE(review): the paths below are built from `model_name`, not the
        # loop variable `file_name`, so every entry of `code_lines` targets the
        # same pair of files; with more than one entry the second iteration
        # would hit the "already exists" branches — confirm this is intentional.
        code_file_path = os.path.realpath(os.path.join(out_folder, f"{model_name}.py"))
        report_file_path = os.path.realpath(os.path.join(report_folder, f"report_of_{model_name}.txt"))
        try:
            if os.path.exists(code_file_path):
                raise ScriptGenerationError("Code file with the same name already exists.")
            with os.fdopen(os.open(code_file_path, flags, modes), 'w') as file:
                file.write(code)
        except (IOError, FileExistsError) as error:
            raise ScriptGenerationError(str(error))
        try:
            if os.path.exists(report_file_path):
                raise ReportGenerationError("Report file with the same name already exists.")
            # Report is created read-only for the owner (S_IRUSR only).
            with os.fdopen(os.open(report_file_path, flags, stat.S_IRUSR), "w") as rpt_f:
                rpt_f.write(report)
        except (IOError, FileExistsError) as error:
            raise ReportGenerationError(str(error))
  111. def lib_version_satisfied(current_ver: str, mini_ver_limited: str,
  112. newest_ver_limited: str = ""):
  113. """
  114. Check python lib version whether is satisfied.
  115. Notes:
  116. Version number must be format of x.x.x, e.g. 1.1.0.
  117. Args:
  118. current_ver (str): Current lib version.
  119. mini_ver_limited (str): Mini lib version.
  120. newest_ver_limited (str): Newest lib version.
  121. Returns:
  122. bool, true or false.
  123. """
  124. required_version_number_len = 3
  125. if len(list(current_ver.split("."))) != required_version_number_len or \
  126. len(list(mini_ver_limited.split("."))) != required_version_number_len or \
  127. (newest_ver_limited and len(newest_ver_limited.split(".")) != required_version_number_len):
  128. raise ValueError("Version number must be format of x.x.x.")
  129. if current_ver < mini_ver_limited or (newest_ver_limited and current_ver > newest_ver_limited):
  130. return False
  131. return True
  132. def get_dict_key_by_value(val, dic):
  133. """
  134. Return the first appeared key of a dictionary by given value.
  135. Args:
  136. val (Any): Value of the key.
  137. dic (dict): Dictionary to be checked.
  138. Returns:
  139. Any, key of the given value.
  140. """
  141. for d_key, d_val in dic.items():
  142. if d_val == val:
  143. return d_key
  144. return None
  145. def convert_bytes_string_to_string(bytes_str):
  146. """
  147. Convert a byte string to string by utf-8.
  148. Args:
  149. bytes_str (bytes): A bytes string.
  150. Returns:
  151. str, a str with utf-8 encoding.
  152. """
  153. if isinstance(bytes_str, bytes):
  154. return bytes_str.decode('utf-8')
  155. return bytes_str
  156. def get_framework_type(model_path):
  157. """Get framework type."""
  158. try:
  159. with open(model_path, 'rb') as f:
  160. if f.read(BINARY_HEADER_PYTORCH_BITS) == BINARY_HEADER_PYTORCH_FILE:
  161. framework_type = FrameworkType.PYTORCH.value
  162. elif os.path.basename(model_path).split(".")[-1].lower() == TENSORFLOW_MODEL_SUFFIX:
  163. framework_type = FrameworkType.TENSORFLOW.value
  164. else:
  165. framework_type = FrameworkType.UNKNOWN.value
  166. except IOError:
  167. error_msg = "Get UNSUPPORTED model."
  168. error = UnknownModelError(error_msg)
  169. log.error(str(error))
  170. raise error
  171. return framework_type