You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

utils.py 11 kB

4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Define common utils."""
  16. import json
  17. import os
  18. import stat
  19. from importlib import import_module
  20. from importlib.util import find_spec
  21. from typing import List, Tuple, Mapping
  22. import numpy as np
  23. from mindinsight.mindconverter.common.exceptions import ScriptGenerationError, ReportGenerationError, \
  24. CheckPointGenerationError, WeightMapGenerationError
  25. from mindinsight.mindconverter.graph_based_converter.constant import SEPARATOR_IN_ONNX_OP, FrameworkType, \
  26. TENSORFLOW_MODEL_SUFFIX, THIRD_PART_VERSION, ONNX_MODEL_SUFFIX, DTYPE_MAP
  27. def is_converted(operation: str):
  28. """
  29. Whether convert successful.
  30. Args:
  31. operation (str): Operation name.
  32. Returns:
  33. bool, true or false.
  34. """
  35. return operation and SEPARATOR_IN_ONNX_OP not in operation
  36. def _add_outputs_of_onnx_model(model, output_nodes: List[str]):
  37. """
  38. Add output nodes of onnx model.
  39. Args:
  40. model (ModelProto): ONNX model.
  41. output_nodes (list[str]): Output nodes list.
  42. Returns:
  43. ModelProto, edited ONNX model.
  44. """
  45. onnx = import_module("onnx")
  46. for opt_name in output_nodes:
  47. intermediate_layer_value_info = onnx.helper.ValueInfoProto()
  48. intermediate_layer_value_info.name = opt_name
  49. model.graph.output.append(intermediate_layer_value_info)
  50. return model
  51. def check_dependency_integrity(*packages):
  52. """Check dependency package integrity."""
  53. try:
  54. for pkg in packages:
  55. import_module(pkg)
  56. return True
  57. except ImportError:
  58. return False
  59. def build_feed_dict(onnx_model, input_nodes: dict):
  60. """Build feed dict for onnxruntime."""
  61. dtype_mapping = DTYPE_MAP
  62. input_nodes_types = {
  63. node.name: dtype_mapping[node.type.tensor_type.elem_type]
  64. for node in onnx_model.graph.input
  65. }
  66. feed_dict = {
  67. name: np.random.rand(*shape).astype(input_nodes_types[name])
  68. for name, shape in input_nodes.items()
  69. }
  70. return feed_dict
  71. def fetch_output_from_onnx_model(model, feed_dict: dict, output_nodes: List[str]):
  72. """
  73. Fetch specific nodes output from onnx model.
  74. Notes:
  75. Only support to get output without batch dimension.
  76. Args:
  77. model (ModelProto): ONNX model.
  78. feed_dict (dict): Feed forward inputs.
  79. output_nodes (list[str]): Output nodes list.
  80. Returns:
  81. dict, nodes' output value.
  82. """
  83. if not isinstance(feed_dict, dict) or not isinstance(output_nodes, list):
  84. raise TypeError("`feed_dict` should be type of dict, and `output_nodes` "
  85. "should be type of List[str].")
  86. edit_model = _add_outputs_of_onnx_model(model, output_nodes)
  87. ort = import_module("onnxruntime")
  88. sess = ort.InferenceSession(path_or_bytes=bytes(edit_model.SerializeToString()))
  89. fetched_res = sess.run(output_names=output_nodes, input_feed=feed_dict)
  90. run_result = dict()
  91. for idx, opt in enumerate(output_nodes):
  92. run_result[opt] = fetched_res[idx]
  93. return run_result
def save_code_file_and_report(model_name: str, code_lines: Mapping[str, Tuple],
                              out_folder: str, report_folder: str):
    """
    Save code file and report.

    For each entry in `code_lines`, writes the generated script, the
    conversion report, a MindSpore checkpoint and a weight-map JSON file.
    Refuses to overwrite any pre-existing file.

    Args:
        model_name (str): Model name; used to derive all output file names.
        code_lines (dict): Code lines. Each value unpacks to
            (code, report, trainable_weights, weight_map).
        out_folder (str): Output folder.
        report_folder (str): Report output folder; defaults to `out_folder`
            when empty.
    """
    # O_EXCL makes os.open fail with FileExistsError if the target exists.
    flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
    modes = stat.S_IRUSR | stat.S_IWUSR  # owner rw — generated code file
    modes_usr = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR  # owner rwx — created directories
    out_folder = os.path.realpath(out_folder)
    if not report_folder:
        # Default: report lives next to the generated code.
        report_folder = out_folder
    else:
        report_folder = os.path.realpath(report_folder)
    if not os.path.exists(out_folder):
        os.makedirs(out_folder, modes_usr)
    if not os.path.exists(report_folder):
        os.makedirs(report_folder, modes_usr)
    for file_name in code_lines:
        code, report, trainable_weights, weight_map = code_lines[file_name]
        # NOTE(review): paths are derived from model_name, not file_name, so a
        # second entry in code_lines would hit the "already exists" branch —
        # presumably callers pass exactly one entry; confirm against callers.
        code_file_path = os.path.realpath(os.path.join(out_folder, f"{model_name}.py"))
        report_file_path = os.path.realpath(os.path.join(report_folder, f"report_of_{model_name}.txt"))
        try:
            if os.path.exists(code_file_path):
                raise ScriptGenerationError("Code file with the same name already exists.")
            with os.fdopen(os.open(code_file_path, flags, modes), 'w') as file:
                file.write(code)
        except (IOError, FileExistsError) as error:
            raise ScriptGenerationError(str(error))
        try:
            if os.path.exists(report_file_path):
                raise ReportGenerationError("Report file with the same name already exists.")
            # Report file is created read-only for the owner.
            with os.fdopen(os.open(report_file_path, flags, stat.S_IRUSR), "w") as rpt_f:
                rpt_f.write(report)
        except (IOError, FileExistsError) as error:
            raise ReportGenerationError(str(error))
        # Lazy import so MindSpore is only required when a checkpoint is saved.
        save_checkpoint = getattr(import_module("mindspore.train.serialization"), "save_checkpoint")
        ckpt_file_path = os.path.realpath(os.path.join(out_folder, f"{model_name}.ckpt"))
        try:
            if os.path.exists(ckpt_file_path):
                raise CheckPointGenerationError("Checkpoint file with the same name already exists.")
            save_checkpoint(trainable_weights, ckpt_file_path)
        except TypeError as error:
            raise CheckPointGenerationError(str(error))
        weight_map_path = os.path.realpath(os.path.join(report_folder, f"weight_map_of_{model_name}.json"))
        try:
            if os.path.exists(weight_map_path):
                raise WeightMapGenerationError("Weight map file with the same name already exists.")
            # Weight map is also created read-only for the owner.
            with os.fdopen(os.open(weight_map_path, flags, stat.S_IRUSR), 'w') as map_f:
                weight_map_json = {f"{model_name}": weight_map}
                json.dump(weight_map_json, map_f)
        except (IOError, FileExistsError) as error:
            raise WeightMapGenerationError(str(error))
  151. def onnx_satisfied():
  152. """Validate ONNX , ONNXRUNTIME, ONNXOPTIMIZER installation."""
  153. if not find_spec("onnx") or not find_spec("onnxruntime") or not find_spec("onnxoptimizer"):
  154. return False
  155. return True
  156. def lib_version_satisfied(current_ver: str, mini_ver_limited: str,
  157. newest_ver_limited: str = ""):
  158. """
  159. Check python lib version whether is satisfied.
  160. Notes:
  161. Version number must be format of x.x.x, e.g. 1.1.0.
  162. Args:
  163. current_ver (str): Current lib version.
  164. mini_ver_limited (str): Mini lib version.
  165. newest_ver_limited (str): Newest lib version.
  166. Returns:
  167. bool, true or false.
  168. """
  169. required_version_number_len = 3
  170. if len(list(current_ver.split("."))) != required_version_number_len or \
  171. len(list(mini_ver_limited.split("."))) != required_version_number_len or \
  172. (newest_ver_limited and len(newest_ver_limited.split(".")) != required_version_number_len):
  173. raise ValueError("Version number must be format of x.x.x.")
  174. if current_ver < mini_ver_limited or (newest_ver_limited and current_ver > newest_ver_limited):
  175. return False
  176. return True
  177. def get_dict_key_by_value(val, dic):
  178. """
  179. Return the first appeared key of a dictionary by given value.
  180. Args:
  181. val (Any): Value of the key.
  182. dic (dict): Dictionary to be checked.
  183. Returns:
  184. Any, key of the given value.
  185. """
  186. for d_key, d_val in dic.items():
  187. if d_val == val:
  188. return d_key
  189. return None
  190. def convert_bytes_string_to_string(bytes_str):
  191. """
  192. Convert a byte string to string by utf-8.
  193. Args:
  194. bytes_str (bytes): A bytes string.
  195. Returns:
  196. str, a str with utf-8 encoding.
  197. """
  198. if isinstance(bytes_str, bytes):
  199. return bytes_str.decode('utf-8')
  200. return bytes_str
  201. def get_framework_type(model_path):
  202. """Get framework type."""
  203. model_suffix = os.path.basename(model_path).split(".")[-1].lower()
  204. if model_suffix == ONNX_MODEL_SUFFIX:
  205. framework_type = FrameworkType.ONNX.value
  206. elif model_suffix == TENSORFLOW_MODEL_SUFFIX:
  207. framework_type = FrameworkType.TENSORFLOW.value
  208. else:
  209. framework_type = FrameworkType.UNKNOWN.value
  210. return framework_type
  211. def reset_init_or_construct(template, variable_slot, new_data, scope):
  212. """Reset init statement."""
  213. template[variable_slot][scope].clear()
  214. template[variable_slot][scope] += new_data
  215. return template
  216. def replace_string_in_list(str_list: list, original_str: str, target_str: str):
  217. """
  218. Replace a string in a list by provided string.
  219. Args:
  220. str_list (list): A list contains the string to be replaced.
  221. original_str (str): The string to be replaced.
  222. target_str (str): The replacement of string.
  223. Returns,
  224. list, the original list with replaced string.
  225. """
  226. return [s.replace(original_str, target_str) for s in str_list]
  227. def get_third_part_lib_validation_error_info(lib_list):
  228. """Get error info when not satisfying third part lib validation."""
  229. error_info = None
  230. link_str = ', '
  231. for idx, lib in enumerate(lib_list):
  232. if idx == len(lib_list) - 1:
  233. link_str = ' and '
  234. lib_version_required = THIRD_PART_VERSION[lib]
  235. if len(lib_version_required) == 2:
  236. lib_version_required_min = lib_version_required[0]
  237. lib_version_required_max = lib_version_required[1]
  238. if lib_version_required_min == lib_version_required_max:
  239. info = f"{lib}(=={lib_version_required_min})"
  240. else:
  241. info = f"{lib}(>={lib_version_required_min} and <{lib_version_required_max})"
  242. else:
  243. info = f"{lib}(>={lib_version_required[0]})"
  244. if not error_info:
  245. error_info = info
  246. else:
  247. error_info = link_str.join((error_info, info))
  248. return error_info