
tensor_handler.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define the tensor stream handler."""
import numpy as np

from mindinsight.datavisual.data_transform.graph.node import NodeTypeEnum
from mindinsight.debugger.common.exceptions.exceptions import DebuggerParamValueError
from mindinsight.debugger.common.log import logger as log
from mindinsight.debugger.proto.ms_graph_pb2 import DataType
from mindinsight.debugger.stream_cache.tensor import OpTensor, ConstTensor
from mindinsight.debugger.stream_handler.base_handler import StreamHandlerBase
from mindinsight.utils.tensor import TensorUtils, TensorComparison


class TensorHandler(StreamHandlerBase):
    """Tensor handler."""

    def __init__(self):
        self._const_vals = {}
        self._tensors = {}
        self._cur_step = 0

    def put(self, value):
        """
        Put value into tensor cache. Called by grpc server.

        Args:
            value (dict): The Tensor proto message.

                - step (int): The current step of the tensor.
                - tensor_protos (list[TensorProto]): The tensor protos.

        Returns:
            bool, whether the tensor is updated successfully.
        """
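        # Illustrative shape of the incoming `value` dict (based on the docstring
        # above; no fields beyond 'step' and 'tensor_protos' are assumed):
        #   {'step': 3, 'tensor_protos': [<TensorProto>, ...]}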
        tensor_protos = value.get('tensor_protos')
        merged_tensor = self._get_merged_tensor(tensor_protos)
        step = value.get('step', 0)
        if merged_tensor.iter and step > 0:
            log.debug("Received previous tensor.")
            step -= 1
        tensor = OpTensor(merged_tensor, step)
        flag = self._put_tensor_into_cache(tensor, step)
        log.info("Put tensor %s of step: %d, into cache. Flag: %s", tensor.name, step, flag)
        return flag

    @staticmethod
    def _get_merged_tensor(tensor_protos):
        """
        Merge a list of parsed tensor values into one.

        Args:
            tensor_protos (list[TensorProto]): List of tensor protos.

        Returns:
            TensorProto, the merged tensor proto.
        """
        merged_tensor = tensor_protos[-1]
        if len(tensor_protos) > 1:
            tensor_value = bytes()
            for tensor_proto in tensor_protos:
                if not tensor_proto.tensor_content:
                    log.warning("Doesn't find tensor value for %s:%s",
                                tensor_proto.node_name, tensor_proto.slot)
                    break
                tensor_value += tensor_proto.tensor_content
            merged_tensor.tensor_content = tensor_value
            log.debug("Merge multi tensor values into one.")
        return merged_tensor

    def _put_tensor_into_cache(self, tensor, step):
        """
        Put tensor into cache.

        Args:
            tensor (OpTensor): The tensor value.
            step (int): The step of the tensor.

        Returns:
            bool, whether the tensor is updated successfully.
        """
        cache_tensor = self._tensors.get(tensor.name)
        if cache_tensor is None:
            cache_tensor = {}
            self._tensors[tensor.name] = cache_tensor
        old_tensor = cache_tensor.get(step)
        if old_tensor and not self.is_value_diff(old_tensor.value, tensor.value):
            log.debug("Tensor %s of step %s has no change. Ignore it.", tensor.name, step)
            return False
        cache_tensor[step] = tensor
        log.debug("Put updated tensor value for %s of step %s.", tensor.name, step)
        return True

    @staticmethod
    def is_value_diff(old_value, new_value):
        """Check whether the old and new tensor values are different."""
        log.debug("old value type: %s, new_value type: %s", type(old_value), type(new_value))
        if old_value is None and new_value is None:
            return False
        flag = old_value != new_value
        if isinstance(flag, np.ndarray):
            return flag.any()
        return flag
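
    # Note (illustrative, not part of the original source): for numpy inputs,
    # is_value_diff(np.array([1, 2]), np.array([1, 3])) evaluates
    # (old_value != new_value).any() and returns a truthy result, while
    # identical arrays return False.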

    def put_const_vals(self, const_vals):
        """
        Put const values into the tensor cache.

        Args:
            const_vals (list[NamedValueProto]): List of const values.
        """
        for const_val in const_vals:
            if not (const_val.value and const_val.key):
                continue
            if DataType.Name(const_val.value.dtype) == "DT_TENSOR":
                tensor_proto = const_val.value.tensor_val
                tensor_proto.node_name = const_val.key
                tensor_proto.slot = '0'
                const_tensor = OpTensor(tensor_proto)
            else:
                const_tensor = ConstTensor(const_val)
            self._const_vals[const_tensor.name] = const_tensor

    def get(self, filter_condition=None):
        """
        Get full tensor value.

        Args:
            filter_condition (dict): Filter condition.

                - name (str): The name of the tensor.
                - node_type (str): The type of the node.
                - shape (tuple): The concrete dimensions of the shape to query.

        Returns:
            dict, the tensor_value.
        """
        name = filter_condition.get('name')
        node_type = filter_condition.get('node_type')
        shape = filter_condition.get('shape')
        tensor = self._get_tensor(name, node_type)
        if not tensor:
            log.error("No tensor named %s", name)
            raise DebuggerParamValueError("No tensor named {}".format(name))
        tensor_info = tensor.get_full_info(shape)
        self._update_has_prev_step_field(tensor_info, name, node_type)
        return {'tensor_value': tensor_info}

    def _get_tensor(self, tensor_name, node_type=None, step=None):
        """
        Get tensor according to tensor name and node_type.

        Args:
            tensor_name (str): Tensor name, format like `node_name:slot`.
            node_type (str): Node type.
            step (int): The step of the tensor info. Default: None.

        Returns:
            Union[OpTensor, ConstTensor], the tensor object.
        """
        if step is None:
            step = self._cur_step
        tensor = self._tensors.get(tensor_name, {}).get(step)
        if not tensor and node_type == NodeTypeEnum.CONST.value:
            const_name = tensor_name.rsplit('/', 1)[-1]
            tensor = self._const_vals.get(const_name)
            self._tensors[tensor_name] = {step: tensor}
        return tensor

    def _get_basic_info(self, tensor_name, node_type=None):
        """Get the latest basic tensor info by tensor name."""
        tensor = self._get_tensor(tensor_name, node_type)
        if tensor:
            return tensor.get_basic_info()
        return None

    def update_tensor_history(self, tensor_history):
        """
        Add tensor basic info to tensor_history.

        Args:
            tensor_history (dict): Tensor history, including a list of tensor names and types.

        Returns:
            list[dict], the list of tensor infos whose values are missing and need to be
                queried from the client.
        """
        missed_tensors = []
        for tensor_info in tensor_history.get('tensor_history'):
            tensor_name = tensor_info.get('full_name')
            node_type = tensor_info.get('node_type')
            basic_info = self._get_basic_info(tensor_name, node_type)
            flag = self._update_has_prev_step_field(basic_info, tensor_name, node_type)
            if flag is False:
                missed_tensor = tensor_info.copy()
                missed_tensor['iter'] = 'prev'
                missed_tensors.append(missed_tensor)
                log.debug("Add previous view cmd for %s", tensor_name)
            # add `has_prev_step` field to tensor basic info.
            if basic_info:
                tensor_info.update(basic_info)
                if basic_info.get('value') is None:
                    missed_tensors.append(tensor_info)
                    log.debug("Add view cmd for %s", tensor_name)
            else:
                missed_tensors.append(tensor_info)
                log.debug("Add view cmd for %s", tensor_name)
        return missed_tensors

    def _update_has_prev_step_field(self, tensor_info, tensor_name, node_type):
        """Update has_prev_step field in tensor info."""
        flag = None
        cur_tensor_value = bool(tensor_info and tensor_info.get('value') is not None)
        if node_type == NodeTypeEnum.PARAMETER.value:
            flag = self._get_prev_tensor_value_status(tensor_name)
            if flag and cur_tensor_value:
                tensor_info['has_prev_step'] = True
        return flag

    def _get_prev_tensor_value_status(self, tensor_name):
        """
        Get the status of the tensor value of the previous step.

        Args:
            tensor_name (str): Tensor name.

        Returns:
            Union[None, bool], the status of the previous tensor value. If True, there is a valid
                previous tensor value. If False, the tensor value should be queried from the client.
                If None, ignore it.
        """
        flag = None
        # check if the tensor has previous step value.
        prev_step = self._cur_step - 1
        if prev_step < 0:
            return flag
        tensor = self._get_tensor(tensor_name, step=prev_step)
        return bool(tensor and not tensor.empty)

    def get_tensor_value_by_name(self, tensor_name, prev=False):
        """Get tensor value by name in numpy type."""
        cur_step = self._cur_step
        step = cur_step - 1 if prev else cur_step
        if step < 0:
            log.warning("%d step has no previous value for tensor: %s", cur_step, tensor_name)
            return None
        tensor = self._get_tensor(tensor_name, step=step)
        return tensor

    def clean_tensors(self, cur_step):
        """Clean the tensor cache."""
        self._cur_step = cur_step
        expired_tensor = []
        for tensor_name, tensor in self._tensors.items():
            expired_step = [step for step in tensor.keys() if step <= cur_step - 2]
            for step in expired_step:
                tensor.pop(step)
            if not tensor:
                expired_tensor.append(tensor_name)
        for tensor_name in expired_tensor:
            self._tensors.pop(tensor_name)

    def get_tensors_diff(self, tensor_name, shape, tolerance=0):
        """
        Get tensor comparison data for the given name, shape and tolerance.

        Args:
            tensor_name (str): The name of the tensor in the cache.
            shape (tuple): Specify concrete dimensions of shape.
            tolerance (str): Specify the tolerance of the difference between the current step tensor
                and the previous step tensor. Default value is 0. It is a percentage. The boundary
                value is equal to max(abs(min), abs(max)) * tolerance, where min and max are the
                minimum and maximum of the result of subtracting the previous step tensor from the
                current step tensor. If the absolute value of a result element is less than or equal
                to the boundary value, that element is set to zero.

        Raises:
            DebuggerParamValueError: If getting the current step node or the previous step node
                failed, or the type of the tensor value is not numpy.ndarray.

        Returns:
            dict, the retrieved data.
        """
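        # Worked example of the boundary rule described above (illustrative only):
        # if the element-wise difference curr - prev has min -0.5 and max 2.0, then
        # boundary = max(abs(-0.5), abs(2.0)) * tolerance = 2.0 * tolerance, and every
        # difference whose absolute value is <= boundary is reported as zero.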
        curr_tensor = self.get_tensor_value_by_name(tensor_name)
        prev_tensor = self.get_tensor_value_by_name(tensor_name, prev=True)
        if not (curr_tensor and prev_tensor):
            log.error("Get current step and previous step for this tensor name %s failed.", tensor_name)
            raise DebuggerParamValueError(f"Get current step and previous step for this tensor name "
                                          f"{tensor_name} failed.")
        curr_tensor_slice = curr_tensor.get_tensor_value_by_shape(shape)
        prev_tensor_slice = prev_tensor.get_tensor_value_by_shape(shape)
        tensor_info = curr_tensor.get_basic_info()
        if isinstance(tensor_info, dict):
            tensor_info.pop('has_prev_step')
            tensor_info.pop('value')
        tensor_comparison = curr_tensor.tensor_comparison
        if not tensor_comparison or tensor_comparison.tolerance != tolerance:
            if isinstance(curr_tensor.value, np.ndarray) and isinstance(prev_tensor.value, np.ndarray):
                tensor_diff = TensorUtils.calc_diff_between_two_tensor(curr_tensor.value, prev_tensor.value, tolerance)
                if not tensor_comparison:
                    stats = TensorUtils.get_statistics_from_tensor(tensor_diff)
                    tensor_comparison = TensorComparison(tolerance, stats, tensor_diff)
                    curr_tensor.update_tensor_comparisons(tensor_comparison)
                else:
                    tensor_comparison.update(tolerance=tolerance, value=tensor_diff)
            else:
                raise DebuggerParamValueError("The type of tensor value should be numpy.ndarray.")
        # the type of curr_tensor_slice is one of None, np.ndarray or str
        if isinstance(curr_tensor_slice, np.ndarray) and isinstance(prev_tensor_slice, np.ndarray):
            if not shape:
                tensor_diff_slice = tensor_comparison.value
            else:
                tensor_diff_slice = tensor_comparison.value[shape]
            result = np.stack([prev_tensor_slice, curr_tensor_slice, tensor_diff_slice], axis=-1)
            tensor_info['diff'] = result.tolist()
            stats = TensorUtils.get_statistics_from_tensor(tensor_diff_slice)
            tensor_info['statistics'] = TensorUtils.get_statistics_dict(stats=stats,
                                                                        overall_stats=tensor_comparison.stats)
        elif isinstance(curr_tensor_slice, str):
            tensor_info['diff'] = curr_tensor_slice
        reply = {'tensor_value': tensor_info}
        return reply
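

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It exercises only the pure helpers defined above and makes no assumptions
# about TensorProto messages or the gRPC server that normally feeds `put`.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    handler = TensorHandler()
    old = np.array([1.0, 2.0, 3.0])
    new = np.array([1.0, 2.0, 4.0])
    # Truthy: at least one element differs.
    print(TensorHandler.is_value_diff(old, new))
    # False: identical contents are treated as "no change".
    print(TensorHandler.is_value_diff(old, old.copy()))
    # Advancing the step drops cached tensors older than cur_step - 1;
    # the cache is empty here, so this only updates the current step.
    handler.clean_tensors(cur_step=3)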