
debugger_server.py 35 kB

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
  15. """Implement the debugger server."""
  16. import signal
  17. from concurrent import futures
  18. from threading import Thread
  19. import grpc
  20. from mindinsight.conditionmgr.common.utils import NodeBasicInfo
  21. from mindinsight.conditionmgr.condition import ConditionContext, ConditionIdEnum
  22. from mindinsight.conditionmgr.conditionmgr import ConditionMgr
  23. from mindinsight.conditionmgr.recommender import recommend_watchpoints
  24. from mindinsight.conf import settings
  25. from mindinsight.datavisual.data_transform.graph import NodeTypeEnum
  26. from mindinsight.datavisual.utils.tools import to_float
  27. from mindinsight.debugger.common.exceptions.exceptions import DebuggerParamValueError, \
  28. DebuggerParamTypeError, DebuggerCreateWatchPointError, DebuggerUpdateWatchPointError, \
  29. DebuggerDeleteWatchPointError, DebuggerCompareTensorError, DebuggerTensorGraphError, \
  30. DebuggerTensorHitError
  31. from mindinsight.debugger.common.log import LOGGER as log
  32. from mindinsight.debugger.common.utils import ServerStatus, \
  33. create_view_event_from_tensor_basic_info, Streams
  34. from mindinsight.debugger.debugger_cache import DebuggerCache
  35. from mindinsight.debugger.debugger_grpc_server import DebuggerGrpcServer
  36. from mindinsight.debugger.proto import debug_grpc_pb2_grpc as grpc_server_base
  37. from mindinsight.debugger.stream_operator.tensor_detail_info import TensorDetailInfo
  38. from mindinsight.debugger.stream_operator.training_control_operator import TrainingControlOperator
  39. from mindinsight.utils.tensor import TensorUtils, MAX_DIMENSIONS_FOR_TENSOR


class DebuggerServer:
    """The server manager of debugger."""

    def __init__(self, grpc_port=None):
        self.grpc_port = grpc_port
        self.condition_mgr = ConditionMgr()
        self.cache_store = DebuggerCache()
        self.grpc_server = DebuggerGrpcServer(self.cache_store, self.condition_mgr)
        self.grpc_server_manager = None
        self.back_server = None

    def get_conditions(self, train_id):
        """Get all default conditions."""
        metadata_stream = self.cache_store.get_stream_handler(Streams.METADATA)
        condition_context = ConditionContext(metadata_stream.backend, metadata_stream.step, (1, 0))
        log.debug("Train_id: %s, backend: %s", train_id, condition_context.backend)
        return self.condition_mgr.get_all(condition_context)

    def get_condition_collections(self, train_id):
        """Get default condition collections."""
        metadata_stream = self.cache_store.get_stream_handler(Streams.METADATA)
        condition_context = ConditionContext(metadata_stream.backend, metadata_stream.step, (1, 0))
        log.debug("Train_id: %s, backend: %s", train_id, condition_context.backend)
        return self.condition_mgr.get_all_collections(condition_context)

    def set_recommended_watch_points(self, set_recommended, train_id):
        """Set recommended watch points."""
        metadata_stream = self.cache_store.get_stream_handler(Streams.METADATA)
        condition_context = ConditionContext(metadata_stream.backend, metadata_stream.step, (1, 0))
        log.debug("Train_id: %s, backend: %s", train_id, condition_context.backend)
        res = metadata_stream.get(['state', 'enable_recheck'])
        if set_recommended:
            res['id'] = self._add_recommended_watchpoints(condition_context)
        metadata_stream.recommendation_confirmed = True
        return res

    def _add_recommended_watchpoints(self, condition_context):
        """Add predefined watchpoints."""
        log.debug("Add predefined watchpoints.")
        graph_stream = self.cache_store.get_stream_handler(Streams.GRAPH)
        watchpoints = recommend_watchpoints(self.condition_mgr, graph_stream, condition_context)
        watch_point_stream_handler = self.cache_store.get_stream_handler(Streams.WATCHPOINT)
        watch_points_ids = []
        for watchpoint in watchpoints:
            watch_points_id = watch_point_stream_handler.create_watchpoint(
                watch_condition=watchpoint.get_watch_condition_dict(),
                watch_nodes=watchpoint.watch_nodes,
                condition_mgr=self.condition_mgr
            )
            watch_points_ids.append(watch_points_id)
        return watch_points_ids

    def start(self):
        """Start server."""
        grpc_port = self.grpc_port if self.grpc_port else "50051"
        host = settings.HOST if hasattr(settings, 'HOST') else '[::]'
        hostname = "{}:{}".format(host, grpc_port)
        # initialize a grpc server
        grpc_server_manager = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        grpc_server_base.add_EventListenerServicer_to_server(self.grpc_server, grpc_server_manager)
        grpc_server_manager.add_insecure_port(hostname)
        grpc_server_manager.start()
        my_server_thread = Thread(target=grpc_server_manager.wait_for_termination)
        # start grpc server
        my_server_thread.start()
        self.back_server = my_server_thread
        self.grpc_server_manager = grpc_server_manager
        # register stop server handler
        signal.signal(signal.SIGINT, self._stop_handler)
        log.info("Start grpc server %s", hostname)
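
    # How a training process attaches to this endpoint is configured on the
    # MindSpore side. A hedged sketch based on the MindSpore debugger docs of
    # this era (treat the exact variable names as an assumption and verify
    # against your MindSpore version):
    #
    #     export ENABLE_MS_DEBUGGER=1
    #     export MS_DEBUGGER_HOST=127.0.0.1
    #     export MS_DEBUGGER_PORT=50051
    #
    # The training script then connects to the insecure gRPC port opened above.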

    def _stop_handler(self, signum, frame):
        """Handle the stop signal and shut down the server."""
        self.stop()
        log.debug("Deal with stop signal: %s, %s", signum, frame)

    def stop(self):
        """Stop debugger server."""
        log.info("Send terminate info to client.")
        self.control({'mode': 'terminate'})
        self.grpc_server_manager.stop(grace=None)
        self.back_server.join()
        log.info("Stop debugger server.")

    def poll_data(self, pos):
        """
        Get the pos-th data from DebuggerCache.

        Args:
            pos (str): The index of data, as a string.

        Returns:
            dict, the data to be updated.
        """
        if not isinstance(pos, str):
            log.error("Pos should be string. Received: %s", pos)
            raise DebuggerParamValueError("Pos should be string.")
        reply = self.cache_store.get_data(pos)
        return reply
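
    # A minimal polling sketch (the cursor value '0' is a hypothetical
    # example; the concrete cursor format is defined by DebuggerCache.get_data):
    #
    #     reply = server.poll_data('0')   # fetch the data at cursor '0'
    #     # a non-string cursor such as 0 raises DebuggerParamValueError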

    def search(self, filter_condition):
        """
        Search for single node in graph.

        Args:
            filter_condition (dict): Filter condition.

                - name (str): The name pattern.
                - graph_name (str): The graph name.
                - watch_point_id (int): The id of watchpoint. Default: 0.
                - node_category (str): The node category. Default: None.

        Returns:
            dict, the searched nodes.
        """
        log.info("receive search request with filter_condition: %s", filter_condition)
        # validate watchpoint id
        watch_point_id = filter_condition.pop('watch_point_id', 0)
        watchpoint_stream = self.cache_store.get_stream_handler(Streams.WATCHPOINT)
        watchpoint_stream.validate_watchpoint_id(watch_point_id)
        # validate and update graph name
        graph_stream = self.cache_store.get_stream_handler(Streams.GRAPH)
        graph_name = graph_stream.validate_graph_name(filter_condition.get('graph_name'))
        filter_condition['graph_name'] = graph_name
        # get searched graph
        graph = graph_stream.search_nodes(filter_condition)
        # add watched label to graph
        watchpoint_stream.set_watch_nodes(graph, graph_stream, watch_point_id, graph_name)
        return graph

    def tensor_comparisons(self, name, shape, detail='data', tolerance='0'):
        """
        Get tensor comparisons data for given name, detail, shape and tolerance.

        Args:
            name (str): The name of tensor for UI.
            shape (str): Specify concrete dimensions of shape.
            detail (str): Specify which data to query. Current available value is 'data' which means
                concrete tensor data. Histogram or unique count can be supported in the future.
            tolerance (str): Specify tolerance of difference between current step tensor and previous
                step tensor. Default value is 0.

        Raises:
            DebuggerParamValueError, if node type is not parameter or value of detail is not supported.
            DebuggerCompareTensorError, if MindSpore is not in the waiting state.

        Returns:
            dict, the retrieved data.
        """
        if self.cache_store.get_stream_handler(
                Streams.METADATA).state != ServerStatus.WAITING.value:
            log.error("Failed to compare tensors as MindSpore is not in the waiting state.")
            raise DebuggerCompareTensorError(
                "Failed to compare tensors as MindSpore is not in the waiting state."
            )
        self.validate_tensor_param(name, detail)
        # Limit to query max two dimensions for tensor in table view.
        parsed_shape = TensorUtils.parse_shape(shape, limit=MAX_DIMENSIONS_FOR_TENSOR)
        node_type, tensor_name = self._get_tensor_name_and_type_by_ui_name(name)
        tolerance = to_float(tolerance, 'tolerance')
        tensor_stream = self.cache_store.get_stream_handler(Streams.TENSOR)
        if node_type == NodeTypeEnum.PARAMETER.value:
            reply = tensor_stream.get_tensors_diff(tensor_name, parsed_shape, tolerance)
        else:
            raise DebuggerParamValueError(
                "The node type must be parameter, but got {}.".format(node_type))
        return reply
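
    # An illustrative comparison query (the tensor name is hypothetical; the
    # shape string is assumed to use the slice-per-axis form accepted by
    # TensorUtils.parse_shape, and only parameter nodes are supported):
    #
    #     diff = server.tensor_comparisons(
    #         name='Default/fc1.weight:0', shape='[:, :]', tolerance='0.01')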

    def retrieve(self, mode, filter_condition=None):
        """
        Retrieve data according to mode and params.

        Args:
            mode (str): The type of info message.
            filter_condition (dict): The filter condition.

        Returns:
            dict, the retrieved data.
        """
        log.info("receive retrieve request for mode: %s, filter_condition: %s", mode,
                 filter_condition)
        mode_mapping = {
            'all': self._retrieve_all,
            'node': self._retrieve_node,
            'watchpoint': self._retrieve_watchpoint,
            'watchpoint_hit': self._retrieve_watchpoint_hit
        }
        # validate param <mode>
        if mode not in mode_mapping:
            log.error("Invalid param <mode>. <mode> should be in ['all', 'node', 'watchpoint', "
                      "'watchpoint_hit'], but got %s.", mode)
            raise DebuggerParamValueError("Invalid mode.")
        # validate backend status
        metadata_stream = self.cache_store.get_stream_handler(Streams.METADATA)
        if metadata_stream.state == ServerStatus.PENDING.value:
            log.info("The backend is in pending status.")
            return metadata_stream.get()
        filter_condition = {} if filter_condition is None else filter_condition
        reply = mode_mapping[mode](filter_condition)
        return reply
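
    # The documented modes map one-to-one onto the private helpers above.
    # Sketch (the node name and watchpoint id are hypothetical):
    #
    #     server.retrieve('all')                                # metadata + graph + watchpoints
    #     server.retrieve('node', {'name': 'Default/fc1'})      # a single node
    #     server.retrieve('watchpoint', {'watch_point_id': 1})  # one watchpoint's graph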

    def _retrieve_all(self, filter_condition=None):
        """Retrieve metadata, root graph and watchpoint list."""
        if filter_condition:
            log.error("No filter condition required for retrieve all request.")
            raise DebuggerParamTypeError("filter_condition should be empty.")
        self.cache_store.clean_data()
        log.info("Clean data queue cache for retrieve all request.")
        result = {}
        for stream in [Streams.METADATA, Streams.GRAPH]:
            sub_res = self.cache_store.get_stream_handler(stream).get()
            result.update(sub_res)
        sub_res = self._hide_parameters_for_ui()
        result.update(sub_res)
        return result

    def _retrieve_node(self, filter_condition):
        """
        Retrieve node info.

        Args:
            filter_condition (dict): Filter condition.

                - name (str): The name of single node.
                - graph_name (str): The relative graph_name of the node.
                - single_node (bool): If False, return the sub-layer of single node. If True, return
                  the node list from root node to single node.
                - watch_point_id (int): The id of watchpoint.

        Returns:
            dict, reply with graph.
        """
        log.debug("Retrieve node %s.", filter_condition)
        # validate node name
        node_name = filter_condition.get('name')
        graph_stream = self.cache_store.get_stream_handler(Streams.GRAPH)
        graph_name = graph_stream.validate_graph_name(filter_condition.get('graph_name'))
        if node_name:
            # validate that the node exists in the graph
            graph_stream.get_node_type(node_name, graph_name)
        filter_condition['single_node'] = bool(filter_condition.get('single_node'))
        filter_condition['graph_name'] = graph_name
        reply = self._get_nodes_info(filter_condition)
        return reply

    def _get_nodes_info(self, filter_condition):
        """
        Get nodes info.

        Args:
            filter_condition (dict): The filter condition.

                - name (str): The node name.
                - graph_name (str): The relative graph_name of the node.
                - single_node (bool): If False, return the sub-layer of single node. If True, return
                  the node list from root node to single node.
                - watch_point_id (int): The id of watchpoint.

        Returns:
            dict, reply with graph.
        """
        # validate watch_point_id
        watch_point_id = filter_condition.get('watch_point_id', 0)
        watchpoint_stream = self.cache_store.get_stream_handler(Streams.WATCHPOINT)
        watchpoint_stream.validate_watchpoint_id(watch_point_id)
        # get graph
        graph_stream = self.cache_store.get_stream_handler(Streams.GRAPH)
        reply = graph_stream.get(filter_condition)
        graph = reply.get('graph')
        # add watched label to graph
        watchpoint_stream.set_watch_nodes(graph, graph_stream, watch_point_id, filter_condition.get('graph_name'))
        return reply

    def retrieve_tensor_history(self, node_name, graph_name=None):
        """
        Retrieve tensor history for leaf node.

        Args:
            node_name (str): The name of leaf node.
            graph_name (str): The graph name. Default: None.

        Returns:
            dict, the tensor history and metadata.
        """
        log.info("Retrieve tensor history for node: %s.", node_name)
        metadata_stream = self.cache_store.get_stream_handler(Streams.METADATA)
        if metadata_stream.state == ServerStatus.PENDING.value:
            log.info("The backend is in pending status.")
            return metadata_stream.get(['state', 'step'])
        res = self._get_tensor_history(node_name, graph_name)
        return res

    def _get_tensor_history(self, node_name, graph_name=None):
        """
        Get tensor history for single node.

        Args:
            node_name (str): The name of leaf node.
            graph_name (str): The graph name. Default: None.

        Returns:
            dict, the tensor history and metadata.
        """
        # get basic tensor history
        graph_stream = self.cache_store.get_stream_handler(Streams.GRAPH)
        tensor_history = graph_stream.get_tensor_history(node_name, graph_name)
        # add tensor value for tensor history
        self._add_tensor_value_for_tensor_history(tensor_history, node_name, graph_name)
        # add hit label for tensor history
        watchpoint_hit_stream = self.cache_store.get_stream_handler(Streams.WATCHPOINT_HIT)
        watchpoint_hit_stream.update_tensor_history(tensor_history)
        # add metadata
        metadata = self.cache_store.get_stream_handler(Streams.METADATA).get(['state', 'step'])
        tensor_history.update(metadata)
        return tensor_history

    def _add_tensor_value_for_tensor_history(self, tensor_history, node_name, graph_name):
        """
        Add tensor values to the tensor history and send a ViewCMD for any missing tensor values.

        Args:
            tensor_history (list[dict]): A list of tensor info, including name and type.
            node_name (str): The UI node name.
            graph_name (str): The graph name.
        """
        tensor_stream = self.cache_store.get_stream_handler(Streams.TENSOR)
        missed_tensors = tensor_stream.update_tensor_history(tensor_history)
        if missed_tensors:
            view_cmd = create_view_event_from_tensor_basic_info(missed_tensors)
            self.cache_store.put_command({'view_cmd': view_cmd, 'node_name': node_name, 'graph_name': graph_name})
            log.debug("Send view cmd.")

    def retrieve_tensor_value(self, name, detail, shape, graph_name=None, prev=False):
        """Retrieve the tensor value."""
        log.info("Retrieve tensor value: name: %s, detail: %s, shape: %s", name, detail, shape)
        self.validate_tensor_param(name, detail)
        # Limit to query max two dimensions for tensor in table view.
        parsed_shape = TensorUtils.parse_shape(shape, limit=MAX_DIMENSIONS_FOR_TENSOR)
        node_type, tensor_name = self._get_tensor_name_and_type_by_ui_name(name, graph_name)
        reply = self.cache_store.get_stream_handler(Streams.TENSOR).get(
            {'name': tensor_name,
             'node_type': node_type,
             'shape': parsed_shape,
             'prev': prev}
        )
        reply['tensor_value']['name'] = name
        return reply

    def _get_tensor_name_and_type_by_ui_name(self, name, graph_name=None):
        """
        Get inner tensor name and type by UI name.

        Args:
            name (str): Node name shown in UI.
            graph_name (Union[str, None]): The graph name. Default: None.

        Returns:
            str, node type of the tensor.
            str, the full tensor name, in "<full_name>:<slot>" format.
        """
        node_name, slot = name.rsplit(':', 1)
        graph_stream = self.cache_store.get_stream_handler(Streams.GRAPH)
        graph_name = graph_name if graph_name else graph_stream.get_graph_id_by_name(node_name)
        node_type = graph_stream.get_node_type(node_name, graph_name)
        full_name = graph_stream.get_full_name(node_name, graph_name)
        tensor_name = full_name + ':' + slot
        return node_type, tensor_name

    @staticmethod
    def validate_tensor_param(name, detail):
        """Validate params for retrieve tensor request."""
        # validate name
        if not isinstance(name, str) or ':' not in name:
            log.error("Invalid tensor name. Received: %s", name)
            raise DebuggerParamValueError("Invalid tensor name.")
        # validate data
        if detail != 'data':
            log.error("Invalid detail value. Received: %s", detail)
            raise DebuggerParamValueError("Invalid detail value.")
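
    # Example of the expected UI name format: the name must contain a slot
    # suffix, "<node_name>:<slot>". With a hypothetical node name,
    # 'Default/network/Conv2D-op1:0' passes validation, while
    # 'Default/network/Conv2D-op1' (no slot) raises DebuggerParamValueError.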

    def _retrieve_watchpoint(self, filter_condition):
        """
        Retrieve watchpoint.

        Args:
            filter_condition (dict): Filter condition.

                - watch_point_id (int): The id of watchpoint. If not given, return all watchpoints.
                - name (str): The name of single node.
                - single_node (bool): If False, return the sub-layer of single node. If True, return
                  the node list from root node to single node.

        Returns:
            dict, watch point list or relative graph.
        """
        watchpoint_id = filter_condition.get('watch_point_id', 0)
        if not watchpoint_id:
            reply = self._hide_parameters_for_ui()
            log.debug("Get condition of watchpoints.")
        else:
            reply = self._retrieve_node(filter_condition)
            log.debug("Get graph of %d-th watchpoint.", watchpoint_id)
        return reply

    def _retrieve_watchpoint_hit(self, filter_condition):
        """
        Retrieve watchpoint hit.

        Args:
            filter_condition (dict): Filter condition.

                - name (str): The name of single node.
                - single_node (bool): If False, return the sub-layer of single node. If True, return
                  the node list from root node to single node.

        Returns:
            dict, watch point list or relative graph.
        """
        node_name = filter_condition.get('name')
        # get all watchpoint hit list
        if node_name is None:
            reply = self.cache_store.get_stream_handler(Streams.WATCHPOINT_HIT).get()
            return reply
        graph_name = self.cache_store.get_stream_handler(Streams.GRAPH).validate_graph_name(
            filter_condition.get('graph_name'))
        # get tensor history
        reply = self._get_tensor_history(node_name, graph_name)
        log.debug("Get tensor history for watchpoint hit node.")
        # get single graph
        if filter_condition.get('single_node'):
            filter_condition['graph_name'] = graph_name
            graph = self._get_nodes_info(filter_condition)
            reply.update(graph)
            log.debug("Get single node graph for watchpoint hit node.")
        return reply

    def create_watchpoint(self, watch_condition, watch_nodes=None, watch_point_id=None, search_pattern=None,
                          graph_name=None):
        """
        Create watchpoint.

        Args:
            watch_condition (dict): The watch condition. The format is like:

                {
                    "id": "tensor_too_large",
                    "params": [
                        {
                            "name": "abs_mean_gt",
                            "disable": false,
                            "value": 1.1
                        }
                    ]
                }

                - id (str): Id of condition.
                - params (list[dict]): The list of param for this condition.
            watch_nodes (list[str]): The list of node names.
            watch_point_id (int): The id of watchpoint.
            search_pattern (dict): The search pattern. Default: None.
            graph_name (str): The relative graph_name of the watched node. Default: None.

        Returns:
            dict, the id of new watchpoint and metadata info.
        """
        log.info("Received create watchpoint request. WatchCondition: %s", watch_condition)
        metadata_stream = self.cache_store.get_stream_handler(Streams.METADATA)
        if metadata_stream.state != ServerStatus.WAITING.value:
            log.error("Failed to create watchpoint as MindSpore is not in the waiting state.")
            raise DebuggerCreateWatchPointError(
                "Failed to create watchpoint as MindSpore is not in the waiting state.")
        if metadata_stream.backend == 'GPU' and watch_condition.get('id') in (
                ConditionIdEnum.OVERFLOW_ASCEND_CHIP.value, ConditionIdEnum.OPERATOR_OVERFLOW.value):
            log.error("GPU doesn't support overflow watch condition.")
            raise DebuggerParamValueError("GPU doesn't support overflow watch condition.")
        if metadata_stream.backend == 'Ascend' and watch_condition.get('id') == ConditionIdEnum.NAN.value:
            log.error("Ascend doesn't support nan watch condition.")
            raise DebuggerParamValueError("Ascend doesn't support nan watch condition.")
        watch_nodes = self._get_watch_node_with_basic_info(
            node_names=watch_nodes, search_pattern=search_pattern, graph_name=graph_name)
        watchpoint_stream = self.cache_store.get_stream_handler(Streams.WATCHPOINT)
        watch_point_id = watchpoint_stream.create_watchpoint(
            self.condition_mgr, watch_condition, watch_nodes, watch_point_id)
        log.info("Create watchpoint %d", watch_point_id)
        metadata_stream.enable_recheck = watchpoint_stream.is_recheckable(metadata_stream.backend)
        res = metadata_stream.get(['state', 'enable_recheck'])
        res['id'] = watch_point_id
        return res
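
    # An illustrative call mirroring the watch_condition format documented
    # above (the watched node name is hypothetical):
    #
    #     res = server.create_watchpoint(
    #         watch_condition={
    #             'id': 'tensor_too_large',
    #             'params': [{'name': 'abs_mean_gt', 'disable': False, 'value': 1.1}],
    #         },
    #         watch_nodes=['Default/network/Conv2D-op1'])
    #     new_id = res['id']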

    def update_watchpoint(self, watch_point_id, watch_nodes, mode, search_pattern=None, graph_name=None):
        """
        Update watchpoint.

        Args:
            watch_point_id (int): The id of watchpoint.
            watch_nodes (list[str]): The list of node names.
            mode (int): The update operator on nodes. 0 for removing nodes from watch nodes,
                1 for adding nodes to watch nodes.
            search_pattern (dict): The search pattern. Default: None.
            graph_name (str): The relative graph_name of the watched node. Default: None.

        Returns:
            dict, the metadata info.
        """
        metadata_stream = self.cache_store.get_stream_handler(Streams.METADATA)
        if metadata_stream.state != ServerStatus.WAITING.value:
            log.error("Failed to update watchpoint as MindSpore is not in the waiting state.")
            raise DebuggerUpdateWatchPointError(
                "Failed to update watchpoint as MindSpore is not in the waiting state."
            )
        # validate parameter
        watchpoint_stream = self.cache_store.get_stream_handler(Streams.WATCHPOINT)
        watchpoint_stream.validate_watchpoint_id(watch_point_id)
        if not watch_nodes or not watch_point_id:
            log.error("Invalid parameter for update watchpoint.")
            raise DebuggerParamValueError("Invalid parameter for update watchpoint.")
        # get node basic info for watch nodes
        watch_nodes = self._get_watch_node_with_basic_info(watch_nodes, search_pattern, graph_name)
        watchpoint_stream.update_watchpoint(watch_point_id, watch_nodes, mode)
        metadata_stream.enable_recheck = watchpoint_stream.is_recheckable(metadata_stream.backend)
        log.info("Update watchpoint with id: %d", watch_point_id)
        return metadata_stream.get(['state', 'enable_recheck'])
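
    # An illustrative update (hypothetical id and node name; mode 1 adds the
    # node to the watch list, mode 0 removes it, as documented above):
    #
    #     server.update_watchpoint(
    #         watch_point_id=1, watch_nodes=['Default/network/Conv2D-op1'], mode=1)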

    def _get_watch_node_with_basic_info(self, node_names, search_pattern=None, graph_name=None):
        """
        Get watch node with basic info.

        Args:
            node_names (list[str]): A list of node names.
            search_pattern (dict): Get watch node with search pattern. Default: None.
            graph_name (str): The relative graph_name of the watched node. Default: None.

        Returns:
            list[NodeBasicInfo], a list of node basic infos.
        """
        graph_stream = self.cache_store.get_stream_handler(Streams.GRAPH)
        graph_name = graph_stream.validate_graph_name(graph_name)
        if search_pattern is not None:
            watch_nodes = self._get_watch_nodes_by_search(node_names, search_pattern, graph_name)
        else:
            watch_nodes = self._get_node_basic_infos(node_names, graph_name=graph_name)
        return watch_nodes

    def _get_watch_nodes_by_search(self, watch_nodes, search_pattern, graph_name):
        """Get watched leaf nodes by search name."""
        watched_leaf_nodes = []
        graph_stream = self.cache_store.get_stream_handler(Streams.GRAPH)
        # dict.update() returns None, so build the merged pattern explicitly
        new_pattern = {'graph_name': graph_name}
        new_pattern.update(search_pattern)
        for search_name in watch_nodes:
            search_nodes = graph_stream.get_searched_node_list(new_pattern)
            search_node_names = [
                NodeBasicInfo(name=node.name, full_name=node.full_name, type=node.type)
                for node in search_nodes
                if node.name.startswith(search_name)]
            watched_leaf_nodes.extend(search_node_names)
        log.debug("Update nodes: %s", watched_leaf_nodes)
        return watched_leaf_nodes

    def delete_watchpoint(self, watch_point_id=None):
        """
        Delete watchpoint.

        Args:
            watch_point_id (Union[None, int]): The id of watchpoint.
                If None, delete all watchpoints. Default: None.

        Returns:
            dict, the metadata info.
        """
        metadata_stream = self.cache_store.get_stream_handler(Streams.METADATA)
        if metadata_stream.state != ServerStatus.WAITING.value:
            log.error("Failed to delete watchpoint as MindSpore is not in the waiting state.")
            raise DebuggerDeleteWatchPointError(
                "Failed to delete watchpoint as MindSpore is not in the waiting state."
            )
        watchpoint_stream = self.cache_store.get_stream_handler(Streams.WATCHPOINT)
        watchpoint_stream.delete_watchpoint(watch_point_id)
        metadata_stream.enable_recheck = watchpoint_stream.is_recheckable()
        log.info("Delete watchpoint with id: %s", watch_point_id)
        return metadata_stream.get(['state', 'enable_recheck'])

    def _get_node_basic_infos(self, node_names, graph_name=None):
        """
        Get node info according to node names.

        Args:
            node_names (list[str]): A list of node names.
            graph_name (str): The relative graph_name of the watched node. Default: None.

        Returns:
            list[NodeBasicInfo], a list of basic node infos.
        """
        if not node_names:
            return []
        graph_stream = self.cache_store.get_stream_handler(Streams.GRAPH)
        node_infos = []
        for node_name in node_names:
            node_info = graph_stream.get_node_basic_info(node_name, graph_name)
            node_infos.append(node_info)
        return node_infos

    def control(self, params=None):
        """
        Control the training process.

        Args:
            params (dict): The control params.

                - mode (str): Acceptable control command, including `continue`,
                  `pause` and `terminate`.
                - level (str): The control granularity, `node` level or `step` level.
                  Default: `step`.
                - steps (int): Specify the steps that training should run.
                  Used when `level` is `step`.
                - name (str): Specify the name of the node. Used when `level` is `node`.
                - graph_name (str): The graph name.

        Returns:
            dict, the response.
        """
        log.info("Receive control request: %s.", params)
        mode = params.pop('mode', None)
        training_controller = TrainingControlOperator(self.cache_store)
        training_controller.validate_mode(mode)
        return training_controller.control(mode, params)
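
    # Illustrative control requests built from the documented params (the
    # step count is an arbitrary example):
    #
    #     server.control({'mode': 'continue', 'level': 'step', 'steps': 1})
    #     server.control({'mode': 'pause'})
    #     server.control({'mode': 'terminate'})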

    def retrieve_node_by_bfs(self, node_name, graph_name=None, ascend=False):
        """
        Get the graph of the next node according to node_name.

        Args:
            node_name (str): The name of current chosen leaf node.
            graph_name (str): The graph name.
            ascend (bool): If True, traverse the input nodes;
                if False, traverse the output nodes. Default: False.

        Returns:
            dict, the next node information.
        """
        log.info("Retrieve node <%s> by bfs, `ascend` is: %s",
                 node_name, ascend)
        reply = {}
        graph_stream = self.cache_store.get_stream_handler(Streams.GRAPH)
        graph_name = graph_stream.validate_graph_name(graph_name)
        next_node_name = graph_stream.get_node_by_bfs_order(node_name, ascend)
        # no next node
        if next_node_name is None:
            return reply
        # add graph and tensor history for next node
        filter_condition = {
            'name': next_node_name,
            'graph_name': graph_name,
            'single_node': True
        }
        search_graph = self._get_nodes_info(filter_condition)
        reply = {'name': next_node_name}
        reply.update(search_graph)
        return reply

    def recheck(self):
        """
        Recheck all watchpoints.

        Returns:
            dict, metadata info.
        """
        return TrainingControlOperator(self.cache_store).recheck()

    def retrieve_tensor_graph(self, tensor_name, graph_name):
        """
        Retrieve tensor graph.

        Args:
            tensor_name (str): The tensor name from UI.
            graph_name (str): The graph name.

        Returns:
            dict, tensor graph object.
        """
        if self.cache_store.get_stream_handler(Streams.METADATA).state != ServerStatus.WAITING.value:
            log.error("Failed to get tensor graph as MindSpore is not in the waiting state.")
            raise DebuggerTensorGraphError
        log.info("Retrieve tensor graph for %s from %s", tensor_name, graph_name)
        tensor_graph_ops = TensorDetailInfo(self.cache_store).get_tensor_graph(tensor_name, graph_name)
        return tensor_graph_ops

    def retrieve_tensor_hits(self, tensor_name, graph_name):
        """
        Retrieve tensor hit information.

        Args:
            tensor_name (str): The tensor name from UI.
            graph_name (str): The graph name.

        Returns:
            dict, tensor hit info.
        """
        if self.cache_store.get_stream_handler(Streams.METADATA).state != ServerStatus.WAITING.value:
            log.error("Failed to get tensor hits as MindSpore is not in the waiting state.")
            raise DebuggerTensorHitError
        log.info("Retrieve tensor hits for %s from %s", tensor_name, graph_name)
        watch_points = TensorDetailInfo(self.cache_store).get_tensor_watch_points(tensor_name, graph_name)
        return {'watch_points': watch_points}

    def _hide_parameters_for_ui(self):
        """
        Hide some parameters on UI.

        Returns:
            dict, watch point list.
        """
        reply = self.cache_store.get_stream_handler(Streams.WATCHPOINT).get()
        watch_points = reply.get('watch_points')
        for i, watch_point in enumerate(watch_points):
            watch_condition = watch_point.get('watch_condition')
            parameters = watch_condition.get('params')
            watch_condition_id = watch_condition.get('id')
            mgr_condition = self.condition_mgr.get_condition(watch_condition_id)
            ui_watch_condition = []
            for param in parameters:
                parameter_definition = mgr_condition.get_parameter_definition(param['name'])
                if not parameter_definition.visible_on_ui:
                    continue
                ui_watch_condition.append(param)
            reply['watch_points'][i]['watch_condition']['params'] = ui_watch_condition
        return reply
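

# A minimal standalone launch sketch, assuming the module is run directly; in
# normal deployment the MindInsight backend creates and starts the server, and
# the port below simply mirrors the fallback used in start().
if __name__ == '__main__':
    debugger_server = DebuggerServer(grpc_port='50051')
    debugger_server.start()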