# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
    Test query debugger restful api.
Usage:
    pytest tests/st/func/debugger/test_restful_api.py
"""
import os

import pytest

from mindinsight.conf import settings
from mindinsight.debugger.common.utils import ServerStatus

from tests.st.func.debugger.conftest import DEBUGGER_BASE_URL
from tests.st.func.debugger.mock_ms_client import MockDebuggerClient
from tests.st.func.debugger.utils import check_state, get_request_result, \
    send_and_compare_result


def send_terminate_cmd(app_client):
    """Send terminate command to debugger client."""
    url = os.path.join(DEBUGGER_BASE_URL, 'control')
    body_data = {'mode': 'terminate'}
    send_and_compare_result(app_client, url, body_data)


class TestAscendDebugger:
    """Test debugger on Ascend backend."""

    @classmethod
    def setup_class(cls):
        """Setup class."""
        cls._debugger_client = MockDebuggerClient(backend='Ascend')

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_before_train_begin(self, app_client):
        """Test retrieve all."""
        url = 'retrieve'
        body_data = {'mode': 'all'}
        expect_file = 'before_train_begin.json'
        send_and_compare_result(app_client, url, body_data, expect_file)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'mode': 'all'}, 'retrieve_all.json'),
        ({'mode': 'node', 'params': {'name': 'Default'}}, 'retrieve_scope_node.json'),
        ({'mode': 'node', 'params': {'name': 'Default/optimizer-Momentum/Parameter[18]_7'}},
         'retrieve_aggregation_scope_node.json'),
        ({'mode': 'node', 'params': {
            'name': 'Default/TransData-op99',
            'single_node': True}}, 'retrieve_single_node.json'),
        ({'mode': 'watchpoint_hit'}, 'retrieve_empty_watchpoint_hit_list')
    ])
    def test_retrieve_when_train_begin(self, app_client, body_data, expect_file):
        """Test retrieve when train_begin."""
        url = 'retrieve'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)

    def test_get_conditions(self, app_client):
        """Test get conditions for ascend."""
        url = '/v1/mindinsight/conditionmgr/train-jobs/train-id/condition-collections'
        body_data = {}
        expect_file = 'get_conditions_for_ascend.json'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file, method='get', full_url=True)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'mode': 'all'}, 'multi_retrieve_all.json'),
        ({'mode': 'node', 'params': {'name': 'Default', 'graph_name': 'graph_1'}}, 'retrieve_scope_node.json'),
        ({'mode': 'node', 'params': {'name': 'graph_0'}}, 'multi_retrieve_scope_node.json'),
        ({'mode': 'node', 'params': {'name': 'graph_0/Default/optimizer-Momentum/Parameter[18]_7'}},
         'multi_retrieve_aggregation_scope_node.json'),
        ({'mode': 'node', 'params': {
            'name': 'graph_0/Default/TransData-op99',
            'single_node': True}}, 'multi_retrieve_single_node.json'),
        ({'mode': 'node', 'params': {
            'name': 'Default/TransData-op99',
            'single_node': True, 'graph_name': 'graph_0'}}, 'retrieve_single_node.json')
    ])
    def test_multi_retrieve_when_train_begin(self, app_client, body_data, expect_file):
        """Test retrieve when train_begin."""
        url = 'retrieve'
        debugger_client = MockDebuggerClient(backend='Ascend', graph_num=2)
        with debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_create_and_delete_watchpoint(self, app_client):
        """Test create and delete watchpoint."""
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            conditions = [
                {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
                {'id': 'tensor_too_small', 'params': [{'name': 'max_lt', 'value': -1.0}]},
                {'id': 'tensor_too_large', 'params': [{'name': 'min_gt', 'value': 1e+32}]},
                {'id': 'tensor_too_small', 'params': [{'name': 'min_lt', 'value': -1e+32}]},
                {'id': 'tensor_too_large', 'params': [{'name': 'mean_gt', 'value': 0}]},
                {'id': 'tensor_too_small', 'params': [{'name': 'mean_lt', 'value': 0}]}
            ]
            for idx, condition in enumerate(conditions):
                create_watchpoint(app_client, condition, idx + 1)
            # delete the 4th watchpoint
            url = 'delete_watchpoint'
            body_data = {'watch_point_id': 4}
            get_request_result(app_client, url, body_data)
            # check the watchpoint list
            url = 'retrieve'
            body_data = {'mode': 'watchpoint'}
            expect_file = 'create_and_delete_watchpoint.json'
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_update_watchpoint(self, app_client):
        """Test update watchpoint."""
        watch_point_id = 1
        leaf_node_name = 'Default/optimizer-Momentum/Parameter[18]_7/moments.fc3.bias'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            condition = {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]}
            create_watchpoint(app_client, condition, watch_point_id)
            # update the watch nodes of the watchpoint
            url = 'update_watchpoint'
            body_data = {'watch_point_id': watch_point_id,
                         'watch_nodes': [leaf_node_name],
                         'mode': 0}
            get_request_result(app_client, url, body_data)
            # get updated nodes
            url = 'search'
            body_data = {'name': leaf_node_name, 'watch_point_id': watch_point_id}
            expect_file = 'search_unwatched_leaf_node.json'
            send_and_compare_result(app_client, url, body_data, expect_file, method='get')
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_watchpoint_hit(self, app_client):
        """Test retrieve watchpoint hit."""
        with self._debugger_client.get_thread_instance():
            create_watchpoint_and_wait(app_client)
            # check watchpoint hit list
            url = 'retrieve'
            body_data = {'mode': 'watchpoint_hit'}
            expect_file = 'retrieve_watchpoint_hit.json'
            send_and_compare_result(app_client, url, body_data, expect_file)
            # check single watchpoint hit
            body_data = {
                'mode': 'watchpoint_hit',
                'params': {
                    'name': 'Default/TransData-op99',
                    'single_node': True,
                    'watch_point_id': 1
                }
            }
            expect_file = 'retrieve_single_watchpoint_hit.json'
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_retrieve_tensor_value(self, app_client):
        """Test retrieve tensor value."""
        node_name = 'Default/TransData-op99'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            # prepare tensor value
            url = 'retrieve_tensor_history'
            body_data = {'name': node_name}
            expect_file = 'retrieve_empty_tensor_history.json'
            send_and_compare_result(app_client, url, body_data, expect_file)
            # check full tensor history from poll data
            res = get_request_result(
                app_client=app_client, url='poll_data', body_data={'pos': 0}, method='get')
            assert res.get('receive_tensor', {}).get('node_name') == node_name
            expect_file = 'retrieve_full_tensor_history.json'
            send_and_compare_result(app_client, url, body_data, expect_file)
            # check tensor value
            url = 'tensors'
            body_data = {
                'name': node_name + ':0',
                'detail': 'data',
                'shape': '[1, 1:3]'
            }
            expect_file = 'retrieve_tensor_value.json'
            send_and_compare_result(app_client, url, body_data, expect_file, method='get')
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_compare_tensor_value(self, app_client):
        """Test compare tensor value."""
        node_name = 'Default/args0'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            # prepare tensor values
            url = 'control'
            body_data = {'mode': 'continue',
                         'steps': 2}
            get_request_result(app_client, url, body_data)
            check_state(app_client)
            get_request_result(
                app_client=app_client, url='retrieve_tensor_history', body_data={'name': node_name})
            res = get_request_result(
                app_client=app_client, url='poll_data', body_data={'pos': 0}, method='get')
            assert res.get('receive_tensor', {}).get('node_name') == node_name
            # get compare results
            url = 'tensor-comparisons'
            body_data = {
                'name': node_name + ':0',
                'detail': 'data',
                'shape': '[:, :]',
                'tolerance': 1
            }
            expect_file = 'compare_tensors.json'
            send_and_compare_result(app_client, url, body_data, expect_file, method='get')
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'ascend': True}, 'retrieve_node_by_bfs_ascend.json'),
        ({'name': 'Default/args0', 'ascend': False}, 'retrieve_node_by_bfs.json')
    ])
    def test_retrieve_bfs_node(self, app_client, body_data, expect_file):
        """Test retrieve bfs node."""
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            # retrieve the next node in BFS order
            url = 'retrieve_node_by_bfs'
            send_and_compare_result(app_client, url, body_data, expect_file, method='get')
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_pause(self, app_client):
        """Test pause the training."""
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            # send run command to execute to next node
            url = 'control'
            body_data = {'mode': 'continue',
                         'steps': -1}
            res = get_request_result(app_client, url, body_data)
            assert res == {'metadata': {'state': 'running', 'enable_recheck': False}}
            # send pause command
            url = 'control'
            body_data = {'mode': 'pause'}
            res = get_request_result(app_client, url, body_data)
            assert res == {'metadata': {'state': 'waiting', 'enable_recheck': False}}
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("url, body_data, enable_recheck", [
        ('create_watchpoint',
         {'condition': {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
          'watch_nodes': ['Default']}, True),
        ('update_watchpoint',
         {'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum/Parameter[18]_7'],
          'mode': 0}, True),
        ('update_watchpoint',
         {'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum'],
          'mode': 1}, True),
        ('delete_watchpoint', {}, True)
    ])
    def test_recheck(self, app_client, url, body_data, enable_recheck):
        """Test recheck."""
        with self._debugger_client.get_thread_instance():
            create_watchpoint_and_wait(app_client)
            # send the watchpoint request and check whether recheck is enabled
            res = get_request_result(app_client, url, body_data, method='post')
            assert res['metadata']['enable_recheck'] is enable_recheck
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_recommend_watchpoints(self, app_client):
        """Test generating recommended watchpoints."""
        original_value = settings.ENABLE_RECOMMENDED_WATCHPOINTS
        settings.ENABLE_RECOMMENDED_WATCHPOINTS = True
        try:
            with self._debugger_client.get_thread_instance():
                check_state(app_client)
                url = 'retrieve'
                body_data = {'mode': 'watchpoint'}
                expect_file = 'recommended_watchpoints_at_startup.json'
                send_and_compare_result(app_client, url, body_data, expect_file, method='post')
                send_terminate_cmd(app_client)
        finally:
            settings.ENABLE_RECOMMENDED_WATCHPOINTS = original_value

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'tensor_name': 'Default/TransData-op99:0', 'graph_name': 'graph_0'}, 'retrieve_tensor_graph-0.json'),
        ({'tensor_name': 'Default/optimizer-Momentum/Parameter[18]_7/moments.fc1.bias:0', 'graph_name': 'graph_0'},
         'retrieve_tensor_graph-1.json')
    ])
    def test_retrieve_tensor_graph(self, app_client, body_data, expect_file):
        """Test retrieve tensor graph."""
        url = 'tensor-graphs'
        with self._debugger_client.get_thread_instance():
            create_watchpoint_and_wait(app_client)
            get_request_result(app_client, url, body_data, method='GET')
            # check that the tensor has been received from poll data
            res = get_request_result(
                app_client=app_client, url='poll_data', body_data={'pos': 0}, method='get')
            assert res.get('receive_tensor', {}).get('tensor_name') == body_data.get('tensor_name')
            send_and_compare_result(app_client, url, body_data, expect_file, method='GET')
            send_terminate_cmd(app_client)


class TestGPUDebugger:
    """Test debugger on GPU backend."""

    @classmethod
    def setup_class(cls):
        """Setup class."""
        cls._debugger_client = MockDebuggerClient(backend='GPU')

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_next_node_on_gpu(self, app_client):
        """Test get next node on GPU."""
        gpu_debugger_client = MockDebuggerClient(backend='GPU')
        with gpu_debugger_client.get_thread_instance():
            check_state(app_client)
            # send run command to execute to the next node
            url = 'control'
            body_data = {'mode': 'continue',
                         'level': 'node',
                         'name': 'Default/TransData-op99'}
            res = get_request_result(app_client, url, body_data)
            assert res == {'metadata': {'state': 'running', 'enable_recheck': False}}
            # get metadata
            check_state(app_client)
            url = 'retrieve'
            body_data = {'mode': 'all'}
            expect_file = 'retrieve_next_node_on_gpu.json'
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("url, body_data, enable_recheck", [
        ('create_watchpoint',
         {'condition': {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
          'watch_nodes': ['Default']}, True),
        ('create_watchpoint',
         {'condition': {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
          'watch_nodes': ['Default/TransData-op99']}, True),
        ('update_watchpoint',
         {'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum/Parameter[18]_7'],
          'mode': 0}, True),
        ('update_watchpoint',
         {'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum'],
          'mode': 1}, True),
        ('update_watchpoint',
         [{'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum'],
           'mode': 1},
          {'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum'],
           'mode': 0}
          ], True),
        ('update_watchpoint',
         [{'watch_point_id': 1, 'watch_nodes': ['Default/TransData-op99'],
           'mode': 0},
          {'watch_point_id': 1, 'watch_nodes': ['Default/TransData-op99'],
           'mode': 1}
          ], True),
        ('delete_watchpoint', {'watch_point_id': 1}, True)
    ])
    def test_recheck_state(self, app_client, url, body_data, enable_recheck):
        """Test update watchpoint and check the value of enable_recheck."""
        with self._debugger_client.get_thread_instance():
            create_watchpoint_and_wait(app_client)
            if not isinstance(body_data, list):
                body_data = [body_data]
            for sub_body_data in body_data:
                res = get_request_result(app_client, url, sub_body_data, method='post')
            assert res['metadata']['enable_recheck'] is enable_recheck
            send_terminate_cmd(app_client)

    def test_get_conditions(self, app_client):
        """Test get conditions for gpu."""
        url = '/v1/mindinsight/conditionmgr/train-jobs/train-id/condition-collections'
        body_data = {}
        expect_file = 'get_conditions_for_gpu.json'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file, method='get', full_url=True)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_recheck(self, app_client):
        """Test recheck request."""
        with self._debugger_client.get_thread_instance():
            create_watchpoint_and_wait(app_client)
            # send recheck while recheck is disabled
            get_request_result(app_client, 'recheck', {}, method='post', expect_code=400)
            # send recheck after recheck is enabled
            create_watchpoint(app_client, {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]}, 2)
            res = get_request_result(app_client, 'recheck', {}, method='post')
            assert res['metadata']['enable_recheck'] is False
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("filter_condition, expect_file", [
        ({'name': 'fc', 'node_category': 'weight'}, 'search_weight.json'),
        ({'name': 'fc', 'node_category': 'gradient'}, 'search_gradient.json'),
        ({'node_category': 'activation'}, 'search_activation.json')
    ])
    def test_search_by_category(self, app_client, filter_condition, expect_file):
        """Test search nodes by category."""
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, 'search', filter_condition, expect_file,
                                    method='get')
            send_terminate_cmd(app_client)


class TestMultiGraphDebugger:
    """Test debugger on Ascend backend for multi_graph."""

    @classmethod
    def setup_class(cls):
        """Setup class."""
        cls._debugger_client = MockDebuggerClient(backend='Ascend', graph_num=2)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'mode': 'all'}, 'multi_retrieve_all.json'),
        ({'mode': 'node', 'params': {'name': 'Default', 'graph_name': 'graph_1'}}, 'retrieve_scope_node.json'),
        ({'mode': 'node', 'params': {'name': 'graph_0'}}, 'multi_retrieve_scope_node.json'),
        ({'mode': 'node', 'params': {'name': 'graph_0/Default/optimizer-Momentum/Parameter[18]_7'}},
         'multi_retrieve_aggregation_scope_node.json'),
        ({'mode': 'node', 'params': {
            'name': 'graph_0/Default/TransData-op99',
            'single_node': True}}, 'multi_retrieve_single_node.json'),
        ({'mode': 'node', 'params': {
            'name': 'Default/TransData-op99',
            'single_node': True, 'graph_name': 'graph_0'}}, 'retrieve_single_node.json')
    ])
    def test_multi_retrieve_when_train_begin(self, app_client, body_data, expect_file):
        """Test retrieve when train_begin."""
        url = 'retrieve'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("filter_condition, expect_file", [
        ({'name': '', 'node_category': 'weight'}, 'search_weight_multi_graph.json'),
        ({'node_category': 'activation'}, 'search_activation_multi_graph.json')
    ])
    def test_search_by_category_with_multi_graph(self, app_client, filter_condition, expect_file):
        """Test search by category request."""
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, 'search', filter_condition, expect_file, method='get')
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("filter_condition, expect_id", [
        ({'condition': {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
          'watch_nodes': ['Default/optimizer-Momentum/Parameter[18]_7'],
          'graph_name': 'graph_0'}, 1),
        ({'condition': {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
          'watch_nodes': ['graph_0/Default/optimizer-Momentum/ApplyMomentum[8]_1'],
          'graph_name': None}, 1)
    ])
    def test_create_watchpoint(self, app_client, filter_condition, expect_id):
        """Test create watchpoint with multiple graphs."""
        url = 'create_watchpoint'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            res = get_request_result(app_client, url, filter_condition)
            assert res.get('id') == expect_id
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("params, expect_file", [
        ({'level': 'node'}, 'multi_next_node.json'),
        ({'level': 'node', 'node_name': 'graph_0/Default/TransData-op99'}, 'multi_next_node.json'),
        ({'level': 'node', 'node_name': 'Default/TransData-op99', 'graph_name': 'graph_0'},
         'multi_next_node.json')
    ])
    def test_continue_on_gpu(self, app_client, params, expect_file):
        """Test continue to the next node on GPU with multiple graphs."""
        gpu_debugger_client = MockDebuggerClient(backend='GPU', graph_num=2)
        original_value = settings.ENABLE_RECOMMENDED_WATCHPOINTS
        settings.ENABLE_RECOMMENDED_WATCHPOINTS = True
        try:
            with gpu_debugger_client.get_thread_instance():
                check_state(app_client)
                # send run command to execute to the next node
                url = 'control'
                body_data = {'mode': 'continue'}
                body_data.update(params)
                res = get_request_result(app_client, url, body_data)
                assert res == {'metadata': {'state': 'running', 'enable_recheck': False}}
                # get metadata
                check_state(app_client)
                url = 'retrieve'
                body_data = {'mode': 'all'}
                send_and_compare_result(app_client, url, body_data, expect_file)
                send_terminate_cmd(app_client)
        finally:
            settings.ENABLE_RECOMMENDED_WATCHPOINTS = original_value

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'tensor_name': 'Default/TransData-op99:0', 'graph_name': 'graph_0'}, 'retrieve_tensor_hits-0.json'),
        ({'tensor_name': 'Default/optimizer-Momentum/Parameter[18]_7/moments.fc1.bias:0', 'graph_name': 'graph_0'},
         'retrieve_tensor_hits-1.json')
    ])
    def test_retrieve_tensor_hits(self, app_client, body_data, expect_file):
        """Test retrieve tensor hits."""
        url = 'tensor-hits'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file, method='GET')
            send_terminate_cmd(app_client)


def create_watchpoint(app_client, condition, expect_id):
    """Create watchpoint."""
    url = 'create_watchpoint'
    body_data = {'condition': condition,
                 'watch_nodes': ['Default/optimizer-Momentum/Parameter[18]_7',
                                 'Default/optimizer-Momentum/Parameter[18]_7/moments.fc3.bias',
                                 'Default/optimizer-Momentum/Parameter[18]_7/moments.fc1.bias',
                                 'Default/TransData-op99']}
    res = get_request_result(app_client, url, body_data)
    assert res.get('id') == expect_id


def create_watchpoint_and_wait(app_client):
    """Preparation for recheck."""
    check_state(app_client)
    create_watchpoint(app_client, condition={'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
                      expect_id=1)
    # send run command to get watchpoint hit
    url = 'control'
    body_data = {'mode': 'continue',
                 'steps': 2}
    res = get_request_result(app_client, url, body_data)
    assert res == {'metadata': {'state': 'running', 'enable_recheck': False}}
    # wait until the server has received the watchpoint hit
    check_state(app_client)


class TestMismatchDebugger:
    """Test debugger when the MindInsight and MindSpore versions do not match."""

    @classmethod
    def setup_class(cls):
        """Setup class."""
        cls._debugger_client = MockDebuggerClient(backend='Ascend', ms_version='1.0.0')

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'mode': 'all'}, 'version_mismatch.json')
    ])
    def test_retrieve_when_version_mismatch(self, app_client, body_data, expect_file):
        """Test retrieve when the MindInsight and MindSpore versions do not match."""
        url = 'retrieve'
        with self._debugger_client.get_thread_instance():
            check_state(app_client, ServerStatus.MISMATCH.value)
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)