You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_restful_api.py 31 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """
  16. Function:
  17. Test query debugger restful api.
  18. Usage:
  19. pytest tests/st/func/debugger/test_restful_api.py
  20. """
  21. import os
  22. from urllib.parse import quote
  23. import pytest
  24. from mindinsight.conf import settings
  25. from mindinsight.debugger.common.utils import ServerStatus
  26. from tests.st.func.debugger.conftest import DEBUGGER_BASE_URL
  27. from tests.st.func.debugger.mock_ms_client import MockDebuggerClient
  28. from tests.st.func.debugger.utils import check_state, get_request_result, \
  29. send_and_compare_result
  30. def send_terminate_cmd(app_client):
  31. """Send terminate command to debugger client."""
  32. url = os.path.join(DEBUGGER_BASE_URL, 'control')
  33. body_data = {'mode': 'terminate'}
  34. send_and_compare_result(app_client, url, body_data)
class TestAscendDebugger:
    """Test debugger restful APIs against a mocked Ascend backend."""

    @classmethod
    def setup_class(cls):
        """Create the mock debugger client shared by all tests in this class."""
        cls._debugger_client = MockDebuggerClient(backend='Ascend')

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_before_train_begin(self, app_client):
        """Test 'retrieve all' before any training session has connected."""
        url = 'retrieve'
        body_data = {'mode': 'all'}
        expect_file = 'before_train_begin.json'
        send_and_compare_result(app_client, url, body_data, expect_file)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'mode': 'all'}, 'retrieve_all.json'),
        ({'mode': 'node', 'params': {'name': 'Default'}}, 'retrieve_scope_node.json'),
        ({'mode': 'node', 'params': {'name': 'Default/optimizer-Momentum/Parameter[18]_7'}},
         'retrieve_aggregation_scope_node.json'),
        ({'mode': 'node', 'params': {
            'name': 'Default/TransData-op99',
            'single_node': True}}, 'retrieve_single_node.json')
    ])
    def test_retrieve_when_train_begin(self, app_client, body_data, expect_file):
        """Test retrieve requests after the training session has begun."""
        url = 'retrieve'
        with self._debugger_client.get_thread_instance():
            # Wait until the server is ready before issuing the request.
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)

    def test_get_conditions(self, app_client):
        """Test getting condition collections for the Ascend backend."""
        # NOTE(review): unlike the other tests this one carries no pytest
        # level/platform marks -- confirm whether that is intentional.
        url = '/v1/mindinsight/conditionmgr/train-jobs/train-id/condition-collections'
        body_data = {}
        expect_file = 'get_conditions_for_ascend.json'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file, method='get', full_url=True)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'mode': 'all'}, 'multi_retrieve_all.json'),
        ({'mode': 'node', 'params': {'name': 'Default', 'graph_name': 'graph_1'}}, 'retrieve_scope_node.json'),
        ({'mode': 'node', 'params': {'name': 'graph_0'}}, 'multi_retrieve_scope_node.json'),
        ({'mode': 'node', 'params': {'name': 'graph_0/Default/optimizer-Momentum/Parameter[18]_7'}},
         'multi_retrieve_aggregation_scope_node.json'),
        ({'mode': 'node', 'params': {
            'name': 'graph_0/Default/TransData-op99',
            'single_node': True}}, 'multi_retrieve_single_node.json'),
        ({'mode': 'node', 'params': {
            'name': 'Default/TransData-op99',
            'single_node': True, 'graph_name': 'graph_0'}}, 'retrieve_single_node.json')
    ])
    def test_multi_retrieve_when_train_begin(self, app_client, body_data, expect_file):
        """Test retrieve requests with multiple graphs after training begins."""
        url = 'retrieve'
        # A dedicated client with two graphs; the class-level client has one.
        debugger_client = MockDebuggerClient(backend='Ascend', graph_num=2)
        with debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_create_and_delete_watchpoint(self, app_client):
        """Test creating several watchpoints and deleting one of them."""
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            conditions = [
                {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
                {'id': 'tensor_too_small', 'params': [{'name': 'max_lt', 'value': -1.0}]},
                {'id': 'tensor_too_large', 'params': [{'name': 'min_gt', 'value': 1e+32}]},
                {'id': 'tensor_too_small', 'params': [{'name': 'min_lt', 'value': -1e+32}]},
                {'id': 'tensor_too_large', 'params': [{'name': 'mean_gt', 'value': 0}]},
                {'id': 'tensor_too_small', 'params': [{'name': 'mean_lt', 'value': 0}]}
            ]
            for idx, condition in enumerate(conditions):
                # Watchpoint ids are assigned sequentially starting at 1.
                create_watchpoint(app_client, condition, idx + 1)
            # delete 4-th watchpoint
            url = 'delete-watchpoint'
            body_data = {'watch_point_id': 4}
            get_request_result(app_client, url, body_data)
            # test watchpoint list
            url = 'retrieve'
            body_data = {'mode': 'watchpoint'}
            expect_file = 'create_and_delete_watchpoint.json'
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_update_watchpoint(self, app_client):
        """Test updating a watchpoint and searching for the unwatched node."""
        watch_point_id = 1
        leaf_node_name = 'Default/optimizer-Momentum/Parameter[18]_7/moments.fc3.bias'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            condition = {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]}
            create_watchpoint(app_client, condition, watch_point_id)
            # update watchpoint watchpoint list
            url = 'update-watchpoint'
            body_data = {'watch_point_id': watch_point_id,
                         'watch_nodes': [leaf_node_name],
                         'mode': 1}
            get_request_result(app_client, url, body_data)
            # get updated nodes
            url = 'search'
            body_data = {'name': leaf_node_name, 'watch_point_id': watch_point_id}
            expect_file = 'search_unwatched_leaf_node.json'
            send_and_compare_result(app_client, url, body_data, expect_file, method='get')
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_retrieve_tensor_value(self, app_client):
        """Test retrieving a tensor value slice after its history is loaded."""
        node_name = 'Default/TransData-op99'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            # prepare tensor value
            url = 'tensor-history'
            body_data = {'name': node_name}
            expect_file = 'retrieve_empty_tensor_history.json'
            send_and_compare_result(app_client, url, body_data, expect_file)
            # check full tensor history from poll data
            res = get_request_result(
                app_client=app_client, url='poll-data', body_data={'pos': 0}, method='get')
            assert res.get('receive_tensor', {}).get('node_name') == node_name
            expect_file = 'retrieve_full_tensor_history.json'
            send_and_compare_result(app_client, url, body_data, expect_file)
            # check tensor value
            url = 'tensors'
            body_data = {
                'name': node_name + ':0',
                'detail': 'data',
                # Shape slice is URL-encoded because it is sent as a query value.
                'shape': quote('[1, 1:3]')
            }
            expect_file = 'retrieve_tensor_value.json'
            send_and_compare_result(app_client, url, body_data, expect_file, method='get')
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_compare_tensor_value(self, app_client):
        """Test comparing tensor values between two steps."""
        node_name = 'Default/args0'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            # prepare tensor values
            url = 'control'
            body_data = {'mode': 'continue',
                         'steps': 2}
            get_request_result(app_client, url, body_data)
            check_state(app_client)
            get_request_result(
                app_client=app_client, url='tensor-history', body_data={'name': node_name})
            res = get_request_result(
                app_client=app_client, url='poll-data', body_data={'pos': 0}, method='get')
            assert res.get('receive_tensor', {}).get('node_name') == node_name
            # get compare results
            url = 'tensor-comparisons'
            body_data = {
                'name': node_name + ':0',
                'detail': 'data',
                'shape': quote('[:, :]'),
                'tolerance': 1
            }
            expect_file = 'compare_tensors.json'
            send_and_compare_result(app_client, url, body_data, expect_file, method='get')
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'ascend': True}, 'retrieve_node_by_bfs_ascend.json'),
        ({'name': 'Default/args0', 'ascend': False}, 'retrieve_node_by_bfs.json')
    ])
    def test_retrieve_bfs_node(self, app_client, body_data, expect_file):
        """Test retrieving the next/previous node in BFS order."""
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            # prepare tensor values
            url = 'retrieve_node_by_bfs'
            send_and_compare_result(app_client, url, body_data, expect_file, method='get')
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_pause(self, app_client):
        """Test pausing a running training session."""
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            # send run command to execute to next node
            url = 'control'
            # steps=-1 means run continuously until paused or finished.
            body_data = {'mode': 'continue',
                         'steps': -1}
            res = get_request_result(app_client, url, body_data)
            assert res == {'metadata': {'state': 'sending', 'enable_recheck': False}}
            # send pause command
            check_state(app_client, 'running')
            url = 'control'
            body_data = {'mode': 'pause'}
            res = get_request_result(app_client, url, body_data)
            assert res == {'metadata': {'state': 'sending', 'enable_recheck': False}}
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("url, body_data, enable_recheck", [
        ('create-watchpoint',
         {'condition': {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
          'watch_nodes': ['Default']}, True),
        ('update-watchpoint',
         {'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum/Parameter[18]_7'],
          'mode': 1}, True),
        ('update-watchpoint',
         {'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum'],
          'mode': 1}, True),
        ('delete-watchpoint', {}, True)
    ])
    def test_recheck(self, app_client, url, body_data, enable_recheck):
        """Test that watchpoint edits leave recheck enabled as expected."""
        with self._debugger_client.get_thread_instance():
            create_watchpoint_and_wait(app_client)
            # create watchpoint
            res = get_request_result(app_client, url, body_data, method='post')
            assert res['metadata']['enable_recheck'] is enable_recheck
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_recommend_watchpoints(self, app_client):
        """Test generating recommended watchpoints at session startup."""
        original_value = settings.ENABLE_RECOMMENDED_WATCHPOINTS
        settings.ENABLE_RECOMMENDED_WATCHPOINTS = True
        try:
            with self._debugger_client.get_thread_instance():
                check_state(app_client)
                url = 'retrieve'
                body_data = {'mode': 'watchpoint'}
                expect_file = 'recommended_watchpoints_at_startup.json'
                send_and_compare_result(app_client, url, body_data, expect_file, method='post')
                send_terminate_cmd(app_client)
        finally:
            # Restore the global setting so other tests are unaffected.
            settings.ENABLE_RECOMMENDED_WATCHPOINTS = original_value

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'tensor_name': 'Default/TransData-op99:0', 'graph_name': 'graph_0'}, 'retrieve_tensor_graph-0.json'),
        ({'tensor_name': 'Default/optimizer-Momentum/Parameter[18]_7/moments.fc1.bias:0', 'graph_name': 'graph_0'},
         'retrieve_tensor_graph-1.json')
    ])
    def test_retrieve_tensor_graph(self, app_client, body_data, expect_file):
        """Test retrieving the tensor graph for a watched tensor."""
        url = 'tensor-graphs'
        with self._debugger_client.get_thread_instance():
            create_watchpoint_and_wait(app_client)
            get_request_result(app_client, url, body_data, method='GET')
            # check full tensor history from poll data
            res = get_request_result(
                app_client=app_client, url='poll-data', body_data={'pos': 0}, method='get')
            assert res.get('receive_tensor', {}).get('tensor_name') == body_data.get('tensor_name')
            send_and_compare_result(app_client, url, body_data, expect_file, method='GET')
            send_terminate_cmd(app_client)
class TestGPUDebugger:
    """Test debugger restful APIs against a mocked GPU backend."""

    @classmethod
    def setup_class(cls):
        """Create the mock debugger client shared by all tests in this class."""
        cls._debugger_client = MockDebuggerClient(backend='GPU')

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_next_node_on_gpu(self, app_client):
        """Test running to the next node on GPU."""
        gpu_debugger_client = MockDebuggerClient(backend='GPU')
        with gpu_debugger_client.get_thread_instance():
            check_state(app_client)
            # send run command to get watchpoint hit
            url = 'control'
            body_data = {'mode': 'continue',
                         'level': 'node',
                         'name': 'Default/TransData-op99'}
            res = get_request_result(app_client, url, body_data)
            assert res == {'metadata': {'state': 'sending', 'enable_recheck': False}}
            # get metadata
            check_state(app_client)
            url = 'retrieve'
            body_data = {'mode': 'all'}
            expect_file = 'retrieve_next_node_on_gpu.json'
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("url, body_data, enable_recheck", [
        ('create-watchpoint',
         {'condition': {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
          'watch_nodes': ['Default']}, True),
        ('create-watchpoint',
         {'condition': {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
          'watch_nodes': ['Default/TransData-op99']}, True),
        ('update-watchpoint',
         {'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum/Parameter[18]_7'],
          'mode': 1}, True),
        ('update-watchpoint',
         {'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum'],
          'mode': 1}, True),
        ('update-watchpoint',
         [{'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum'],
           'mode': 1},
          {'watch_point_id': 1, 'watch_nodes': ['Default/optimizer-Momentum'],
           'mode': 0}
          ], True),
        ('update-watchpoint',
         [{'watch_point_id': 1, 'watch_nodes': ['Default/TransData-op99'],
           'mode': 1},
          {'watch_point_id': 1, 'watch_nodes': ['Default/TransData-op99'],
           'mode': 0}
          ], True),
        ('delete-watchpoint', {'watch_point_id': 1}, True)
    ])
    def test_recheck_state(self, app_client, url, body_data, enable_recheck):
        """Test updating watchpoints and check the value of enable_recheck."""
        with self._debugger_client.get_thread_instance():
            create_watchpoint_and_wait(app_client)
            # A list parameter means several requests sent back to back.
            if not isinstance(body_data, list):
                body_data = [body_data]
            for sub_body_data in body_data:
                res = get_request_result(app_client, url, sub_body_data, method='post')
                assert res['metadata']['enable_recheck'] is enable_recheck
            send_terminate_cmd(app_client)

    def test_get_conditions(self, app_client):
        """Test getting condition collections for the GPU backend."""
        # NOTE(review): unlike the other tests this one carries no pytest
        # level/platform marks -- confirm whether that is intentional.
        url = '/v1/mindinsight/conditionmgr/train-jobs/train-id/condition-collections'
        body_data = {}
        expect_file = 'get_conditions_for_gpu.json'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file, method='get', full_url=True)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    def test_recheck(self, app_client):
        """Test the recheck request in both disabled and enabled states."""
        with self._debugger_client.get_thread_instance():
            create_watchpoint_and_wait(app_client)
            # send recheck when disable to do recheck
            get_request_result(app_client, 'recheck', {}, method='post', expect_code=400)
            # send recheck when enable to do recheck
            create_watchpoint(app_client, {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]}, 2)
            res = get_request_result(app_client, 'recheck', {}, method='post')
            assert res['metadata']['enable_recheck'] is False
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("filter_condition, expect_file", [
        ({'name': 'fc', 'node_category': 'weight'}, 'search_weight.json'),
        ({'name': 'fc', 'node_category': 'gradient'}, 'search_gradient.json'),
        ({'node_category': 'activation'}, 'search_activation.json')
    ])
    def test_search_by_category(self, app_client, filter_condition, expect_file):
        """Test searching nodes by category."""
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, 'search', filter_condition, expect_file,
                                    method='get')
            send_terminate_cmd(app_client)
class TestMultiGraphDebugger:
    """Test debugger restful APIs on an Ascend backend with multiple graphs."""

    @classmethod
    def setup_class(cls):
        """Create a mock debugger client that serves two graphs."""
        cls._debugger_client = MockDebuggerClient(backend='Ascend', graph_num=2)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'mode': 'all'}, 'multi_retrieve_all.json'),
        ({'mode': 'node', 'params': {'name': 'Default', 'graph_name': 'graph_1'}}, 'retrieve_scope_node.json'),
        ({'mode': 'node', 'params': {'name': 'graph_0'}}, 'multi_retrieve_scope_node.json'),
        ({'mode': 'node', 'params': {'name': 'graph_0/Default/optimizer-Momentum/Parameter[18]_7'}},
         'multi_retrieve_aggregation_scope_node.json'),
        ({'mode': 'node', 'params': {
            'name': 'graph_0/Default/TransData-op99',
            'single_node': True}}, 'multi_retrieve_single_node.json'),
        ({'mode': 'node', 'params': {
            'name': 'Default/TransData-op99',
            'single_node': True, 'graph_name': 'graph_0'}}, 'retrieve_single_node.json')
    ])
    def test_multi_retrieve_when_train_begin(self, app_client, body_data, expect_file):
        """Test retrieve requests with multiple graphs after training begins."""
        url = 'retrieve'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("filter_condition, expect_file", [
        ({'name': '', 'node_category': 'weight'}, 'search_weight_multi_graph.json'),
        ({'node_category': 'activation'}, 'search_activation_multi_graph.json'),
        ({'node_category': 'gradient'}, 'search_gradient_multi_graph.json')
    ])
    def test_search_by_category_with_multi_graph(self, app_client, filter_condition, expect_file):
        """Test searching nodes by category across multiple graphs."""
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, 'search', filter_condition, expect_file, method='get')
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("filter_condition, expect_id", [
        ({'condition': {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
          'watch_nodes': ['Default/optimizer-Momentum/Parameter[18]_7'],
          'graph_name': 'graph_0'}, 1),
        ({'condition': {'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
          'watch_nodes': ['graph_0/Default/optimizer-Momentum/ApplyMomentum[8]_1'],
          'graph_name': None}, 1)
    ])
    def test_create_watchpoint(self, app_client, filter_condition, expect_id):
        """Test creating a watchpoint with multiple graphs."""
        url = 'create-watchpoint'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            res = get_request_result(app_client, url, filter_condition)
            assert res.get('id') == expect_id
            send_terminate_cmd(app_client)

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("params, expect_file", [
        ({'level': 'node'}, 'multi_next_node.json'),
        ({'level': 'node', 'node_name': 'graph_0/Default/TransData-op99'}, 'multi_next_node.json'),
        ({'level': 'node', 'node_name': 'Default/TransData-op99', 'graph_name': 'graph_0'},
         'multi_next_node.json')
    ])
    def test_continue_on_gpu(self, app_client, params, expect_file):
        """Test continuing to the next node on GPU with multiple graphs."""
        gpu_debugger_client = MockDebuggerClient(backend='GPU', graph_num=2)
        original_value = settings.ENABLE_RECOMMENDED_WATCHPOINTS
        settings.ENABLE_RECOMMENDED_WATCHPOINTS = True
        try:
            with gpu_debugger_client.get_thread_instance():
                check_state(app_client)
                # send run command to get watchpoint hit
                url = 'control'
                body_data = {'mode': 'continue'}
                body_data.update(params)
                res = get_request_result(app_client, url, body_data)
                assert res == {'metadata': {'state': 'sending', 'enable_recheck': False}}
                # get metadata
                check_state(app_client)
                url = 'retrieve'
                body_data = {'mode': 'all'}
                send_and_compare_result(app_client, url, body_data, expect_file)
                send_terminate_cmd(app_client)
        finally:
            # Restore the global setting so other tests are unaffected.
            settings.ENABLE_RECOMMENDED_WATCHPOINTS = original_value

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'tensor_name': 'Default/TransData-op99:0', 'graph_name': 'graph_0'}, 'retrieve_tensor_hits-0.json'),
        ({'tensor_name': 'Default/optimizer-Momentum/Parameter[18]_7/moments.fc1.bias:0', 'graph_name': 'graph_0'},
         'retrieve_tensor_hits-1.json')
    ])
    def test_retrieve_tensor_hits(self, app_client, body_data, expect_file):
        """Test retrieving tensor hits for a tensor."""
        url = 'tensor-hits'
        with self._debugger_client.get_thread_instance():
            check_state(app_client)
            send_and_compare_result(app_client, url, body_data, expect_file, method='GET')
            send_terminate_cmd(app_client)
  583. def create_watchpoint(app_client, condition, expect_id):
  584. """Create watchpoint."""
  585. url = 'create-watchpoint'
  586. body_data = {'condition': condition,
  587. 'watch_nodes': ['Default/optimizer-Momentum/Parameter[18]_7',
  588. 'Default/optimizer-Momentum/Parameter[18]_7/moments.fc3.bias',
  589. 'Default/optimizer-Momentum/Parameter[18]_7/moments.fc1.bias',
  590. 'Default/TransData-op99']}
  591. res = get_request_result(app_client, url, body_data)
  592. assert res.get('id') == expect_id
  593. def create_watchpoint_and_wait(app_client):
  594. """Preparation for recheck."""
  595. check_state(app_client)
  596. create_watchpoint(app_client, condition={'id': 'tensor_too_large', 'params': [{'name': 'max_gt', 'value': 1.0}]},
  597. expect_id=1)
  598. # send run command to get watchpoint hit
  599. url = 'control'
  600. body_data = {'mode': 'continue',
  601. 'steps': 2}
  602. res = get_request_result(app_client, url, body_data)
  603. assert res == {'metadata': {'state': 'sending', 'enable_recheck': False}}
  604. # wait for server has received watchpoint hit
  605. check_state(app_client)
class TestMismatchDebugger:
    """Test debugger behavior when MindInsight and MindSpore versions mismatch."""

    @classmethod
    def setup_class(cls):
        """Create a mock debugger client reporting an old MindSpore version."""
        cls._debugger_client = MockDebuggerClient(backend='Ascend', ms_version='1.0.0')

    @pytest.mark.level0
    @pytest.mark.env_single
    @pytest.mark.platform_x86_cpu
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.parametrize("body_data, expect_file", [
        ({'mode': 'all'}, 'version_mismatch.json')
    ])
    def test_retrieve_when_version_mismatch(self, app_client, body_data, expect_file):
        """Test retrieve when the MindSpore version does not match."""
        url = 'retrieve'
        with self._debugger_client.get_thread_instance():
            # The server should enter the MISMATCH state instead of waiting.
            check_state(app_client, ServerStatus.MISMATCH.value)
            send_and_compare_result(app_client, url, body_data, expect_file)
            send_terminate_cmd(app_client)