test_querier.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test the querier module."""
from unittest import TestCase, mock

from google.protobuf.json_format import ParseDict

import mindinsight.datavisual.proto_files.mindinsight_lineage_pb2 as summary_pb2
from mindinsight.lineagemgr.common.exceptions.exceptions import (LineageParamTypeError, LineageQuerierParamException,
                                                                 LineageSummaryAnalyzeException,
                                                                 LineageSummaryParseException)
from mindinsight.lineagemgr.querier.querier import Querier
from mindinsight.lineagemgr.summary.lineage_summary_analyzer import LineageInfo

from . import event_data


def create_lineage_info(train_event_dict, eval_event_dict, dataset_event_dict):
    """
    Create parsed lineage info tuple.

    Args:
        train_event_dict (Union[dict, None]): The dict of train event.
        eval_event_dict (Union[dict, None]): The dict of evaluation event.
        dataset_event_dict (Union[dict, None]): The dict of dataset graph event.

    Returns:
        namedtuple, parsed lineage info.
    """
    if train_event_dict is not None:
        train_event = summary_pb2.LineageEvent()
        ParseDict(train_event_dict, train_event)
    else:
        train_event = None

    if eval_event_dict is not None:
        eval_event = summary_pb2.LineageEvent()
        ParseDict(eval_event_dict, eval_event)
    else:
        eval_event = None

    if dataset_event_dict is not None:
        dataset_event = summary_pb2.LineageEvent()
        ParseDict(dataset_event_dict, dataset_event)
    else:
        dataset_event = None

    lineage_info = LineageInfo(
        train_lineage=train_event,
        eval_lineage=eval_event,
        dataset_graph=dataset_event,
    )
    return lineage_info


def create_filtration_result(summary_dir, train_event_dict,
                             eval_event_dict, metric_dict, dataset_dict):
    """
    Create filtration result.

    Args:
        summary_dir (str): The summary dir.
        train_event_dict (dict): The dict of train event.
        eval_event_dict (dict): The dict of evaluation event.
        metric_dict (dict): The dict of metric.
        dataset_dict (dict): The dict of dataset graph.

    Returns:
        dict, the filtration result.
    """
    filtration_result = {
        "summary_dir": summary_dir,
        "model_lineage": {
            "loss_function": train_event_dict['train_lineage']['hyper_parameters']['loss_function'],
            "train_dataset_path": train_event_dict['train_lineage']['train_dataset']['train_dataset_path'],
            "train_dataset_count": train_event_dict['train_lineage']['train_dataset']['train_dataset_size'],
            "test_dataset_path": eval_event_dict['evaluation_lineage']['valid_dataset']['valid_dataset_path'],
            "test_dataset_count": eval_event_dict['evaluation_lineage']['valid_dataset']['valid_dataset_size'],
            "network": train_event_dict['train_lineage']['algorithm']['network'],
            "optimizer": train_event_dict['train_lineage']['hyper_parameters']['optimizer'],
            "learning_rate": train_event_dict['train_lineage']['hyper_parameters']['learning_rate'],
            "epoch": train_event_dict['train_lineage']['hyper_parameters']['epoch'],
            "batch_size": train_event_dict['train_lineage']['hyper_parameters']['batch_size'],
            "device_num": train_event_dict['train_lineage']['hyper_parameters']['device_num'],
            "loss": train_event_dict['train_lineage']['algorithm']['loss'],
            "model_size": train_event_dict['train_lineage']['model']['size'],
            "metric": metric_dict,
            "dataset_mark": '2',
            "user_defined": {}
        },
        "dataset_graph": dataset_dict,
    }
    return filtration_result


def get_lineage_infos():
    """
    Get tuples of lineage info, simulating the behavior of the summary analyzer.

    Returns:
        list[namedtuple], tuples of lineage info.
    """
    train_events = [
        event_data.EVENT_TRAIN_DICT_0,
        event_data.EVENT_TRAIN_DICT_1,
        event_data.EVENT_TRAIN_DICT_2,
        event_data.EVENT_TRAIN_DICT_3,
        event_data.EVENT_TRAIN_DICT_4,
        event_data.EVENT_TRAIN_DICT_5,
        None
    ]
    eval_events = [
        event_data.EVENT_EVAL_DICT_0,
        event_data.EVENT_EVAL_DICT_1,
        event_data.EVENT_EVAL_DICT_2,
        event_data.EVENT_EVAL_DICT_3,
        event_data.EVENT_EVAL_DICT_4,
        None,
        event_data.EVENT_EVAL_DICT_5
    ]
    dataset_events = [
        event_data.EVENT_DATASET_DICT_0
    ] * 7

    lineage_infos = list(
        map(
            lambda event: create_lineage_info(event[0], event[1], event[2]),
            zip(train_events, eval_events, dataset_events)
        )
    )
    return lineage_infos


LINEAGE_INFO_0 = {
    'summary_dir': '/path/to/summary0',
    **event_data.EVENT_TRAIN_DICT_0['train_lineage'],
    'metric': event_data.METRIC_0,
    'valid_dataset': event_data.EVENT_EVAL_DICT_0['evaluation_lineage']['valid_dataset'],
    'dataset_graph': event_data.DATASET_DICT_0
}
LINEAGE_INFO_1 = {
    'summary_dir': '/path/to/summary1',
    **event_data.EVENT_TRAIN_DICT_1['train_lineage'],
    'metric': event_data.METRIC_1,
    'valid_dataset': event_data.EVENT_EVAL_DICT_1['evaluation_lineage']['valid_dataset'],
    'dataset_graph': event_data.DATASET_DICT_0
}
LINEAGE_FILTRATION_0 = create_filtration_result(
    '/path/to/summary0',
    event_data.EVENT_TRAIN_DICT_0,
    event_data.EVENT_EVAL_DICT_0,
    event_data.METRIC_0,
    event_data.DATASET_DICT_0
)
LINEAGE_FILTRATION_1 = create_filtration_result(
    '/path/to/summary1',
    event_data.EVENT_TRAIN_DICT_1,
    event_data.EVENT_EVAL_DICT_1,
    event_data.METRIC_1,
    event_data.DATASET_DICT_0
)
LINEAGE_FILTRATION_2 = create_filtration_result(
    '/path/to/summary2',
    event_data.EVENT_TRAIN_DICT_2,
    event_data.EVENT_EVAL_DICT_2,
    event_data.METRIC_2,
    event_data.DATASET_DICT_0
)
LINEAGE_FILTRATION_3 = create_filtration_result(
    '/path/to/summary3',
    event_data.EVENT_TRAIN_DICT_3,
    event_data.EVENT_EVAL_DICT_3,
    event_data.METRIC_3,
    event_data.DATASET_DICT_0
)
LINEAGE_FILTRATION_4 = create_filtration_result(
    '/path/to/summary4',
    event_data.EVENT_TRAIN_DICT_4,
    event_data.EVENT_EVAL_DICT_4,
    event_data.METRIC_4,
    event_data.DATASET_DICT_0
)
LINEAGE_FILTRATION_5 = {
    "summary_dir": '/path/to/summary5',
    "model_lineage": {
        "loss_function":
            event_data.EVENT_TRAIN_DICT_5['train_lineage']['hyper_parameters']['loss_function'],
        "train_dataset_path": None,
        "train_dataset_count":
            event_data.EVENT_TRAIN_DICT_5['train_lineage']['train_dataset']['train_dataset_size'],
        "test_dataset_path": None,
        "test_dataset_count": None,
        "network": event_data.EVENT_TRAIN_DICT_5['train_lineage']['algorithm']['network'],
        "optimizer": event_data.EVENT_TRAIN_DICT_5['train_lineage']['hyper_parameters']['optimizer'],
        "learning_rate":
            event_data.EVENT_TRAIN_DICT_5['train_lineage']['hyper_parameters']['learning_rate'],
        "epoch": event_data.EVENT_TRAIN_DICT_5['train_lineage']['hyper_parameters']['epoch'],
        "batch_size": event_data.EVENT_TRAIN_DICT_5['train_lineage']['hyper_parameters']['batch_size'],
        "device_num": event_data.EVENT_TRAIN_DICT_5['train_lineage']['hyper_parameters']['device_num'],
        "loss": event_data.EVENT_TRAIN_DICT_5['train_lineage']['algorithm']['loss'],
        "model_size": event_data.EVENT_TRAIN_DICT_5['train_lineage']['model']['size'],
        "metric": {},
        "dataset_mark": '2',
        "user_defined": {}
    },
    "dataset_graph": event_data.DATASET_DICT_0
}
LINEAGE_FILTRATION_6 = {
    "summary_dir": '/path/to/summary6',
    "model_lineage": {
        "loss_function": None,
        "train_dataset_path": None,
        "train_dataset_count": None,
        "test_dataset_path":
            event_data.EVENT_EVAL_DICT_5['evaluation_lineage']['valid_dataset']['valid_dataset_path'],
        "test_dataset_count":
            event_data.EVENT_EVAL_DICT_5['evaluation_lineage']['valid_dataset']['valid_dataset_size'],
        "network": None,
        "optimizer": None,
        "learning_rate": None,
        "epoch": None,
        "batch_size": None,
        "device_num": None,
        "loss": None,
        "model_size": None,
        "metric": event_data.METRIC_5,
        "dataset_mark": '2',
        "user_defined": {}
    },
    "dataset_graph": event_data.DATASET_DICT_0
}


class TestQuerier(TestCase):
    """Test the class of `Querier`."""

    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_user_defined_info')
    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos')
    def setUp(self, *args):
        """Initialization before test case execution."""
        # mock.patch decorators are applied bottom-up, so args[0] is the mocked
        # get_summary_infos and args[1] is the mocked get_user_defined_info.
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0,
            event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0
        )
        args[1].return_value = []
        single_summary_path = '/path/to/summary0/log0'
        self.single_querier = Querier(single_summary_path)

        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos
        summary_paths = [
            '/path/to/summary0/log0',
            '/path/to/summary1/log1',
            '/path/to/summary2/log2',
            '/path/to/summary3/log3',
            '/path/to/summary4/log4',
            '/path/to/summary5/log5',
            '/path/to/summary6/log6'
        ]
        self.multi_querier = Querier(summary_paths)

    def test_get_summary_lineage_success_1(self):
        """Test the success of get_summary_lineage."""
        expected_result = [LINEAGE_INFO_0]
        result = self.single_querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_2(self):
        """Test the success of get_summary_lineage."""
        expected_result = [LINEAGE_INFO_0]
        result = self.single_querier.get_summary_lineage(
            summary_dir='/path/to/summary0'
        )
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_3(self):
        """Test the success of get_summary_lineage."""
        expected_result = [
            {
                'summary_dir': '/path/to/summary0',
                'model': event_data.EVENT_TRAIN_DICT_0['train_lineage']['model'],
                'algorithm': event_data.EVENT_TRAIN_DICT_0['train_lineage']['algorithm']
            }
        ]
        result = self.single_querier.get_summary_lineage(
            filter_keys=['model', 'algorithm']
        )
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_4(self):
        """Test the success of get_summary_lineage."""
        expected_result = [
            LINEAGE_INFO_0,
            LINEAGE_INFO_1,
            {
                'summary_dir': '/path/to/summary2',
                **event_data.EVENT_TRAIN_DICT_2['train_lineage'],
                'metric': event_data.METRIC_2,
                'valid_dataset': event_data.EVENT_EVAL_DICT_2['evaluation_lineage']['valid_dataset'],
                'dataset_graph': event_data.DATASET_DICT_0
            },
            {
                'summary_dir': '/path/to/summary3',
                **event_data.EVENT_TRAIN_DICT_3['train_lineage'],
                'metric': event_data.METRIC_3,
                'valid_dataset': event_data.EVENT_EVAL_DICT_3['evaluation_lineage']['valid_dataset'],
                'dataset_graph': event_data.DATASET_DICT_0
            },
            {
                'summary_dir': '/path/to/summary4',
                **event_data.EVENT_TRAIN_DICT_4['train_lineage'],
                'metric': event_data.METRIC_4,
                'valid_dataset': event_data.EVENT_EVAL_DICT_4['evaluation_lineage']['valid_dataset'],
                'dataset_graph': event_data.DATASET_DICT_0
            },
            {
                'summary_dir': '/path/to/summary5',
                **event_data.EVENT_TRAIN_DICT_5['train_lineage'],
                'metric': {},
                'valid_dataset': {},
                'dataset_graph': event_data.DATASET_DICT_0
            },
            {
                'summary_dir': '/path/to/summary6',
                'hyper_parameters': {},
                'algorithm': {},
                'model': {},
                'train_dataset': {},
                'metric': event_data.METRIC_5,
                'valid_dataset': event_data.EVENT_EVAL_DICT_5['evaluation_lineage']['valid_dataset'],
                'dataset_graph': event_data.DATASET_DICT_0
            }
        ]
        result = self.multi_querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_5(self):
        """Test the success of get_summary_lineage."""
        expected_result = [LINEAGE_INFO_1]
        result = self.multi_querier.get_summary_lineage(
            summary_dir='/path/to/summary1'
        )
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_6(self):
        """Test the success of get_summary_lineage."""
        expected_result = [
            {
                'summary_dir': '/path/to/summary0',
                'hyper_parameters': event_data.EVENT_TRAIN_DICT_0['train_lineage']['hyper_parameters'],
                'train_dataset': event_data.EVENT_TRAIN_DICT_0['train_lineage']['train_dataset'],
                'metric': event_data.METRIC_0,
                'valid_dataset': event_data.EVENT_EVAL_DICT_0['evaluation_lineage']['valid_dataset']
            }
        ]
        filter_keys = [
            'metric', 'hyper_parameters', 'train_dataset', 'valid_dataset'
        ]
        result = self.multi_querier.get_summary_lineage(
            summary_dir='/path/to/summary0', filter_keys=filter_keys
        )
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_fail(self):
        """Test the function of get_summary_lineage with exception."""
        filter_keys = ['xxx']
        self.assertRaises(
            LineageQuerierParamException,
            self.multi_querier.get_summary_lineage,
            filter_keys=filter_keys
        )

        self.assertRaises(
            LineageQuerierParamException,
            self.multi_querier.get_summary_lineage,
            summary_dir='xxx'
        )

    def test_filter_summary_lineage_success_1(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'optimizer': {
                'in': [
                    'ApplyMomentum0',
                    'ApplyMomentum1',
                    'ApplyMomentum2',
                    'ApplyMomentum4'
                ]
            },
            'learning_rate': {
                'lt': 0.5,
                'gt': 0.2
            },
            'sorted_name': 'summary_dir'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [
                LINEAGE_FILTRATION_1,
                LINEAGE_FILTRATION_2
            ],
            'count': 2,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_2(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'batch_size': {
                'le': 50,
                'ge': 35
            },
            'model_size': {
                'lt': 400716934,
                'gt': 400716931
            },
            'sorted_name': 'batch_size',
            'sorted_type': 'descending'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [
                LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_3
            ],
            'count': 2,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_3(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'limit': 2,
            'offset': 1
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [
                LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_3
            ],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_4(self):
        """Test the success of filter_summary_lineage."""
        expected_result = {
            'customized': event_data.CUSTOMIZED_2,
            'object': [
                LINEAGE_FILTRATION_0,
                LINEAGE_FILTRATION_1,
                LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_3,
                LINEAGE_FILTRATION_4,
                LINEAGE_FILTRATION_5,
                LINEAGE_FILTRATION_6
            ],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage()
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_5(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'optimizer': {
                'eq': 'ApplyMomentum4'
            }
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_4],
            'count': 1,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_6(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'sorted_name': 'metric/accuracy',
            'sorted_type': 'ascending'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_2,
            'object': [
                LINEAGE_FILTRATION_0,
                LINEAGE_FILTRATION_5,
                LINEAGE_FILTRATION_1,
                LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_3,
                LINEAGE_FILTRATION_4,
                LINEAGE_FILTRATION_6
            ],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_7(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'sorted_name': 'metric/accuracy',
            'sorted_type': 'descending'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_2,
            'object': [
                LINEAGE_FILTRATION_6,
                LINEAGE_FILTRATION_4,
                LINEAGE_FILTRATION_3,
                LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_1,
                LINEAGE_FILTRATION_0,
                LINEAGE_FILTRATION_5
            ],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_8(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'metric/accuracy': {
                'lt': 1.0000006,
                'gt': 1.0000004
            }
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_4],
            'count': 1,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_9(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'limit': 3,
            'offset': 3
        }
        expected_result = {
            'customized': {},
            'object': [],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_fail(self):
        """Test the function of filter_summary_lineage with exception."""
        condition = {
            'xxx': {
                'lt': 1.0000006,
                'gt': 1.0000004
            }
        }
        self.assertRaises(
            LineageQuerierParamException,
            self.multi_querier.filter_summary_lineage,
            condition=condition
        )

        condition = {
            'accuracy': {
                'xxx': 1
            }
        }
        self.assertRaises(
            LineageQuerierParamException,
            self.multi_querier.filter_summary_lineage,
            condition=condition
        )

        condition = {
            'sorted_name': 'xxx'
        }
        self.assertRaises(
            LineageQuerierParamException,
            self.multi_querier.filter_summary_lineage,
            condition=condition
        )

    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos')
    def test_init_fail(self, *args):
        """Test the function of init with exception."""
        summary_path = {'xxx': 1}
        with self.assertRaises(LineageParamTypeError):
            Querier(summary_path)

        summary_path = None
        with self.assertRaises(LineageQuerierParamException):
            Querier(summary_path)

        args[0].side_effect = LineageSummaryAnalyzeException
        summary_path = '/path/to/summary0/log0'
        with self.assertRaises(LineageSummaryParseException):
            Querier(summary_path)

    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_user_defined_info')
    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos')
    def test_parse_fail_summary_logs_1(self, *args):
        """Test the function of parsing failed summary logs."""
        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos
        args[1].return_value = []

        summary_path = ['/path/to/summary0/log0']
        querier = Querier(summary_path)
        querier._parse_failed_paths.append('/path/to/summary1/log1')
        expected_result = [
            LINEAGE_INFO_0,
            LINEAGE_INFO_1
        ]
        result = querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)
        self.assertListEqual([], querier._parse_failed_paths)

    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_user_defined_info')
    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos')
    def test_parse_fail_summary_logs_2(self, *args):
        """Test the function of parsing failed summary logs."""
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0,
            event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0,
        )
        args[1].return_value = []
        summary_path = ['/path/to/summary0/log0']
        querier = Querier(summary_path)
        querier._parse_failed_paths.append('/path/to/summary1/log1')

        args[0].return_value = create_lineage_info(None, None, None)
        expected_result = [LINEAGE_INFO_0]
        result = querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)
        self.assertListEqual(
            ['/path/to/summary1/log1'], querier._parse_failed_paths
        )

MindInsight provides MindSpore with easy-to-use tuning and debugging capabilities. During training, data such as scalars, tensors, images, computational graphs, model hyperparameters, and training time can be recorded to files, and then viewed and analyzed through the MindInsight visualization pages.
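As a rough illustration of how the summary files queried by the tests above come into existence, the following is a minimal sketch, not taken from this repository, of recording training data with MindSpore's SummaryCollector callback. The toy network, the generated dataset, and the ./summary_dir path are placeholder assumptions, and the import paths follow the MindSpore 1.x layout, which may differ in other releases.

    import numpy as np
    import mindspore.dataset as ds
    import mindspore.nn as nn
    from mindspore.train import Model
    from mindspore.train.callback import SummaryCollector

    # Toy regression data and a one-layer network, used only to have something to train.
    features = np.random.randn(32, 4).astype(np.float32)
    labels = np.random.randn(32, 1).astype(np.float32)
    train_ds = ds.NumpySlicesDataset((features, labels), column_names=["data", "label"]).batch(8)

    net = nn.Dense(4, 1)
    loss = nn.MSELoss()
    optim = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
    model = Model(net, loss_fn=loss, optimizer=optim)

    # SummaryCollector writes scalars, hyperparameters, lineage data, etc. into summary_dir.
    summary_collector = SummaryCollector(summary_dir="./summary_dir")
    model.train(2, train_ds, callbacks=[summary_collector], dataset_sink_mode=False)

The resulting directory can then be visualized (for example via mindinsight start --summary-base-dir ./summary_dir), and on the lineage side it is this kind of recorded summary data that components such as the Querier exercised in test_querier.py read back and filter.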