model_lineage.py

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""This module is used to collect lineage information of model training."""
import json
import os

import numpy as np

from mindinsight.lineagemgr.common.exceptions.error_code import LineageErrorMsg, LineageErrors
from mindinsight.lineagemgr.common.log import logger as log
from mindinsight.lineagemgr.common.utils import make_directory
from mindinsight.lineagemgr.common.validator.model_parameter import EvalParameter
from mindinsight.lineagemgr.common.validator.validate import (validate_eval_run_context, validate_file_path,
                                                              validate_int_params,
                                                              validate_raise_exception,
                                                              validate_user_defined_info)
from mindinsight.utils.exceptions import MindInsightException
from ._summary_record import LineageSummary
from .base import Metadata
from .utils import try_except, LineageParamRunContextError, LineageGetModelFileError, LineageLogError

try:
    from mindspore.common.tensor import Tensor
    from mindspore.train.callback import Callback, RunContext, ModelCheckpoint, SummaryStep
    from mindspore.nn import Cell, Optimizer
    from mindspore.nn.loss.loss import _Loss
    from mindspore.dataset.engine import Dataset, ImageFolderDatasetV2, MnistDataset, Cifar10Dataset, Cifar100Dataset, \
        VOCDataset, CelebADataset, MindDataset, ManifestDataset, TFRecordDataset, TextFileDataset
    import mindspore.dataset as ds
except (ImportError, ModuleNotFoundError):
    log.warning('MindSpore Not Found!')


class TrainLineage(Callback):
    """
    Collect lineage of a training job.

    Args:
        summary_record (Union[SummaryRecord, str]): The `SummaryRecord` object which
            is used to record the summary value (see mindspore.train.summary.SummaryRecord),
            or a log dir (as a `str`) to be passed to `LineageSummary` to create
            a lineage summary recorder. It should be noted that instead of making
            use of summary_record to record lineage info directly, we obtain the
            log dir from it and then create a new summary file to write lineage info.
        raise_exception (bool): Whether to raise an exception when an error occurs in
            TrainLineage. If True, raise the exception. If False, catch the exception
            and continue. Default: False.
        user_defined_info (dict): User defined information. Only a flat dict with
            str keys and int/float/str values is supported. Default: None.

    Raises:
        MindInsightException: If validating a parameter fails.
        LineageLogError: If recording lineage information fails.

    Examples:
        >>> from mindinsight.lineagemgr import TrainLineage
        >>> from mindspore.train.callback import ModelCheckpoint, SummaryStep
        >>> from mindspore.train.summary import SummaryRecord
        >>> model = Model(train_network)
        >>> model_ckpt = ModelCheckpoint(directory='/dir/to/save/model/')
        >>> summary_writer = SummaryRecord(log_dir='./')
        >>> summary_callback = SummaryStep(summary_writer, flush_step=2)
        >>> lineagemgr = TrainLineage(summary_record=summary_writer)
        >>> model.train(epoch_num, dataset, callbacks=[model_ckpt, summary_callback, lineagemgr])
    """

    def __init__(self,
                 summary_record,
                 raise_exception=False,
                 user_defined_info=None):
        super(TrainLineage, self).__init__()
        try:
            validate_raise_exception(raise_exception)
            self.raise_exception = raise_exception

            if isinstance(summary_record, str):
                # make directory if it does not exist
                self.lineage_log_dir = make_directory(summary_record)
            else:
                summary_log_path = summary_record.full_file_name
                validate_file_path(summary_log_path)
                self.lineage_log_dir = os.path.dirname(summary_log_path)
            self.lineage_summary = LineageSummary(self.lineage_log_dir)

            self.initial_learning_rate = None

            self.user_defined_info = user_defined_info
            if user_defined_info:
                validate_user_defined_info(user_defined_info)
        except MindInsightException as err:
            log.error(err)
            if raise_exception:
                raise

    @try_except(log)
    def begin(self, run_context):
        """
        Initialize the training progress when the training job begins.

        Args:
            run_context (RunContext): It contains all lineage information,
                see mindspore.train.callback.RunContext.

        Raises:
            MindInsightException: If validating parameter fails.
        """
        log.info('Initialize training lineage collection...')

        if self.user_defined_info:
            self.lineage_summary.record_user_defined_info(self.user_defined_info)

        if not isinstance(run_context, RunContext):
            error_msg = 'Invalid TrainLineage run_context.'
            log.error(error_msg)
            raise LineageParamRunContextError(error_msg)

        run_context_args = run_context.original_args()
        if not self.initial_learning_rate:
            optimizer = run_context_args.get('optimizer')
            if optimizer and not isinstance(optimizer, Optimizer):
                log.error("The parameter optimizer is invalid. It should be an instance of "
                          "mindspore.nn.optim.optimizer.Optimizer.")
                raise MindInsightException(error=LineageErrors.PARAM_OPTIMIZER_ERROR,
                                           message=LineageErrorMsg.PARAM_OPTIMIZER_ERROR.value)
            if optimizer:
                log.info('Obtaining initial learning rate...')
                self.initial_learning_rate = AnalyzeObject.analyze_optimizer(optimizer)
                log.debug('initial_learning_rate: %s', self.initial_learning_rate)
            else:
                network = run_context_args.get('train_network')
                optimizer = AnalyzeObject.get_optimizer_by_network(network)
                self.initial_learning_rate = AnalyzeObject.analyze_optimizer(optimizer)
                log.debug('initial_learning_rate: %s', self.initial_learning_rate)

        # get the train dataset graph
        train_dataset = run_context_args.get('train_dataset')
        dataset_graph_dict = ds.serialize(train_dataset)
        dataset_graph_json_str = json.dumps(dataset_graph_dict, indent=2)
        dataset_graph_dict = json.loads(dataset_graph_json_str)

        log.info('Logging dataset graph...')
        try:
            self.lineage_summary.record_dataset_graph(dataset_graph=dataset_graph_dict)
        except Exception as error:
            error_msg = f'Dataset graph log error in TrainLineage begin: {error}'
            log.error(error_msg)
            raise LineageLogError(error_msg)
        log.info('Dataset graph logged successfully.')

    @try_except(log)
    def end(self, run_context):
        """
        Collect lineage information when the training job ends.

        Args:
            run_context (RunContext): It contains all lineage information,
                see mindspore.train.callback.RunContext.

        Raises:
            LineageLogError: If recording lineage information fails.
        """
        log.info('Start to collect training lineage...')
        if not isinstance(run_context, RunContext):
            error_msg = 'Invalid TrainLineage run_context.'
            log.error(error_msg)
            raise LineageParamRunContextError(error_msg)

        run_context_args = run_context.original_args()

        train_lineage = dict()
        train_lineage = AnalyzeObject.get_network_args(
            run_context_args, train_lineage
        )

        train_dataset = run_context_args.get('train_dataset')
        callbacks = run_context_args.get('list_callback')
        list_callback = getattr(callbacks, '_callbacks', [])

        log.info('Obtaining model files...')
        ckpt_file_path, _ = AnalyzeObject.get_file_path(list_callback)

        train_lineage[Metadata.learning_rate] = self.initial_learning_rate
        train_lineage[Metadata.epoch] = run_context_args.get('epoch_num')
        train_lineage[Metadata.step_num] = run_context_args.get('cur_step_num')
        train_lineage[Metadata.parallel_mode] = run_context_args.get('parallel_mode')
        train_lineage[Metadata.device_num] = run_context_args.get('device_number')
        train_lineage[Metadata.batch_size] = run_context_args.get('batch_num')

        model_path_dict = {
            'ckpt': ckpt_file_path
        }
        train_lineage[Metadata.model_path] = json.dumps(model_path_dict)

        log.info('Calculating model size...')
        train_lineage[Metadata.model_size] = AnalyzeObject.get_model_size(
            ckpt_file_path
        )
        log.debug('model_size: %s', train_lineage[Metadata.model_size])

        log.info('Analyzing dataset object...')
        train_lineage = AnalyzeObject.analyze_dataset(train_dataset, train_lineage, 'train')

        log.info('Logging lineage information...')
        try:
            self.lineage_summary.record_train_lineage(train_lineage)
        except IOError as error:
            error_msg = f'End error in TrainLineage: {error}'
            log.error(error_msg)
            raise LineageLogError(error_msg)
        except Exception as error:
            error_msg = f'End error in TrainLineage: {error}'
            log.error(error_msg)
            log.error('Failed to log the lineage of the training job.')
            raise LineageLogError(error_msg)
        log.info('The lineage of the training job has been logged successfully.')
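
# Illustrative only: as the TrainLineage docstring notes, `summary_record` may also be a
# plain log-directory string instead of a SummaryRecord instance. A minimal sketch,
# assuming placeholder names `model`, `epoch_num` and `dataset` from the docstring
# example (they are not defined in this module):
#
#     >>> lineagemgr = TrainLineage(summary_record='./summary_dir',
#     ...                           user_defined_info={'task': 'image_classification'})
#     >>> model.train(epoch_num, dataset, callbacks=[lineagemgr])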


class EvalLineage(Callback):
    """
    Collect lineage of an evaluation job.

    Args:
        summary_record (Union[SummaryRecord, str]): The `SummaryRecord` object which
            is used to record the summary value (see mindspore.train.summary.SummaryRecord),
            or a log dir (as a `str`) to be passed to `LineageSummary` to create
            a lineage summary recorder. It should be noted that instead of making
            use of summary_record to record lineage info directly, we obtain the
            log dir from it and then create a new summary file to write lineage info.
        raise_exception (bool): Whether to raise an exception when an error occurs in
            EvalLineage. If True, raise the exception. If False, catch the exception
            and continue. Default: False.
        user_defined_info (dict): User defined information. Only a flat dict with
            str keys and int/float/str values is supported. Default: None.

    Raises:
        MindInsightException: If validating a parameter fails.
        LineageLogError: If recording lineage information fails.

    Examples:
        >>> from mindinsight.lineagemgr import EvalLineage
        >>> from mindspore.train.callback import ModelCheckpoint, SummaryStep
        >>> from mindspore.train.summary import SummaryRecord
        >>> model = Model(train_network)
        >>> model_ckpt = ModelCheckpoint(directory='/dir/to/save/model/')
        >>> summary_writer = SummaryRecord(log_dir='./')
        >>> summary_callback = SummaryStep(summary_writer, flush_step=2)
        >>> lineagemgr = EvalLineage(summary_record=summary_writer)
        >>> model.eval(dataset, callbacks=[model_ckpt, summary_callback, lineagemgr])
    """

    def __init__(self,
                 summary_record,
                 raise_exception=False,
                 user_defined_info=None):
        super(EvalLineage, self).__init__()
        try:
            validate_raise_exception(raise_exception)
            self.raise_exception = raise_exception

            if isinstance(summary_record, str):
                # make directory if it does not exist
                self.lineage_log_dir = make_directory(summary_record)
            else:
                summary_log_path = summary_record.full_file_name
                validate_file_path(summary_log_path)
                self.lineage_log_dir = os.path.dirname(summary_log_path)
            self.lineage_summary = LineageSummary(self.lineage_log_dir)

            self.user_defined_info = user_defined_info
            if self.user_defined_info:
                validate_user_defined_info(self.user_defined_info)
        except MindInsightException as err:
            log.error(err)
            if raise_exception:
                raise

    @try_except(log)
    def end(self, run_context):
        """
        Collect lineage information when the evaluation job ends.

        Args:
            run_context (RunContext): It contains all lineage information,
                see mindspore.train.callback.RunContext.

        Raises:
            MindInsightException: If validating parameter fails.
            LineageLogError: If recording lineage information fails.
        """
        if self.user_defined_info:
            self.lineage_summary.record_user_defined_info(self.user_defined_info)

        if not isinstance(run_context, RunContext):
            error_msg = 'Invalid EvalLineage run_context.'
            log.error(error_msg)
            raise LineageParamRunContextError(error_msg)

        run_context_args = run_context.original_args()
        validate_eval_run_context(EvalParameter, run_context_args)

        valid_dataset = run_context_args.get('valid_dataset')

        eval_lineage = dict()
        metrics = run_context_args.get('metrics')
        eval_lineage[Metadata.metrics] = json.dumps(metrics)
        eval_lineage[Metadata.step_num] = run_context_args.get('cur_step_num')

        log.info('Analyzing dataset object...')
        eval_lineage = AnalyzeObject.analyze_dataset(valid_dataset, eval_lineage, 'valid')

        log.info('Logging evaluation job lineage...')
        try:
            self.lineage_summary.record_evaluation_lineage(eval_lineage)
        except IOError as error:
            error_msg = f'End error in EvalLineage: {error}'
            log.error(error_msg)
            log.error('Failed to log the lineage of the evaluation job.')
            raise LineageLogError(error_msg)
        except Exception as error:
            error_msg = f'End error in EvalLineage: {error}'
            log.error(error_msg)
            log.error('Failed to log the lineage of the evaluation job.')
            raise LineageLogError(error_msg)
        log.info('The lineage of the evaluation job has been logged successfully.')


class AnalyzeObject:
    """Analyze class objects in MindSpore."""

    @staticmethod
    def get_optimizer_by_network(network):
        """
        Get the optimizer by analyzing the network.

        Args:
            network (Cell): See mindspore.nn.Cell.

        Returns:
            Optimizer, an Optimizer object.
        """
        optimizer = None
        net_args = vars(network) if network else {}
        net_cell = net_args.get('_cells') if net_args else {}
        for _, value in net_cell.items():
            if isinstance(value, Optimizer):
                optimizer = value
                break
        return optimizer

    @staticmethod
    def get_loss_fn_by_network(network):
        """
        Get the loss function by analyzing the network.

        Args:
            network (Cell): See mindspore.nn.Cell.

        Returns:
            Loss_fn, a Cell object.
        """
        loss_fn = None
        inner_cell_list = []
        net_args = vars(network) if network else {}
        net_cell = net_args.get('_cells') if net_args else {}
        for _, value in net_cell.items():
            if isinstance(value, Cell) and \
                    not isinstance(value, Optimizer):
                inner_cell_list.append(value)

        while inner_cell_list:
            inner_net_args = vars(inner_cell_list[0])
            inner_net_cell = inner_net_args.get('_cells')
            for value in inner_net_cell.values():
                if isinstance(value, _Loss):
                    loss_fn = value
                    break
                if isinstance(value, Cell):
                    inner_cell_list.append(value)
            if loss_fn:
                break
            inner_cell_list.pop(0)

        return loss_fn
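
    # Illustrative only: get_loss_fn_by_network() above performs a breadth-first walk over
    # the network's child cells (its '_cells' dict) and returns the first cell that
    # subclasses _Loss, e.g. a SoftmaxCrossEntropyWithLogits instance nested inside an
    # assumed loss-wrapping cell; it returns None if no such cell is found.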

    @staticmethod
    def get_backbone_network(network):
        """
        Get the name of the backbone network.

        Args:
            network (Cell): The train network.

        Returns:
            str, the name of the backbone network.
        """
        backbone_name = None
        has_network = False
        network_key = 'network'
        backbone_key = '_backbone'
        net_args = vars(network) if network else {}
        net_cell = net_args.get('_cells') if net_args else {}
        for key, value in net_cell.items():
            if key == network_key:
                network = value
                has_network = True
                break

        if has_network:
            while hasattr(network, network_key):
                network = getattr(network, network_key)
            if hasattr(network, backbone_key):
                backbone = getattr(network, backbone_key)
                backbone_name = type(backbone).__name__
        if backbone_name is None and network is not None:
            backbone_name = type(network).__name__
        return backbone_name
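
    # Illustrative only: get_backbone_network() unwraps nested 'network' attributes and then
    # reads '_backbone'. For an assumed training wrapper shaped like
    # TrainOneStepCell(WithLossCell(backbone, loss_fn), optimizer) (not constructed in this
    # module), it would report type(backbone).__name__, e.g. 'LeNet5'; if no backbone is
    # found, it falls back to the class name of the innermost network.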

    @staticmethod
    def analyze_optimizer(optimizer):
        """
        Analyze an Optimizer, a Cell object of MindSpore.

        In this way, we can obtain the following attributes:
        learning_rate (float),
        weight_decay (float),
        momentum (float),
        weights (float).

        Args:
            optimizer (Optimizer): See mindspore.nn.optim.Optimizer.

        Returns:
            float, the learning rate that the optimizer adopted.
        """
        learning_rate = None
        if isinstance(optimizer, Optimizer):
            learning_rate = getattr(optimizer, 'learning_rate', None)
            if learning_rate:
                learning_rate = learning_rate.default_input

        # Get the real learning rate value
        if isinstance(learning_rate, Tensor):
            learning_rate = learning_rate.asnumpy()
            if learning_rate.ndim == 0:
                learning_rate = np.atleast_1d(learning_rate)
            learning_rate = list(learning_rate)
        elif isinstance(learning_rate, float):
            learning_rate = [learning_rate]

        return learning_rate[0] if learning_rate else None
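
    # Illustrative only: analyze_optimizer() normalizes the forms `learning_rate` may take.
    # A plain float (e.g. an assumed 0.01) is wrapped as [0.01] and its first element is
    # returned; a scalar or 1-D Tensor is converted via asnumpy() and its first element is
    # returned; any other form yields None.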

    @staticmethod
    def analyze_dataset(dataset, lineage_dict, dataset_type):
        """
        Analyze a Dataset, a Dataset object of MindSpore.

        In this way, we can obtain the following attributes:
        dataset_path (str),
        train_dataset_size (int),
        valid_dataset_size (int),
        batch_size (int).

        Args:
            dataset (Dataset): See mindspore.dataengine.datasets.Dataset.
            lineage_dict (dict): A dict that contains lineage metadata.
            dataset_type (str): Dataset type, train or valid.

        Returns:
            dict, the lineage metadata.
        """
        batch_num = dataset.get_dataset_size()
        batch_size = dataset.get_batch_size()
        if batch_num is not None:
            validate_int_params(batch_num, 'dataset_batch_num')
            validate_int_params(batch_size, 'dataset_batch_size')
        log.debug('dataset_batch_num: %d', batch_num)
        log.debug('dataset_batch_size: %d', batch_size)

        dataset_path = AnalyzeObject.get_dataset_path_wrapped(dataset)
        if dataset_path and os.path.isfile(dataset_path):
            dataset_path, _ = os.path.split(dataset_path)

        dataset_size = int(batch_num * batch_size)
        if dataset_type == 'train':
            lineage_dict[Metadata.train_dataset_path] = dataset_path
            lineage_dict[Metadata.train_dataset_size] = dataset_size
        elif dataset_type == 'valid':
            lineage_dict[Metadata.valid_dataset_path] = dataset_path
            lineage_dict[Metadata.valid_dataset_size] = dataset_size
        return lineage_dict
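
    # Illustrative only: analyze_dataset() estimates the dataset size as
    # batch_num * batch_size. For an assumed dataset with get_dataset_size() == 1875
    # batches and get_batch_size() == 32, the recorded train_dataset_size would be
    # int(1875 * 32) == 60000.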

    def get_dataset_path(self, output_dataset):
        """
        Get the dataset path of a MindDataset object.

        Args:
            output_dataset (Union[Dataset, ImageFolderDatasetV2, MnistDataset, Cifar10Dataset, Cifar100Dataset,
                VOCDataset, CelebADataset, MindDataset, ManifestDataset, TFRecordDataset, TextFileDataset]):
                See mindspore.dataengine.datasets.Dataset.

        Returns:
            str, dataset path.
        """
        dataset_dir_set = (ImageFolderDatasetV2, MnistDataset, Cifar10Dataset,
                           Cifar100Dataset, VOCDataset, CelebADataset)
        dataset_file_set = (MindDataset, ManifestDataset)
        dataset_files_set = (TFRecordDataset, TextFileDataset)

        if isinstance(output_dataset, dataset_file_set):
            return output_dataset.dataset_file
        if isinstance(output_dataset, dataset_dir_set):
            return output_dataset.dataset_dir
        if isinstance(output_dataset, dataset_files_set):
            return output_dataset.dataset_files[0]
        return self.get_dataset_path(output_dataset.input[0])
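
    # Illustrative only: for a dataset built from chained operations, e.g. an assumed
    # pipeline MnistDataset('/data/mnist').shuffle(10000).batch(32), get_dataset_path()
    # recurses into output_dataset.input[0] until it reaches the leaf source dataset and
    # then returns its dataset_dir ('/data/mnist' in this assumed case).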

    @staticmethod
    def get_dataset_path_wrapped(dataset):
        """
        A wrapper for obtaining the dataset path.

        Args:
            dataset (Union[MindDataset, Dataset]): See
                mindspore.dataengine.datasets.Dataset.

        Returns:
            str, dataset path.
        """
        dataset_path = None
        if isinstance(dataset, Dataset):
            try:
                dataset_path = AnalyzeObject().get_dataset_path(dataset)
            except IndexError:
                dataset_path = None
        dataset_path = validate_file_path(dataset_path, allow_empty=True)
        return dataset_path

    @staticmethod
    def get_file_path(list_callback):
        """
        Get ckpt_file_name and summary_log_path from the MindSpore callback list.

        Args:
            list_callback (list[Callback]): The MindSpore training Callback list.

        Returns:
            tuple, contains ckpt_file_name and summary_log_path.
        """
        ckpt_file_path = None
        summary_log_path = None
        for callback in list_callback:
            if isinstance(callback, ModelCheckpoint):
                ckpt_file_path = callback.latest_ckpt_file_name
            if isinstance(callback, SummaryStep):
                summary_log_path = callback.summary_file_name

        if ckpt_file_path:
            validate_file_path(ckpt_file_path)
            ckpt_file_path = os.path.realpath(ckpt_file_path)

        if summary_log_path:
            validate_file_path(summary_log_path)
            summary_log_path = os.path.realpath(summary_log_path)

        return ckpt_file_path, summary_log_path

    @staticmethod
    def get_file_size(file_path):
        """
        Get the file size.

        Args:
            file_path (str): The file path.

        Returns:
            int, the file size.
        """
        try:
            return os.path.getsize(file_path)
        except (OSError, IOError) as error:
            error_msg = f"Error when getting model file size: {error}"
            log.error(error_msg)
            raise LineageGetModelFileError(error_msg)

    @staticmethod
    def get_model_size(ckpt_file_path):
        """
        Get the total size of the model checkpoint file.

        Args:
            ckpt_file_path (str): The checkpoint file path.

        Returns:
            int, the total file size.
        """
        if ckpt_file_path:
            ckpt_file_path = os.path.realpath(ckpt_file_path)
            ckpt_file_size = AnalyzeObject.get_file_size(ckpt_file_path)
        else:
            ckpt_file_size = 0
        return ckpt_file_size

    @staticmethod
    def get_network_args(run_context_args, train_lineage):
        """
        Get the parameters related to the network,
        such as optimizer and loss function.

        Args:
            run_context_args (dict): It contains all information of the training job.
            train_lineage (dict): A dict that contains lineage metadata.

        Returns:
            dict, the lineage metadata.
        """
        network = run_context_args.get('train_network')
        optimizer = run_context_args.get('optimizer')
        if not optimizer:
            optimizer = AnalyzeObject.get_optimizer_by_network(network)
        loss_fn = run_context_args.get('loss_fn')
        if not loss_fn:
            loss_fn = AnalyzeObject.get_loss_fn_by_network(network)
            loss = None
        else:
            loss = run_context_args.get('net_outputs')

        if loss:
            log.info('Calculating loss...')
            loss_numpy = loss.asnumpy()
            loss = float(np.atleast_1d(loss_numpy)[0])
            log.debug('loss: %s', loss)
            train_lineage[Metadata.loss] = loss
        else:
            train_lineage[Metadata.loss] = None

        # Analyze the class names of the optimizer, loss function and training network.
        train_lineage[Metadata.optimizer] = type(optimizer).__name__ \
            if optimizer else None
        train_lineage[Metadata.train_network] = AnalyzeObject.get_backbone_network(network)
        train_lineage[Metadata.loss_function] = type(loss_fn).__name__ \
            if loss_fn else None
        return train_lineage
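
# Illustrative only: a minimal end-to-end wiring of both callbacks, following the class
# docstring examples above. `Model`, `train_network`, `epoch_num`, `train_dataset` and
# `eval_dataset` are assumed placeholders, not defined in this module.
#
#     >>> from mindspore.train.summary import SummaryRecord
#     >>> summary_writer = SummaryRecord(log_dir='./summary_dir')
#     >>> train_lineage = TrainLineage(summary_record=summary_writer)
#     >>> eval_lineage = EvalLineage(summary_record=summary_writer)
#     >>> model = Model(train_network)
#     >>> model.train(epoch_num, train_dataset, callbacks=[train_lineage])
#     >>> model.eval(eval_dataset, callbacks=[eval_lineage])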