
test_checkpoint_hook.py

# Copyright (c) Alibaba, Inc. and its affiliates.
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import torch
from torch import nn

from modelscope.metrics.builder import METRICS, MetricKeys
from modelscope.trainers import build_trainer
from modelscope.utils.constant import LogKeys, ModelFile
from modelscope.utils.registry import default_group
from modelscope.utils.test_utils import create_dummy_test_dataset

# Module-level counter mutated by DummyMetric.evaluate(); each test resets it
# to 0 so the fake accuracy lookup starts at epoch 1. (The original code only
# assigned a dead local inside create_dummy_metric; initializing it here at
# module scope is what the `global` statements actually refer to.)
_global_iter = 0


def create_dummy_metric():

    @METRICS.register_module(
        group_key=default_group, module_name='DummyMetric', force=True)
    class DummyMetric:

        # Fake accuracy reported for each evaluation round, keyed by epoch.
        _fake_acc_by_epoch = {1: 0.1, 2: 0.5, 3: 0.2}

        def add(self, *args, **kwargs):
            pass

        def evaluate(self):
            global _global_iter
            _global_iter += 1
            return {MetricKeys.ACCURACY: self._fake_acc_by_epoch[_global_iter]}

dummy_dataset = create_dummy_test_dataset(
    np.random.random(size=(5, )), np.random.randint(0, 4, (1, )), 20)

class DummyModel(nn.Module):
    """Tiny model whose loss is simply the sum of its logits."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(5, 4)
        self.bn = nn.BatchNorm1d(4)

    def forward(self, feat, labels):
        x = self.linear(feat)
        x = self.bn(x)
        loss = torch.sum(x)
        return dict(logits=x, loss=loss)

class CheckpointHookTest(unittest.TestCase):

    def setUp(self):
        print('Testing %s.%s' % (type(self).__name__, self._testMethodName))
        self.tmp_dir = tempfile.TemporaryDirectory().name
        if not os.path.exists(self.tmp_dir):
            os.makedirs(self.tmp_dir)
        create_dummy_metric()

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.tmp_dir)
    def test_checkpoint_hook(self):
        global _global_iter
        _global_iter = 0
        json_cfg = {
            'task': 'image_classification',
            'train': {
                'work_dir': self.tmp_dir,
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1
                },
                'optimizer': {
                    'type': 'SGD',
                    'lr': 0.01,
                    'options': {
                        'grad_clip': {
                            'max_norm': 2.0
                        }
                    }
                },
                'lr_scheduler': {
                    'type': 'StepLR',
                    'step_size': 2,
                    'options': {
                        'warmup': {
                            'type': 'LinearWarmup',
                            'warmup_iters': 2
                        }
                    }
                },
                'hooks': [{
                    'type': 'CheckpointHook',
                    'interval': 1
                }]
            }
        }

        config_path = os.path.join(self.tmp_dir, ModelFile.CONFIGURATION)
        with open(config_path, 'w') as f:
            json.dump(json_cfg, f)

        trainer_name = 'EpochBasedTrainer'
        kwargs = dict(
            cfg_file=config_path,
            model=DummyModel(),
            data_collator=None,
            train_dataset=dummy_dataset,
            max_epochs=2)

        trainer = build_trainer(trainer_name, kwargs)
        trainer.train()

        # CheckpointHook with interval=1 saves one checkpoint per epoch.
        results_files = os.listdir(self.tmp_dir)
        self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files)
        self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files)

class BestCkptSaverHookTest(unittest.TestCase):

    def setUp(self):
        print('Testing %s.%s' % (type(self).__name__, self._testMethodName))
        self.tmp_dir = tempfile.TemporaryDirectory().name
        if not os.path.exists(self.tmp_dir):
            os.makedirs(self.tmp_dir)
        create_dummy_metric()

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.tmp_dir)
    def test_best_checkpoint_hook(self):
        global _global_iter
        _global_iter = 0
        json_cfg = {
            'task': 'image_classification',
            'train': {
                'work_dir': self.tmp_dir,
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1
                },
                'optimizer': {
                    'type': 'SGD',
                    'lr': 0.01
                },
                'lr_scheduler': {
                    'type': 'StepLR',
                    'step_size': 2
                },
                'hooks': [{
                    'type': 'BestCkptSaverHook',
                    'metric_key': MetricKeys.ACCURACY,
                    'rule': 'min'
                }, {
                    'type': 'EvaluationHook',
                    'interval': 1,
                }]
            },
            'evaluation': {
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1,
                    'shuffle': False
                },
                'metrics': ['DummyMetric']
            }
        }

        config_path = os.path.join(self.tmp_dir, ModelFile.CONFIGURATION)
        with open(config_path, 'w') as f:
            json.dump(json_cfg, f)

        trainer_name = 'EpochBasedTrainer'
        kwargs = dict(
            cfg_file=config_path,
            model=DummyModel(),
            data_collator=None,
            train_dataset=dummy_dataset,
            eval_dataset=dummy_dataset,
            max_epochs=3)

        trainer = build_trainer(trainer_name, kwargs)
        trainer.train()

        results_files = os.listdir(self.tmp_dir)
        self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files)
        self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files)
        self.assertIn(f'{LogKeys.EPOCH}_3.pth', results_files)
        # With rule='min', the fake accuracies {1: 0.1, 2: 0.5, 3: 0.2} make
        # epoch 1 (accuracy 0.1) the "best" checkpoint.
        self.assertIn(f'best_{LogKeys.EPOCH}1_{MetricKeys.ACCURACY}0.1.pth',
                      results_files)

if __name__ == '__main__':
    unittest.main()
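
A minimal way to run just these two test cases outside a full test suite, as a
sketch: it assumes this file is saved as test_checkpoint_hook.py next to the
hypothetical runner script below, and that modelscope and torch are installed.

    # run_checkpoint_hook_tests.py (hypothetical helper, standard library only)
    import unittest

    import test_checkpoint_hook  # assumes the test file is importable

    # Collect both TestCase classes from the module and run them verbosely.
    suite = unittest.defaultTestLoader.loadTestsFromModule(test_checkpoint_hook)
    unittest.TextTestRunner(verbosity=2).run(suite)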