
test_optimizer_hook.py

# Copyright (c) Alibaba, Inc. and its affiliates.
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import torch
from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import MultiStepLR

from modelscope.metainfo import Trainers
from modelscope.trainers import build_trainer
from modelscope.utils.constant import ModelFile, TrainerStages
from modelscope.utils.test_utils import create_dummy_test_dataset

# A dummy dataset of 10 samples, each with a 2-dim float feature and a
# binary integer label, matching DummyModel's input size below.
dummy_dataset = create_dummy_test_dataset(
    np.random.random(size=(2, )), np.random.randint(0, 2, (1, )), 10)


# Minimal model with exactly four trainable tensors (linear.weight,
# linear.bias, bn.weight, bn.bias), which is what the param_groups
# assertions in the tests below count. forward() returns the dict of
# 'logits' and a scalar 'loss' that the trainer's train_step consumes.
class DummyModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(2, 2)
        self.bn = nn.BatchNorm1d(2)

    def forward(self, feat, labels):
        x = self.linear(feat)
        x = self.bn(x)
        loss = torch.sum(x)
        return dict(logits=x, loss=loss)


class OptimizerHookTest(unittest.TestCase):

    def setUp(self):
        print(('Testing %s.%s' % (type(self).__name__, self._testMethodName)))
        self.tmp_dir = tempfile.TemporaryDirectory().name
        if not os.path.exists(self.tmp_dir):
            os.makedirs(self.tmp_dir)

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.tmp_dir)

    def test_optimizer_hook(self):
        json_cfg = {
            'task': 'image_classification',
            'train': {
                'work_dir': self.tmp_dir,
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1
                }
            }
        }

        config_path = os.path.join(self.tmp_dir, ModelFile.CONFIGURATION)
        with open(config_path, 'w') as f:
            json.dump(json_cfg, f)

        model = DummyModel()
        optimizer = SGD(model.parameters(), lr=0.01)
        lr_scheduler = MultiStepLR(optimizer, milestones=[1, 2])
        trainer_name = Trainers.default
        kwargs = dict(
            cfg_file=config_path,
            model=model,
            train_dataset=dummy_dataset,
            optimizers=(optimizer, lr_scheduler),
            max_epochs=2,
            device='cpu')

        trainer = build_trainer(trainer_name, kwargs)
        train_dataloader = trainer._build_dataloader_with_dataset(
            trainer.train_dataset, **trainer.cfg.train.get('dataloader', {}))
        trainer.register_optimizers_hook()

        # Drive the training loop manually, invoking the trainer hooks at
        # each stage so the registered optimizer hook runs.
        trainer.invoke_hook(TrainerStages.before_run)
        for _ in range(trainer._epoch, trainer._max_epochs):
            trainer.invoke_hook(TrainerStages.before_train_epoch)
            for _, data_batch in enumerate(train_dataloader):
                trainer.invoke_hook(TrainerStages.before_train_iter)
                trainer.train_step(trainer.model, data_batch)
                trainer.invoke_hook(TrainerStages.after_train_iter)

                # All four model parameters should stay in the optimizer's
                # param group and remain trainable.
                self.assertEqual(
                    len(trainer.optimizer.param_groups[0]['params']), 4)
                for i in range(4):
                    self.assertTrue(trainer.optimizer.param_groups[0]
                                    ['params'][i].requires_grad)

            trainer.invoke_hook(TrainerStages.after_train_epoch)
            trainer._epoch += 1
        trainer.invoke_hook(TrainerStages.after_run)
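

# The AMP variant below repeats the same manual training loop with
# use_fp16=True and the model and data moved to the GPU. Presumably the
# AMP optimizer hook registered by register_optimizers_hook runs the
# forward pass of each iteration in fp16 and restores the model to fp32
# once `after_train_iter` has run, which is what the dtype assertions check.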
class TorchAMPOptimizerHookTest(unittest.TestCase):

    def setUp(self):
        print(('Testing %s.%s' % (type(self).__name__, self._testMethodName)))
        self.tmp_dir = tempfile.TemporaryDirectory().name
        if not os.path.exists(self.tmp_dir):
            os.makedirs(self.tmp_dir)

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.tmp_dir)

    @unittest.skipIf(not torch.cuda.is_available(),
                     'skip this test when cuda is not available')
    def test_amp_optimizer_hook(self):
        json_cfg = {
            'task': 'image_classification',
            'train': {
                'work_dir': self.tmp_dir,
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1
                }
            }
        }

        config_path = os.path.join(self.tmp_dir, ModelFile.CONFIGURATION)
        with open(config_path, 'w') as f:
            json.dump(json_cfg, f)

        model = DummyModel().cuda()
        optimizer = SGD(model.parameters(), lr=0.01)
        lr_scheduler = MultiStepLR(optimizer, milestones=[1, 2])
        trainer_name = Trainers.default
        kwargs = dict(
            cfg_file=config_path,
            model=model,
            train_dataset=dummy_dataset,
            optimizers=(optimizer, lr_scheduler),
            max_epochs=2,
            use_fp16=True)

        trainer = build_trainer(trainer_name, kwargs)
        train_dataloader = trainer._build_dataloader_with_dataset(
            trainer.train_dataset, **trainer.cfg.train.get('dataloader', {}))
        trainer.register_optimizers_hook()

        trainer.invoke_hook(TrainerStages.before_run)
        for _ in range(trainer._epoch, trainer._max_epochs):
            trainer.invoke_hook(TrainerStages.before_train_epoch)
            for _, data_batch in enumerate(train_dataloader):
                # Move the batch onto the GPU before the fp16 forward pass.
                for k, v in data_batch.items():
                    data_batch[k] = v.cuda()
                trainer.invoke_hook(TrainerStages.before_train_iter)
                trainer.train_step(trainer.model, data_batch)
                trainer.invoke_hook(TrainerStages.after_train_iter)

                # Inside the iteration the forward pass runs under AMP, so
                # the stored logits come back as fp16.
                self.assertEqual(trainer.train_outputs['logits'].dtype,
                                 torch.float16)

                # After `after_train_iter` the model should be restored to
                # fp32, so a plain train_step now yields fp32 logits.
                trainer.train_step(trainer.model, data_batch)
                self.assertEqual(trainer.train_outputs['logits'].dtype,
                                 torch.float32)

                self.assertEqual(
                    len(trainer.optimizer.param_groups[0]['params']), 4)
                for i in range(4):
                    self.assertTrue(trainer.optimizer.param_groups[0]
                                    ['params'][i].requires_grad)

            trainer.invoke_hook(TrainerStages.after_train_epoch)
            trainer._epoch += 1
        trainer.invoke_hook(TrainerStages.after_run)


if __name__ == '__main__':
    unittest.main()
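
Running the tests: since the module ends with unittest.main(), invoking the file directly (for example `python test_optimizer_hook.py`) executes both test cases; the @unittest.skipIf guard skips the AMP case automatically on machines without CUDA.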