
test_checkpoint_hook.py

# Copyright (c) Alibaba, Inc. and its affiliates.
import json
import os
import shutil
import tempfile
import unittest
from abc import ABCMeta

import torch
from torch import nn
from torch.utils.data import Dataset

from modelscope.trainers import build_trainer
from modelscope.utils.constant import ModelFile
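
# Minimal in-memory dataset: 20 random samples, each a 5-dim feature vector
# and an integer class label in [0, 4). Just enough data to drive the trainer.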
class DummyDataset(Dataset, metaclass=ABCMeta):

    def __len__(self):
        return 20

    def __getitem__(self, idx):
        # The key must be 'labels' so the batch dict lines up with the
        # DummyModel.forward(feat, labels) signature below.
        return dict(feat=torch.rand((5, )), labels=torch.randint(0, 4, (1, )))
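
# Tiny model: a linear layer plus batch norm. It returns the dict format the
# trainer consumes, with 'logits' and a scalar 'loss'; the labels are unused
# because the dummy loss is simply the sum of the outputs.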
class DummyModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(5, 4)
        self.bn = nn.BatchNorm1d(4)

    def forward(self, feat, labels):
        x = self.linear(feat)
        x = self.bn(x)
        loss = torch.sum(x)
        return dict(logits=x, loss=loss)
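
# Verifies that CheckpointHook writes one checkpoint file per epoch over a
# short two-epoch training run.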
class CheckpointHookTest(unittest.TestCase):

    def setUp(self):
        print('Testing %s.%s' % (type(self).__name__, self._testMethodName))
        # mkdtemp creates the directory and leaves cleanup to tearDown;
        # tempfile.TemporaryDirectory().name risks the directory being
        # deleted as soon as the object is garbage-collected.
        self.tmp_dir = tempfile.mkdtemp()

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.tmp_dir)
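
    # The config exercises a minimal training setup: SGD with gradient
    # clipping, StepLR with linear warmup, and CheckpointHook firing every
    # epoch. It is written to ModelFile.CONFIGURATION, ModelScope's standard
    # configuration filename.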
    def test_checkpoint_hook(self):
        json_cfg = {
            'task': 'image_classification',
            'train': {
                'work_dir': self.tmp_dir,
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1
                },
                'optimizer': {
                    'type': 'SGD',
                    'lr': 0.01,
                    'options': {
                        'grad_clip': {
                            'max_norm': 2.0
                        }
                    }
                },
                'lr_scheduler': {
                    'type': 'StepLR',
                    'step_size': 2,
                    'options': {
                        'warmup': {
                            'type': 'LinearWarmup',
                            'warmup_iters': 2
                        }
                    }
                },
                'hooks': [{
                    'type': 'CheckpointHook',
                    'interval': 1
                }]
            }
        }

        config_path = os.path.join(self.tmp_dir, ModelFile.CONFIGURATION)
        with open(config_path, 'w') as f:
            json.dump(json_cfg, f)

        trainer_name = 'EpochBasedTrainer'
        kwargs = dict(
            cfg_file=config_path,
            model=DummyModel(),
            data_collator=None,
            train_dataset=DummyDataset(),
            max_epochs=2)
        trainer = build_trainer(trainer_name, kwargs)
        trainer.train()

        results_files = os.listdir(self.tmp_dir)
        self.assertIn('epoch_1.pth', results_files)
        self.assertIn('epoch_2.pth', results_files)

if __name__ == '__main__':
    unittest.main()
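
Because the file ends with unittest.main(), the test can be run standalone
(assuming torch and modelscope are installed): python test_checkpoint_hook.py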