
test_trainer.py

# Copyright (c) Alibaba, Inc. and its affiliates.
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import torch
from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR

from modelscope.metainfo import Metrics, Trainers
from modelscope.metrics.builder import MetricKeys
from modelscope.trainers import build_trainer
from modelscope.utils.constant import LogKeys, ModeKeys, ModelFile
from modelscope.utils.test_utils import create_dummy_test_dataset, test_level
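
# Two dummy datasets of random 5-dim feature vectors with integer labels in
# [0, 4): 20 samples in the small set, 40 in the big one.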
dummy_dataset_small = create_dummy_test_dataset(
    np.random.random(size=(5, )), np.random.randint(0, 4, (1, )), 20)

dummy_dataset_big = create_dummy_test_dataset(
    np.random.random(size=(5, )), np.random.randint(0, 4, (1, )), 40)
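

# Minimal trainable model: a linear layer followed by batch norm. The `labels`
# argument is accepted but unused; the "loss" is simply the sum of the logits.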
class DummyModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(5, 4)
        self.bn = nn.BatchNorm1d(4)

    def forward(self, feat, labels):
        x = self.linear(feat)
        x = self.bn(x)
        loss = torch.sum(x)
        return dict(logits=x, loss=loss)


class TrainerTest(unittest.TestCase):

    def setUp(self):
        print('Testing %s.%s' % (type(self).__name__, self._testMethodName))
        self.tmp_dir = tempfile.TemporaryDirectory().name
        if not os.path.exists(self.tmp_dir):
            os.makedirs(self.tmp_dir)

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.tmp_dir)
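
    # Fully config-driven run: the optimizer (with gradient clipping under
    # 'options'), the lr scheduler (with linear warmup), and all hooks are
    # built by the trainer from the JSON config written below.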
    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_train_0(self):
        json_cfg = {
            'train': {
                'work_dir': self.tmp_dir,
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1
                },
                'optimizer': {
                    'type': 'SGD',
                    'lr': 0.01,
                    'options': {
                        'grad_clip': {
                            'max_norm': 2.0
                        }
                    }
                },
                'lr_scheduler': {
                    'type': 'StepLR',
                    'step_size': 2,
                    'options': {
                        'warmup': {
                            'type': 'LinearWarmup',
                            'warmup_iters': 2
                        }
                    }
                },
                'hooks': [{
                    'type': 'CheckpointHook',
                    'interval': 1
                }, {
                    'type': 'TextLoggerHook',
                    'interval': 1
                }, {
                    'type': 'IterTimerHook'
                }, {
                    'type': 'EvaluationHook',
                    'interval': 1
                }]
            },
            'evaluation': {
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1,
                    'shuffle': False
                },
                'metrics': [Metrics.seq_cls_metric]
            }
        }

        config_path = os.path.join(self.tmp_dir, ModelFile.CONFIGURATION)
        with open(config_path, 'w') as f:
            json.dump(json_cfg, f)

        trainer_name = Trainers.default
        kwargs = dict(
            cfg_file=config_path,
            model=DummyModel(),
            data_collator=None,
            train_dataset=dummy_dataset_small,
            eval_dataset=dummy_dataset_small,
            max_epochs=3,
            device='cpu')
        trainer = build_trainer(trainer_name, kwargs)
        trainer.train()

        results_files = os.listdir(self.tmp_dir)
        self.assertIn(f'{trainer.timestamp}.log.json', results_files)
        self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files)
        self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files)
        self.assertIn(f'{LogKeys.EPOCH}_3.pth', results_files)
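
    # Same run as test_train_0, except the optimizer and lr scheduler are
    # constructed in code and handed over via `optimizers` rather than being
    # built from the config.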
    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_train_1(self):
        json_cfg = {
            'train': {
                'work_dir': self.tmp_dir,
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1
                },
                'hooks': [{
                    'type': 'CheckpointHook',
                    'interval': 1
                }, {
                    'type': 'TextLoggerHook',
                    'interval': 1
                }, {
                    'type': 'IterTimerHook'
                }, {
                    'type': 'EvaluationHook',
                    'interval': 1
                }]
            },
            'evaluation': {
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1,
                    'shuffle': False
                },
                'metrics': [Metrics.seq_cls_metric]
            }
        }

        config_path = os.path.join(self.tmp_dir, ModelFile.CONFIGURATION)
        with open(config_path, 'w') as f:
            json.dump(json_cfg, f)

        model = DummyModel()
        optimizer = SGD(model.parameters(), lr=0.01)
        lr_scheduler = StepLR(optimizer, 2)
        trainer_name = Trainers.default
        kwargs = dict(
            cfg_file=config_path,
            model=model,
            data_collator=None,
            train_dataset=dummy_dataset_small,
            eval_dataset=dummy_dataset_small,
            optimizers=(optimizer, lr_scheduler),
            max_epochs=3,
            device='cpu')
        trainer = build_trainer(trainer_name, kwargs)
        trainer.train()

        results_files = os.listdir(self.tmp_dir)
        self.assertIn(f'{trainer.timestamp}.log.json', results_files)
        self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files)
        self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files)
        self.assertIn(f'{LogKeys.EPOCH}_3.pth', results_files)
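
    # Only an EvaluationHook is configured here, yet the assertions below
    # still expect per-epoch checkpoints and JSON text logs, i.e. the
    # trainer's default hooks must kick in; the log file is then checked
    # line by line.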
    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_train_with_default_config(self):
        json_cfg = {
            'train': {
                'work_dir': self.tmp_dir,
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1
                },
                'hooks': [{
                    'type': 'EvaluationHook',
                    'interval': 1
                }]
            },
            'evaluation': {
                'dataloader': {
                    'batch_size_per_gpu': 2,
                    'workers_per_gpu': 1,
                    'shuffle': False
                },
                'metrics': [Metrics.seq_cls_metric]
            }
        }

        config_path = os.path.join(self.tmp_dir, ModelFile.CONFIGURATION)
        with open(config_path, 'w') as f:
            json.dump(json_cfg, f)

        model = DummyModel()
        optimizer = SGD(model.parameters(), lr=0.01)
        lr_scheduler = StepLR(optimizer, 2)
        trainer_name = Trainers.default
        kwargs = dict(
            cfg_file=config_path,
            model=model,
            data_collator=None,
            train_dataset=dummy_dataset_big,
            eval_dataset=dummy_dataset_small,
            optimizers=(optimizer, lr_scheduler),
            max_epochs=3,
            device='cpu')
        trainer = build_trainer(trainer_name, kwargs)
        trainer.train()

        results_files = os.listdir(self.tmp_dir)
        json_file = os.path.join(self.tmp_dir, f'{trainer.timestamp}.log.json')
        with open(json_file, 'r') as f:
            lines = [i.strip() for i in f.readlines()]
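
        # 40 samples at batch size 2 give 20 iterations per epoch, so the
        # assertions below expect two train log lines per epoch (iterations
        # 10 and 20, implying a logging interval of 10) plus one eval line.
        # StepLR(step_size=2) with PyTorch's default gamma=0.1 keeps the lr
        # at 0.01 for epochs 1-2 and drops it to 0.001 for epoch 3.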
        self.assertDictContainsSubset(
            {
                LogKeys.MODE: ModeKeys.TRAIN,
                LogKeys.EPOCH: 1,
                LogKeys.ITER: 10,
                LogKeys.LR: 0.01
            }, json.loads(lines[0]))
        self.assertDictContainsSubset(
            {
                LogKeys.MODE: ModeKeys.TRAIN,
                LogKeys.EPOCH: 1,
                LogKeys.ITER: 20,
                LogKeys.LR: 0.01
            }, json.loads(lines[1]))
        self.assertDictContainsSubset(
            {
                LogKeys.MODE: ModeKeys.EVAL,
                LogKeys.EPOCH: 1,
                LogKeys.ITER: 20
            }, json.loads(lines[2]))
        self.assertDictContainsSubset(
            {
                LogKeys.MODE: ModeKeys.TRAIN,
                LogKeys.EPOCH: 2,
                LogKeys.ITER: 10,
                LogKeys.LR: 0.01
            }, json.loads(lines[3]))
        self.assertDictContainsSubset(
            {
                LogKeys.MODE: ModeKeys.TRAIN,
                LogKeys.EPOCH: 2,
                LogKeys.ITER: 20,
                LogKeys.LR: 0.01
            }, json.loads(lines[4]))
        self.assertDictContainsSubset(
            {
                LogKeys.MODE: ModeKeys.EVAL,
                LogKeys.EPOCH: 2,
                LogKeys.ITER: 20
            }, json.loads(lines[5]))
        self.assertDictContainsSubset(
            {
                LogKeys.MODE: ModeKeys.TRAIN,
                LogKeys.EPOCH: 3,
                LogKeys.ITER: 10,
                LogKeys.LR: 0.001
            }, json.loads(lines[6]))
        self.assertDictContainsSubset(
            {
                LogKeys.MODE: ModeKeys.TRAIN,
                LogKeys.EPOCH: 3,
                LogKeys.ITER: 20,
                LogKeys.LR: 0.001
            }, json.loads(lines[7]))
        self.assertDictContainsSubset(
            {
                LogKeys.MODE: ModeKeys.EVAL,
                LogKeys.EPOCH: 3,
                LogKeys.ITER: 20
            }, json.loads(lines[8]))
        self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files)
        self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files)
        self.assertIn(f'{LogKeys.EPOCH}_3.pth', results_files)

        for i in [0, 1, 3, 4, 6, 7]:
            self.assertIn(LogKeys.DATA_LOAD_TIME, lines[i])
            self.assertIn(LogKeys.ITER_TIME, lines[i])
        for i in [2, 5, 8]:
            self.assertIn(MetricKeys.ACCURACY, lines[i])
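

# Smoke test for the trainer registered under the name 'dummy', driven by an
# example config file; it only checks that train() and evaluate() run without
# raising.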
class DummyTrainerTest(unittest.TestCase):

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_dummy(self):
        default_args = dict(cfg_file='configs/examples/train.json')
        trainer = build_trainer('dummy', default_args)
        trainer.train()
        trainer.evaluate()


if __name__ == '__main__':
    unittest.main()