
run.py 13 kB

[to #9061073] feat: merge tts to master
Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/9061073

* [to #41669377] docs and tools refinement and release: add build_doc linter script; add sphinx-docs support; add development doc and api doc; change version to 0.1.0 for the first internal release. Link: https://code.aone.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/8775307
* [to #41669377] add pipeline tutorial and fix bugs when using pipeline with certain model and preprocessor. Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/8814301
* refine doc
* refine doc
* merge remote release/0.1 and fix conflict
* Merge branch 'release/0.1' into 'nls/tts'. See merge request !1700968
* [Add] add tts preprocessor without requirements; finish requirements build later
* [Add] add requirements and frd submodule
* [Fix] remove models submodule
* [Add] add am module
* [Update] update am and vocoder
* [Update] remove submodule
* [Update] add models
* [Fix] fix init error
* [Fix] fix bugs with tts pipeline
* merge master
* [Update] merge from master
* remove frd submodule and use wheel from oss
* change scripts
* [Fix] fix bugs in am and vocoder
* [Merge] merge from master
* Merge branch 'master' into nls/tts
* [Fix] fix bugs
* [Fix] fix pep8
* Merge branch 'master' into nls/tts
* [Update] remove hparams and import configuration from kwargs
* Merge branch 'master' into nls/tts
* upgrade tf113 to tf115
* Merge branch 'nls/tts' of gitlab.alibaba-inc.com:Ali-MaaS/MaaS-lib into nls/tts
* add multiple versions of ttsfrd
* merge master
* [Fix] fix cr comments
* Merge branch 'master' into nls/tts
* [Fix] fix cr comments 0617
* Merge branch 'master' into nls/tts
* [Fix] remove commented-out code
* [Merge] merge from master
* [Fix] fix crash for incompatible tf and pytorch versions; frd now uses a zip file resource
* Merge branch 'master' into nls/tts
* [Add] add cuda support
3 years ago
#!/usr/bin/env python
# Copyright (c) Alibaba, Inc. and its affiliates.
import argparse
import datetime
import multiprocessing
import os
import subprocess
import sys
import tempfile
import unittest
from fnmatch import fnmatch
from multiprocessing.managers import BaseManager
from pathlib import Path
from unittest import TextTestResult

import pandas

# NOTICE: Tensorflow 1.15 does not seem fully compatible with pytorch.
# A segmentation fault may be raised by the pytorch cpp library
# if 'import tensorflow' comes before 'import torch'.
# Putting an 'import torch' here bypasses this incompatibility.
import torch

from modelscope.utils.logger import get_logger
from modelscope.utils.test_utils import set_test_level, test_level

logger = get_logger()

def test_cases_result_to_df(result_list):
    table_header = [
        'Name', 'Result', 'Info', 'Start time', 'Stop time',
        'Time cost(seconds)'
    ]
    df = pandas.DataFrame(
        result_list, columns=table_header).sort_values(
            by=['Start time'], ascending=True)
    return df

def statistics_test_result(df):
    total_cases = df.shape[0]
    # yapf: disable
    success_cases = df.loc[df['Result'] == 'Success'].shape[0]
    error_cases = df.loc[df['Result'] == 'Error'].shape[0]
    failures_cases = df.loc[df['Result'] == 'Failures'].shape[0]
    expected_failure_cases = df.loc[df['Result'] == 'ExpectedFailures'].shape[0]
    unexpected_success_cases = df.loc[df['Result'] == 'UnexpectedSuccesses'].shape[0]
    skipped_cases = df.loc[df['Result'] == 'Skipped'].shape[0]
    # yapf: enable

    if failures_cases > 0 or \
       error_cases > 0 or \
       unexpected_success_cases > 0:
        result = 'FAILED'
    else:
        result = 'SUCCESS'

    result_msg = '%s (Runs=%s,success=%s,failures=%s,errors=%s,\
skipped=%s,expected failures=%s,unexpected successes=%s)' % (
        result, total_cases, success_cases, failures_cases, error_cases,
        skipped_cases, expected_failure_cases, unexpected_success_cases)
    print(result_msg)
    if result == 'FAILED':
        sys.exit(1)

def gather_test_suites_in_files(test_dir, case_file_list, list_tests):
    test_suite = unittest.TestSuite()
    for case in case_file_list:
        test_case = unittest.defaultTestLoader.discover(
            start_dir=test_dir, pattern=case)
        test_suite.addTest(test_case)
        if hasattr(test_case, '__iter__'):
            for subcase in test_case:
                if list_tests:
                    print(subcase)
        else:
            if list_tests:
                print(test_case)
    return test_suite

def gather_test_suites_files(test_dir, pattern):
    case_file_list = []
    for dirpath, dirnames, filenames in os.walk(test_dir):
        for file in filenames:
            if fnmatch(file, pattern):
                case_file_list.append(file)
    return case_file_list

def collect_test_results(case_results):
    # each item is (Name, Result, Info, Start time, Stop time, Time cost)
    result_list = []
    for case_result in case_results.successes:
        result_list.append(
            (case_result.test_full_name, 'Success', '', case_result.start_time,
             case_result.stop_time, case_result.time_cost))
    for case_result in case_results.errors:
        result_list.append(
            (case_result[0].test_full_name, 'Error', case_result[1],
             case_result[0].start_time, case_result[0].stop_time,
             case_result[0].time_cost))
    for case_result in case_results.skipped:
        result_list.append(
            (case_result[0].test_full_name, 'Skipped', case_result[1],
             case_result[0].start_time, case_result[0].stop_time,
             case_result[0].time_cost))
    for case_result in case_results.expectedFailures:
        result_list.append(
            (case_result[0].test_full_name, 'ExpectedFailures', case_result[1],
             case_result[0].start_time, case_result[0].stop_time,
             case_result[0].time_cost))
    for case_result in case_results.failures:
        result_list.append(
            (case_result[0].test_full_name, 'Failures', case_result[1],
             case_result[0].start_time, case_result[0].stop_time,
             case_result[0].time_cost))
    for case_result in case_results.unexpectedSuccesses:
        result_list.append((case_result.test_full_name, 'UnexpectedSuccesses',
                            '', case_result.start_time, case_result.stop_time,
                            case_result.time_cost))
    return result_list

class TestSuiteRunner:
    """Run a single test suite file and put the collected results on a queue.

    Kept for driving suites from a separate process; not referenced elsewhere
    in this file.
    """

    def run(self, msg_queue, test_dir, test_suite_file):
        test_suite = unittest.TestSuite()
        test_case = unittest.defaultTestLoader.discover(
            start_dir=test_dir, pattern=test_suite_file)
        test_suite.addTest(test_case)
        runner = TimeCostTextTestRunner()
        test_suite_result = runner.run(test_suite)
        msg_queue.put(collect_test_results(test_suite_result))

def run_command_with_popen(cmd):
    """Run cmd in a subprocess and stream its combined output to stdout."""
    with subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            bufsize=1,
            encoding='utf8') as sub_process:
        for line in iter(sub_process.stdout.readline, ''):
            sys.stdout.write(line)

def run_in_subprocess(args):
    # Only the cases listed in args.isolated_cases run in their own
    # subprocess; all other cases run together in the current process.
    test_suite_files = gather_test_suites_files(
        os.path.abspath(args.test_dir), args.pattern)
    if args.subprocess:  # run every case in a subprocess
        isolated_cases = test_suite_files
    else:
        isolated_cases = []
        with open(args.isolated_cases, 'r') as f:
            for line in f:
                if line.strip() in test_suite_files:
                    isolated_cases.append(line.strip())

    if not args.list_tests:
        with tempfile.TemporaryDirectory() as temp_result_dir:
            for test_suite_file in isolated_cases:  # run case in subprocess
                cmd = [
                    'python', 'tests/run.py', '--pattern', test_suite_file,
                    '--result_dir', temp_result_dir
                ]
                run_command_with_popen(cmd)

            result_dfs = []
            # run the remaining cases in the current process.
            remain_suite_files = [
                item for item in test_suite_files if item not in isolated_cases
            ]
            test_suite = gather_test_suites_in_files(args.test_dir,
                                                     remain_suite_files,
                                                     args.list_tests)
            if test_suite.countTestCases() > 0:
                runner = TimeCostTextTestRunner()
                result = runner.run(test_suite)
                result = collect_test_results(result)
                df = test_cases_result_to_df(result)
                result_dfs.append(df)

            # collect the results written by the subprocesses
            result_path = Path(temp_result_dir)
            for result in result_path.iterdir():
                if Path.is_file(result):
                    df = pandas.read_pickle(result)
                    result_dfs.append(df)

            result_pd = pandas.concat(
                result_dfs)  # merge result of every test suite.
            print_table_result(result_pd)
            print_abnormal_case_info(result_pd)
            statistics_test_result(result_pd)

def get_object_full_name(obj):
    klass = obj.__class__
    module = klass.__module__
    if module == 'builtins':
        return klass.__qualname__
    return module + '.' + klass.__qualname__

class TimeCostTextTestResult(TextTestResult):
    """TextTestResult that also records each test case's time cost."""

    def __init__(self, stream, descriptions, verbosity):
        self.successes = []
        return super(TimeCostTextTestResult,
                     self).__init__(stream, descriptions, verbosity)

    def startTest(self, test):
        test.start_time = datetime.datetime.now()
        test.test_full_name = get_object_full_name(
            test) + '.' + test._testMethodName
        self.stream.writeln('Test case: %s start at: %s' %
                            (test.test_full_name, test.start_time))
        return super(TimeCostTextTestResult, self).startTest(test)

    def stopTest(self, test):
        TextTestResult.stopTest(self, test)
        test.stop_time = datetime.datetime.now()
        test.time_cost = (test.stop_time - test.start_time).total_seconds()
        self.stream.writeln(
            'Test case: %s stop at: %s, cost time: %s(seconds)' %
            (test.test_full_name, test.stop_time, test.time_cost))
        super(TimeCostTextTestResult, self).stopTest(test)

    def addSuccess(self, test):
        self.successes.append(test)
        # note: super(TextTestResult, self) skips TextTestResult's own
        # addSuccess and calls TestResult.addSuccess directly.
        super(TextTestResult, self).addSuccess(test)

class TimeCostTextTestRunner(unittest.runner.TextTestRunner):
    resultclass = TimeCostTextTestResult

    def run(self, test):
        return super(TimeCostTextTestRunner, self).run(test)

    def _makeResult(self):
        result = super(TimeCostTextTestRunner, self)._makeResult()
        return result

def gather_test_cases(test_dir, pattern, list_tests):
    case_list = []
    for dirpath, dirnames, filenames in os.walk(test_dir):
        for file in filenames:
            if fnmatch(file, pattern):
                case_list.append(file)
    test_suite = unittest.TestSuite()
    for case in case_list:
        test_case = unittest.defaultTestLoader.discover(
            start_dir=test_dir, pattern=case)
        test_suite.addTest(test_case)
        if hasattr(test_case, '__iter__'):
            for subcase in test_case:
                if list_tests:
                    print(subcase)
        else:
            if list_tests:
                print(test_case)
    return test_suite

def print_abnormal_case_info(df):
    df = df.loc[(df['Result'] == 'Error') | (df['Result'] == 'Failures')]
    for _, row in df.iterrows():
        print('Case %s run result: %s, msg:\n%s' %
              (row['Name'], row['Result'], row['Info']))

def print_table_result(df):
    df = df.loc[df['Result'] != 'Skipped']
    df = df.drop('Info', axis=1)
    formatters = {
        'Name': '{{:<{}s}}'.format(df['Name'].str.len().max()).format,
        'Result': '{{:<{}s}}'.format(df['Result'].str.len().max()).format,
    }
    with pandas.option_context('display.max_rows', None, 'display.max_columns',
                               None, 'display.width', None):
        print(df.to_string(justify='left', formatters=formatters, index=False))

def main(args):
    runner = TimeCostTextTestRunner()
    test_suite = gather_test_cases(
        os.path.abspath(args.test_dir), args.pattern, args.list_tests)
    if not args.list_tests:
        result = runner.run(test_suite)
        result = collect_test_results(result)
        df = test_cases_result_to_df(result)
        if args.result_dir is not None:
            file_name = str(int(datetime.datetime.now().timestamp() * 1000))
            df.to_pickle(os.path.join(args.result_dir, file_name))
        else:
            print_table_result(df)
            print_abnormal_case_info(df)
            statistics_test_result(df)

if __name__ == '__main__':
    parser = argparse.ArgumentParser('test runner')
    parser.add_argument(
        '--list_tests', action='store_true', help='list all tests')
    parser.add_argument(
        '--pattern', default='test_*.py', help='test file pattern')
    parser.add_argument(
        '--test_dir', default='tests', help='directory to be tested')
    parser.add_argument(
        '--level', default=0, type=int, help='2 -- all, 1 -- p1, 0 -- p0')
    parser.add_argument(
        '--disable_profile', action='store_true', help='disable profiling')
    parser.add_argument(
        '--isolated_cases',
        default=None,
        help='specified isolated cases config file')
    parser.add_argument(
        '--subprocess',
        action='store_true',
        help='run every test suite in its own subprocess')
    parser.add_argument(
        '--result_dir',
        default=None,
        help='Save result to directory, internal use only')
    args = parser.parse_args()
    set_test_level(args.level)
    os.environ['REGRESSION_BASELINE'] = '1'
    logger.info(f'TEST LEVEL: {test_level()}')
    if not args.disable_profile:
        from utils import profiler
        logger.info('enable profile ...')
        profiler.enable()
    # --isolated_cases and --subprocess conflict; check that combination
    # first, then dispatch to the subprocess runner or the in-process runner.
    if args.isolated_cases is not None and args.subprocess:
        print('isolated_cases and subprocess conflict')
        sys.exit(1)
    elif args.isolated_cases is not None or args.subprocess:
        run_in_subprocess(args)
    else:
        main(args)
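
Going by the argument parser above, typical invocations of this runner look like the following sketch (the flags and the tests/run.py path come from the code itself; the --pattern value and the isolated-cases file name are made-up examples):

    # run the default level-0 cases in the current process
    python tests/run.py

    # run level-1 cases matching a narrower file pattern
    python tests/run.py --level 1 --pattern 'test_tts*.py'

    # run every test suite in its own subprocess and merge the per-suite results
    python tests/run.py --subprocess

    # run only the suites listed in a config file in separate subprocesses
    python tests/run.py --isolated_cases isolated_cases.txt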