
[to #42461396] feat: test_level support

* add test level support
* update develop doc
Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/9021354
Branch: master · wenmeng.zwm, 3 years ago · commit c59833c7ee
8 changed files with 100 additions and 4 deletions

1. docs/source/develop.md (+52 −3)
2. modelscope/utils/test_utils.py (+20 −0)
3. tests/pipelines/test_image_captioning.py (+1 −0)
4. tests/pipelines/test_image_matting.py (+4 −0)
5. tests/pipelines/test_person_image_cartoon.py (+3 −0)
6. tests/pipelines/test_text_classification.py (+6 −0)
7. tests/pipelines/test_text_generation.py (+5 −1)
8. tests/run.py (+9 −0)

docs/source/develop.md (+52 −3)

@@ -34,13 +34,62 @@ make linter
```


## 2. Test

### 2.1 Test level

There are three test levels:

* level 0: tests for the basic interfaces and functions of the framework, such as `tests/trainers/test_trainer_base.py`
* level 1: important functional tests that cover end-to-end workflows, such as `tests/pipelines/test_image_matting.py`
* level 2: scenario tests for all implemented modules, such as models and pipelines, in different algorithm fields.

The default test level is 0, which runs only level-0 cases. You can set the test level via the environment variable `TEST_LEVEL`. For more details, refer to [test-doc](https://alidocs.dingtalk.com/i/nodes/mdvQnONayjBJKLXy1Bp38PY2MeXzp5o0?dontjump=true&nav=spaces&navQuery=spaceId%3Dnb9XJNlZxbgrOXyA)


```bash
# run all tests
TEST_LEVEL=2 make test

# run important functional tests
TEST_LEVEL=1 make test

# run core UT and basic functional tests
make test
```


When writing test cases, assign a test level to each case using the code below. If no level is assigned, the case defaults to level 0 and will run in every test stage.

File: test_module.py
```python
import unittest

from modelscope.utils.test_utils import test_level


class ImageCartoonTest(unittest.TestCase):

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_by_direct_model_download(self):
        pass
```
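
A usage note on the `make test` flow above: a level-1 case like this one is skipped in the default run (level 0) and included once the level is raised.

```bash
# the default run skips level-1 cases; raise the level to include them
TEST_LEVEL=1 make test
```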

### 2.2 Run tests

1. Run your own single test case to verify your self-implemented function. You can run your test file directly; if it fails to run, check whether the variable `TEST_LEVEL` exists in the environment and unset it (see the sketch after this list).
```bash
python tests/path/to/your_test.py
```

2. Remember to run the core tests locally before starting a code review; by default this runs only test cases with level 0.
```bash
make test
```

3. After you start a code review, CI tests will be triggered, which run test cases up to level 1.

4. Daily regression tests run all cases at 0:00 each day on the master branch.
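
As referenced in step 1, a short sketch of controlling the level for a direct run (the path is a placeholder; when `TEST_LEVEL` is not exported, `test_level()` falls back to its in-module default of 2, so all cases in the file run):

```bash
# drop a stray TEST_LEVEL so every case in the file executes
unset TEST_LEVEL
python tests/path/to/your_test.py

# or pin a level explicitly for a single invocation
TEST_LEVEL=0 python tests/path/to/your_test.py
```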


## Code Review




modelscope/utils/test_utils.py (+20 −0)

@@ -0,0 +1,20 @@
#!/usr/bin/env python
# Copyright (c) Alibaba, Inc. and its affiliates.

import os

TEST_LEVEL = 2
TEST_LEVEL_STR = 'TEST_LEVEL'


def test_level():
    global TEST_LEVEL
    if TEST_LEVEL_STR in os.environ:
        TEST_LEVEL = int(os.environ[TEST_LEVEL_STR])

    return TEST_LEVEL


def set_test_level(level: int):
    global TEST_LEVEL
    TEST_LEVEL = level
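
A minimal usage sketch (not part of the commit) showing that the environment variable takes precedence over `set_test_level`, since `test_level()` re-reads `os.environ` on every call:

```python
import os

from modelscope.utils.test_utils import set_test_level, test_level

set_test_level(1)
print(test_level())  # 1, while TEST_LEVEL is absent from the environment

os.environ['TEST_LEVEL'] = '2'
print(test_level())  # 2, the exported value wins on every call
```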

tests/pipelines/test_image_captioning.py (+1 −0)

@@ -7,6 +7,7 @@ import unittest
from modelscope.fileio import File
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.test_utils import test_level




class ImageCaptionTest(unittest.TestCase):


tests/pipelines/test_image_matting.py (+4 −0)

@@ -11,6 +11,7 @@ from modelscope.pipelines import pipeline
from modelscope.pydatasets import PyDataset
from modelscope.utils.constant import ModelFile, Tasks
from modelscope.utils.hub import get_model_cache_dir
from modelscope.utils.test_utils import test_level




class ImageMattingTest(unittest.TestCase):
@@ -38,6 +39,7 @@ class ImageMattingTest(unittest.TestCase):
        )
        cv2.imwrite('result.png', result['output_png'])

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_dataset(self):
        input_location = [
            'http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/data/test/maas/image_matting/test.png'
@@ -52,6 +54,7 @@ class ImageMattingTest(unittest.TestCase):
        cv2.imwrite('result.png', next(result)['output_png'])
        print(f'Output written to {osp.abspath("result.png")}')

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_modelhub(self):
        img_matting = pipeline(Tasks.image_matting, model=self.model_id)


@@ -61,6 +64,7 @@ class ImageMattingTest(unittest.TestCase):
        cv2.imwrite('result.png', result['output_png'])
        print(f'Output written to {osp.abspath("result.png")}')

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_modelhub_default_model(self):
        img_matting = pipeline(Tasks.image_matting)




tests/pipelines/test_person_image_cartoon.py (+3 −0)

@@ -8,6 +8,7 @@ import cv2
from modelscope.pipelines import pipeline
from modelscope.pipelines.base import Pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.test_utils import test_level




class ImageCartoonTest(unittest.TestCase):
@@ -36,10 +37,12 @@ class ImageCartoonTest(unittest.TestCase):
        img_cartoon = pipeline(Tasks.image_generation, model=model_dir)
        self.pipeline_inference(img_cartoon, self.test_image)

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_modelhub(self):
        img_cartoon = pipeline(Tasks.image_generation, model=self.model_id)
        self.pipeline_inference(img_cartoon, self.test_image)


    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_modelhub_default_model(self):
        img_cartoon = pipeline(Tasks.image_generation)
        self.pipeline_inference(img_cartoon, self.test_image)


tests/pipelines/test_text_classification.py (+6 −0)

@@ -12,6 +12,7 @@ from modelscope.preprocessors import SequenceClassificationPreprocessor
from modelscope.pydatasets import PyDataset
from modelscope.utils.constant import Hubs, Tasks
from modelscope.utils.hub import get_model_cache_dir
from modelscope.utils.test_utils import test_level




class SequenceClassificationTest(unittest.TestCase):
@@ -43,6 +44,7 @@ class SequenceClassificationTest(unittest.TestCase):
            break
        print(r)

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run(self):
        model_url = 'https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com' \
                    '/release/easynlp_modelzoo/alibaba-pai/bert-base-sst2.zip'
@@ -67,6 +69,7 @@ class SequenceClassificationTest(unittest.TestCase):
            Tasks.text_classification, model=model, preprocessor=preprocessor)
        print(pipeline2('Hello world!'))

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub(self):
        model = Model.from_pretrained(self.model_id)
        preprocessor = SequenceClassificationPreprocessor(
@@ -77,6 +80,7 @@ class SequenceClassificationTest(unittest.TestCase):
            preprocessor=preprocessor)
        self.predict(pipeline_ins)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_name(self):
        text_classification = pipeline(
            task=Tasks.text_classification, model=self.model_id)
@@ -85,6 +89,7 @@ class SequenceClassificationTest(unittest.TestCase):
            'glue', name='sst2', target='sentence', hub=Hubs.huggingface))
        self.printDataset(result)

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_default_model(self):
        text_classification = pipeline(task=Tasks.text_classification)
        result = text_classification(
@@ -92,6 +97,7 @@ class SequenceClassificationTest(unittest.TestCase):
            'glue', name='sst2', target='sentence', hub=Hubs.huggingface))
        self.printDataset(result)

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_dataset(self):
        model = Model.from_pretrained(self.model_id)
        preprocessor = SequenceClassificationPreprocessor(


tests/pipelines/test_text_generation.py (+5 −1)

@@ -8,6 +8,7 @@ from modelscope.models.nlp import PalmForTextGenerationModel
from modelscope.pipelines import TextGenerationPipeline, pipeline
from modelscope.preprocessors import TextGenerationPreprocessor
from modelscope.utils.constant import Tasks
from modelscope.utils.test_utils import test_level




class TextGenerationTest(unittest.TestCase):
@@ -15,7 +16,7 @@ class TextGenerationTest(unittest.TestCase):
    input1 = "今日天气类型='晴'&温度变化趋势='大幅上升'&最低气温='28℃'&最高气温='31℃'&体感='湿热'"
    input2 = "今日天气类型='多云'&体感='舒适'&最低气温='26℃'&最高气温='30℃'"


    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run(self):
        cache_path = snapshot_download(self.model_id)
        preprocessor = TextGenerationPreprocessor(
@@ -29,6 +30,7 @@ class TextGenerationTest(unittest.TestCase):
        print()
        print(f'input: {self.input2}\npipeline2: {pipeline2(self.input2)}')

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_model_from_modelhub(self):
        model = Model.from_pretrained(self.model_id)
        preprocessor = TextGenerationPreprocessor(
@@ -37,11 +39,13 @@ class TextGenerationTest(unittest.TestCase):
            task=Tasks.text_generation, model=model, preprocessor=preprocessor)
        print(pipeline_ins(self.input1))

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_model_name(self):
        pipeline_ins = pipeline(
            task=Tasks.text_generation, model=self.model_id)
        print(pipeline_ins(self.input2))


    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_default_model(self):
        pipeline_ins = pipeline(task=Tasks.text_generation)
        print(pipeline_ins(self.input2))


tests/run.py (+9 −0)

@@ -7,6 +7,11 @@ import sys
import unittest
from fnmatch import fnmatch


from modelscope.utils.logger import get_logger
from modelscope.utils.test_utils import set_test_level, test_level

logger = get_logger()



def gather_test_cases(test_dir, pattern, list_tests):
    case_list = []
@@ -49,5 +54,9 @@ if __name__ == '__main__':
        '--pattern', default='test_*.py', help='test file pattern')
    parser.add_argument(
        '--test_dir', default='tests', help='directory to be tested')
    # parse --level as an int so it compares correctly with integer case levels
    parser.add_argument(
        '--level', type=int, default=0, help='2 -- all, 1 -- p1, 0 -- p0')
    args = parser.parse_args()
    set_test_level(args.level)
    logger.info(f'TEST LEVEL: {test_level()}')
    main(args)
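
A quick usage sketch for the new flag (an exported `TEST_LEVEL` still takes precedence, since `test_level()` re-reads the environment on every call):

```bash
# run the full suite, including level-2 scenario tests
python tests/run.py --level 2

# run level-0 and level-1 cases that match the default pattern
python tests/run.py --level 1 --pattern "test_*.py"
```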
