diff --git a/modelscope/utils/device.py b/modelscope/utils/device.py
index 77e23122..40804970 100644
--- a/modelscope/utils/device.py
+++ b/modelscope/utils/device.py
@@ -20,9 +20,10 @@ def verify_device(device_name):
         device info (tuple): device_type and device_id, if device_id is not set, will use 0 as default.
     """
-    device_name = device_name.lower()
-    eles = device_name.split(':')
     err_msg = 'device should be either cpu, cuda, gpu, gpu:X or cuda:X where X is the ordinal for gpu device.'
+    assert device_name is not None and device_name != '', err_msg
+    device_name = device_name.lower()
+    eles = device_name.split(':')
     assert len(eles) <= 2, err_msg
     assert eles[0] in ['cpu', 'cuda', 'gpu'], err_msg
     device_type = eles[0]
     device_id = None
diff --git a/modelscope/utils/model_tag.py b/modelscope/utils/model_tag.py
new file mode 100644
index 00000000..380ddccb
--- /dev/null
+++ b/modelscope/utils/model_tag.py
@@ -0,0 +1,182 @@
+import logging
+import os
+
+import json
+import requests
+
+from modelscope.version import __version__
+
+
+# Tag models with their test results.
+class ModelTag(object):
+    _URL = os.environ.get('MODEL_TAG_URL', None)
+
+    # Commit model test results.
+    BATCH_COMMIT_RESULT_URL = f'{_URL}/batchCommitResult'
+    # Mark a test stage as finished.
+    BATCH_REFRESH_STAGE_URL = f'{_URL}/batchRefreshStage'
+    # Query the latest results of a model stage.
+    QUERY_MODEL_STAGE_URL = f'{_URL}/queryModelStage'
+
+    HEADER = {'Content-Type': 'application/json'}
+
+    # Test result codes.
+    MODEL_SKIP = 0
+    MODEL_FAIL = 1
+    MODEL_PASS = 2
+
+    class ItemResult(object):
+
+        def __init__(self):
+            self.result = 0
+            self.name = ''
+            self.info = ''
+
+        def to_json(self):
+            return {
+                'name': self.name,
+                'result': self.result,
+                'info': self.info
+            }
+
+    def __init__(self):
+        self.job_name = ''
+        self.job_id = ''
+        self.model = ''
+        self.sdk_version = ''
+        self.image_version = ''
+        self.domain = ''
+        self.task = ''
+        self.source = ''
+        self.stage = ''
+        # List of ItemResult dicts (filled via ItemResult.to_json()).
+        self.item_result = []
+
+    # Send a POST request and return the parsed 'content' field on success.
+    def _post_request(self, url, param):
+        try:
+            logging.info(url + ' query: '
+                         + str(json.dumps(param, ensure_ascii=False)))
+            res = requests.post(
+                url=url,
+                headers=self.HEADER,
+                data=json.dumps(param, ensure_ascii=False).encode('utf8'))
+            if res.status_code == 200:
+                logging.info(f'{url} post result: ' + res.text)
+                res_json = json.loads(res.text)
+                if int(res_json['errorCode']) == 200:
+                    return res_json['content']
+                else:
+                    logging.error(res.text)
+            else:
+                logging.error(res.text)
+        except Exception as e:
+            logging.error(e)
+
+        return None
+
+    # Commit model test results.
+    def batch_commit_result(self):
+        try:
+            param = {
+                'sdkVersion':
+                self.sdk_version,
+                'imageVersion':
+                self.image_version,
+                'source':
+                self.source,
+                'jobName':
+                self.job_name,
+                'jobId':
+                self.job_id,
+                'modelList': [{
+                    'model': self.model,
+                    'domain': self.domain,
+                    'task': self.task,
+                    'itemResult': self.item_result
+                }]
+            }
+            return self._post_request(self.BATCH_COMMIT_RESULT_URL, param)
+
+        except Exception as e:
+            logging.error(e)
+
+        return
+
+    # Mark the test stage as finished.
+    def batch_refresh_stage(self):
+        try:
+            param = {
+                'sdkVersion':
+                self.sdk_version,
+                'imageVersion':
+                self.image_version,
+                'source':
+                self.source,
+                'stage':
+                self.stage,
+                'modelList': [{
+                    'model': self.model,
+                    'domain': self.domain,
+                    'task': self.task
+                }]
+            }
+            return self._post_request(self.BATCH_REFRESH_STAGE_URL, param)
+
+        except Exception as e:
+            logging.error(e)
+
+        return
+
+    # Query the latest test result of a model at a given stage (returns a single result).
+    def query_model_stage(self):
+        try:
+            param = {
+                'sdkVersion': self.sdk_version,
+                'model': self.model,
+                'stage': self.stage,
+                'imageVersion': self.image_version
+            }
+            return self._post_request(self.QUERY_MODEL_STAGE_URL, param)
+
+        except Exception as e:
+            logging.error(e)
+
+        return None
+
+    # Commit model UT test results. Example usage:
+    """
+    model_tag = ModelTag()
+    model_tag.model = "XXX"
+    model_tag.sdk_version = "0.3.7"
+    model_tag.domain = "nlp"
+    model_tag.task = "word-segmentation"
+    item = model_tag.ItemResult()
+    item.result = model_tag.MODEL_PASS
+    item.name = "ALL"
+    item.info = ""
+    model_tag.item_result.append(item.to_json())
+    """
+
+    def commit_ut_result(self):
+        if self._URL is not None:
+            self.job_name = 'UT'
+            self.source = 'dev'
+            self.stage = 'integration'
+
+            self.batch_commit_result()
+            self.batch_refresh_stage()
+
+
+def commit_model_ut_result(model_name, ut_result):
+    model_tag = ModelTag()
+    model_tag.model = model_name.replace('damo/', '')
+    model_tag.sdk_version = __version__
+    # model_tag.domain = ""
+    # model_tag.task = ""
+    item = model_tag.ItemResult()
+    item.result = ut_result
+    item.name = 'ALL'
+    item.info = ''
+    model_tag.item_result.append(item.to_json())
+    model_tag.commit_ut_result()
diff --git a/modelscope/utils/test_utils.py b/modelscope/utils/test_utils.py
index 7adba982..b30c674b 100644
--- a/modelscope/utils/test_utils.py
+++ b/modelscope/utils/test_utils.py
@@ -11,6 +11,7 @@ import sys
 import tarfile
 import tempfile
 import unittest
+from collections import OrderedDict
 
 import requests
 from datasets import Dataset
@@ -71,6 +72,37 @@ def download_and_untar(fpath, furl, dst) -> str:
     return target_dir_path
 
 
+def get_case_model_info():
+    status_code, result = subprocess.getstatusoutput(
+        'grep -rn "damo/" tests/ | grep -v ".pyc" | grep -v "Binary file" | grep -v run.py '
+    )
+    lines = result.split('\n')
+    test_cases = OrderedDict()
+    model_cases = OrderedDict()
+    for line in lines:
+        # "tests/msdatasets/test_ms_dataset.py:92: model_id = 'damo/bert-base-sst2'"
+        line = line.strip()
+        elements = line.split(':')
+        test_file = elements[0]
+        model_pos = line.find('damo')
+        left_quote = line[model_pos - 1]
+        rquote_idx = line.rfind(left_quote)
+        model_name = line[model_pos:rquote_idx]
+        if test_file not in test_cases:
+            test_cases[test_file] = set()
+        model_info = test_cases[test_file]
+        model_info.add(model_name)
+
+        if model_name not in model_cases:
+            model_cases[model_name] = set()
+        case_info = model_cases[model_name]
+        case_info.add(
+            test_file.replace('tests/', '').replace('.py',
+                                                    '').replace('/', '.'))
+
+    return model_cases
+
+
 _DIST_SCRIPT_TEMPLATE = """
 import ast
 import argparse
diff --git a/tests/run.py b/tests/run.py
index 478cb9d6..51a563fe 100644
--- a/tests/run.py
+++ b/tests/run.py
@@ -24,7 +24,9 @@ import torch
 import yaml
 
 from modelscope.utils.logger import get_logger
-from modelscope.utils.test_utils import set_test_level, test_level
+from modelscope.utils.model_tag import ModelTag, commit_model_ut_result
+from modelscope.utils.test_utils import (get_case_model_info, set_test_level,
+                                         test_level)
 
 logger = get_logger()
 
@@ -62,6 +64,23 @@ def statistics_test_result(df):
         result, total_cases, success_cases, failures_cases, error_cases,
         skipped_cases, expected_failure_cases, unexpected_success_cases)
 
+    model_cases = get_case_model_info()
+    for model_name, case_info in model_cases.items():
+        cases = df.loc[df['Name'].str.contains('|'.join(list(case_info)))]
+        results = cases['Result']
+        model_result = None
+        if any(results == 'Error') or any(results == 'Failures') or any(
+                results == 'UnexpectedSuccesses'):
+            model_result = ModelTag.MODEL_FAIL
+        elif any(results == 'Success'):
+            model_result = ModelTag.MODEL_PASS
+        elif all(results == 'Skipped'):
+            model_result = ModelTag.MODEL_SKIP
+        else:
+            print(f'invalid results for {model_name}\n{results}')
+
+        if model_result is not None:
+            commit_model_ut_result(model_name, model_result)
     print('Testing result summary.')
     print(result_msg)
     if result == 'FAILED':
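
For reference, a minimal standalone sketch of the quote-scanning that get_case_model_info uses to pull a model id out of a single grep hit; the sample line below is hypothetical, following the format quoted in the function's own comment:

    # One grep hit: "<test_file>:<line_no>:<code referencing 'damo/...'>".
    line = "tests/msdatasets/test_ms_dataset.py:92:    model_id = 'damo/bert-base-sst2'"
    test_file = line.split(':')[0]           # tests/msdatasets/test_ms_dataset.py
    model_pos = line.find('damo')            # start of the model id
    left_quote = line[model_pos - 1]         # quote character just before it
    rquote_idx = line.rfind(left_quote)      # matching closing quote
    model_name = line[model_pos:rquote_idx]  # damo/bert-base-sst2
    # Test files are recorded as module paths, matched against df['Name'] in run.py.
    case = test_file.replace('tests/', '').replace('.py', '').replace('/', '.')
    assert model_name == 'damo/bert-base-sst2'
    assert case == 'msdatasets.test_ms_dataset'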