diff --git a/.dev_scripts/run_docker.sh b/.dev_scripts/run_docker.sh
new file mode 100644
index 00000000..8999458a
--- /dev/null
+++ b/.dev_scripts/run_docker.sh
@@ -0,0 +1,7 @@
+#sudo docker run --name zwm_maas -v /home/wenmeng.zwm/workspace:/home/wenmeng.zwm/workspace --net host -ti reg.docker.alibaba-inc.com/pai-dlc/tensorflow-training:2.3-gpu-py36-cu101-ubuntu18.04 bash
+#sudo docker run --name zwm_maas_pytorch -v /home/wenmeng.zwm/workspace:/home/wenmeng.zwm/workspace --net host -ti reg.docker.alibaba-inc.com/pai-dlc/pytorch-training:1.10PAI-gpu-py36-cu113-ubuntu18.04 bash
+CONTAINER_NAME=modelscope-dev
+IMAGE_NAME=registry.cn-shanghai.aliyuncs.com/modelscope/modelscope
+IMAGE_VERSION=v0.1.1-16-g62856fa-devel
+MOUNT_DIR=/home/wenmeng.zwm/workspace
+sudo docker run --name $CONTAINER_NAME -v $MOUNT_DIR:$MOUNT_DIR --net host -ti ${IMAGE_NAME}:${IMAGE_VERSION} bash
diff --git a/docker/pytorch.dockerfile b/docker/pytorch.dockerfile
index 4862cab6..a1fe5b15 100644
--- a/docker/pytorch.dockerfile
+++ b/docker/pytorch.dockerfile
@@ -30,7 +30,8 @@ RUN apt-get update &&\
     zip \
     zlib1g-dev \
     unzip \
-    pkg-config
+    pkg-config \
+    libsndfile1
 
 # install modelscope and its python env
 WORKDIR /opt/modelscope
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 3b223531..e93c7aed 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -13,6 +13,7 @@ ModelScope doc
 
    quick_start.md
    develop.md
+   faq.md
 
 .. toctree::
    :maxdepth: 2
@@ -20,6 +21,8 @@
 
    tutorials/index
 
+
+
 .. toctree::
    :maxdepth: 2
    :caption: Changelog
diff --git a/modelscope/models/__init__.py b/modelscope/models/__init__.py
index f1074f68..778bef84 100644
--- a/modelscope/models/__init__.py
+++ b/modelscope/models/__init__.py
@@ -1,11 +1,26 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
-
-from .audio.ans.frcrn import FRCRNModel
-from .audio.kws import GenericKeyWordSpotting
-from .audio.tts.am import SambertNetHifi16k
-from .audio.tts.vocoder import Hifigan16k
 from .base import Model
 from .builder import MODELS, build_model
-from .multi_modal import OfaForImageCaptioning
-from .nlp import (BertForSequenceClassification, SbertForSentenceSimilarity,
-                  SbertForZeroShotClassification)
+
+try:
+    from .audio.tts.am import SambertNetHifi16k
+    from .audio.tts.vocoder import Hifigan16k
+
+except ModuleNotFoundError as e:
+    if str(e) == "No module named 'tensorflow'":
+        pass
+    else:
+        raise ModuleNotFoundError(e)
+
+try:
+    from .audio.kws import GenericKeyWordSpotting
+    from .multi_modal import OfaForImageCaptioning
+    from .nlp import (BertForSequenceClassification,
+                      SbertForSentenceSimilarity,
+                      SbertForZeroShotClassification)
+    from .audio.ans.frcrn import FRCRNModel
+except ModuleNotFoundError as e:
+    if str(e) == "No module named 'torch'":
+        pass
+    else:
+        raise ModuleNotFoundError(e)
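The guards above let `modelscope.models` import cleanly when an optional backend is absent, by swallowing only the ModuleNotFoundError raised for that backend. A minimal sketch of the same idea, written with a plain top-level import so it runs on its own; it checks the exception's `name` attribute instead of comparing the full message string, and the bare `raise` keeps the original traceback:

try:
    import tensorflow  # noqa: F401  # stands in for any optional backend import
except ModuleNotFoundError as e:
    if e.name != 'tensorflow':
        # a different dependency is genuinely missing, so surface it
        raise
    # otherwise skip the TensorFlow-only models and keep importing
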
diff --git a/modelscope/pipelines/audio/__init__.py b/modelscope/pipelines/audio/__init__.py
index 87ccd49a..c4dc0100 100644
--- a/modelscope/pipelines/audio/__init__.py
+++ b/modelscope/pipelines/audio/__init__.py
@@ -1,3 +1,16 @@
-from .kws_kwsbp_pipeline import *  # noqa F403
-from .linear_aec_pipeline import LinearAECPipeline
-from .text_to_speech_pipeline import *  # noqa F403
+try:
+    from .kws_kwsbp_pipeline import *  # noqa F403
+    from .linear_aec_pipeline import LinearAECPipeline
+except ModuleNotFoundError as e:
+    if str(e) == "No module named 'torch'":
+        pass
+    else:
+        raise ModuleNotFoundError(e)
+
+try:
+    from .text_to_speech_pipeline import *  # noqa F403
+except ModuleNotFoundError as e:
+    if str(e) == "No module named 'tensorflow'":
+        pass
+    else:
+        raise ModuleNotFoundError(e)
diff --git a/modelscope/pipelines/cv/__init__.py b/modelscope/pipelines/cv/__init__.py
index b046e076..aa393ec5 100644
--- a/modelscope/pipelines/cv/__init__.py
+++ b/modelscope/pipelines/cv/__init__.py
@@ -1,5 +1,18 @@
-from .action_recognition_pipeline import ActionRecognitionPipeline
-from .animal_recog_pipeline import AnimalRecogPipeline
-from .image_cartoon_pipeline import ImageCartoonPipeline
-from .image_matting_pipeline import ImageMattingPipeline
-from .ocr_detection_pipeline import OCRDetectionPipeline
+try:
+    from .action_recognition_pipeline import ActionRecognitionPipeline
+    from .animal_recog_pipeline import AnimalRecogPipeline
+except ModuleNotFoundError as e:
+    if str(e) == "No module named 'torch'":
+        pass
+    else:
+        raise ModuleNotFoundError(e)
+
+try:
+    from .image_cartoon_pipeline import ImageCartoonPipeline
+    from .image_matting_pipeline import ImageMattingPipeline
+    from .ocr_detection_pipeline import OCRDetectionPipeline
+except ModuleNotFoundError as e:
+    if str(e) == "No module named 'tensorflow'":
+        pass
+    else:
+        raise ModuleNotFoundError(e)
diff --git a/modelscope/pipelines/multi_modal/__init__.py b/modelscope/pipelines/multi_modal/__init__.py
index fdcada89..49b07cce 100644
--- a/modelscope/pipelines/multi_modal/__init__.py
+++ b/modelscope/pipelines/multi_modal/__init__.py
@@ -1,3 +1,9 @@
-from .image_captioning_pipeline import ImageCaptionPipeline
-from .multi_modal_embedding_pipeline import MultiModalEmbeddingPipeline
-from .visual_question_answering_pipeline import VisualQuestionAnsweringPipeline
+try:
+    from .image_captioning_pipeline import ImageCaptionPipeline
+    from .multi_modal_embedding_pipeline import MultiModalEmbeddingPipeline
+    from .visual_question_answering_pipeline import VisualQuestionAnsweringPipeline
+except ModuleNotFoundError as e:
+    if str(e) == "No module named 'torch'":
+        pass
+    else:
+        raise ModuleNotFoundError(e)
diff --git a/modelscope/pipelines/nlp/__init__.py b/modelscope/pipelines/nlp/__init__.py
index 5ef12e22..76ed6d4a 100644
--- a/modelscope/pipelines/nlp/__init__.py
+++ b/modelscope/pipelines/nlp/__init__.py
@@ -1,6 +1,12 @@
-from .fill_mask_pipeline import *  # noqa F403
-from .sentence_similarity_pipeline import *  # noqa F403
-from .sequence_classification_pipeline import *  # noqa F403
-from .text_generation_pipeline import *  # noqa F403
-from .word_segmentation_pipeline import *  # noqa F403
-from .zero_shot_classification_pipeline import *  # noqa F403
+try:
+    from .fill_mask_pipeline import *  # noqa F403
+    from .sentence_similarity_pipeline import *  # noqa F403
+    from .sequence_classification_pipeline import *  # noqa F403
+    from .text_generation_pipeline import *  # noqa F403
+    from .word_segmentation_pipeline import *  # noqa F403
+    from .zero_shot_classification_pipeline import *  # noqa F403
+except ModuleNotFoundError as e:
+    if str(e) == "No module named 'torch'":
+        pass
+    else:
+        raise ModuleNotFoundError(e)
diff --git a/modelscope/preprocessors/__init__.py b/modelscope/preprocessors/__init__.py
index 694688f6..ae51b2bb 100644
--- a/modelscope/preprocessors/__init__.py
+++ b/modelscope/preprocessors/__init__.py
@@ -1,11 +1,18 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 
-from .audio import LinearAECAndFbank
 from .base import Preprocessor
 from .builder import PREPROCESSORS, build_preprocessor
 from .common import Compose
 from .image import LoadImage, load_image
 from .kws import WavToLists
-from .multi_modal import *  # noqa F403
-from .nlp import *  # noqa F403
 from .text_to_speech import *  # noqa F403
+
+try:
+    from .audio import LinearAECAndFbank
+    from .multi_modal import *  # noqa F403
+    from .nlp import *  # noqa F403
+except ModuleNotFoundError as e:
+    if str(e) == "No module named 'tensorflow'":
+        pass
+    else:
+        raise ModuleNotFoundError(e)
diff --git a/modelscope/utils/check_requirements.py b/modelscope/utils/check_requirements.py
new file mode 100644
index 00000000..7aad8e4e
--- /dev/null
+++ b/modelscope/utils/check_requirements.py
@@ -0,0 +1,79 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+from modelscope.utils.constant import Fields, Requirements
+from modelscope.utils.import_utils import requires
+
+
+def get_msg(field):
+    msg = f'\n{field} requirements not installed, please execute ' \
+          f'`pip install -r requirements/{field}.txt` or ' \
+          f'`pip install modelscope[{field}]`'
+    return msg
+
+
+class NLPModuleNotFoundError(ModuleNotFoundError):
+
+    def __init__(self, e: ModuleNotFoundError) -> None:
+        e.msg += get_msg(Fields.nlp)
+        super().__init__(e)
+
+
+class CVModuleNotFoundError(ModuleNotFoundError):
+
+    def __init__(self, e: ModuleNotFoundError) -> None:
+        e.msg += get_msg(Fields.cv)
+        super().__init__(e)
+
+
+class AudioModuleNotFoundError(ModuleNotFoundError):
+
+    def __init__(self, e: ModuleNotFoundError) -> None:
+        e.msg += get_msg(Fields.audio)
+        super().__init__(e)
+
+
+class MultiModalModuleNotFoundError(ModuleNotFoundError):
+
+    def __init__(self, e: ModuleNotFoundError) -> None:
+        e.msg += get_msg(Fields.multi_modal)
+        super().__init__(e)
+
+
+def check_nlp():
+    try:
+        requires('nlp models', (
+            Requirements.torch,
+            Requirements.tokenizers,
+        ))
+    except ImportError as e:
+        raise NLPModuleNotFoundError(e)
+
+
+def check_cv():
+    try:
+        requires('cv models', (
+            Requirements.torch,
+            Requirements.tokenizers,
+        ))
+    except ImportError as e:
+        raise CVModuleNotFoundError(e)
+
+
+def check_audio():
+    try:
+        requires('audio models', (
+            Requirements.torch,
+            Requirements.tf,
+        ))
+    except ImportError as e:
+        raise AudioModuleNotFoundError(e)
+
+
+def check_multi_modal():
+    try:
+        requires('multi-modal models', (
+            Requirements.torch,
+            Requirements.tokenizers,
+        ))
+    except ImportError as e:
+        raise MultiModalModuleNotFoundError(e)
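The check_* helpers are intended to run before a field's imports so that a missing dependency fails with the install hint from get_msg(). A sketch of a hypothetical call site; the imported class is only an example of an NLP model exposed elsewhere in this patch:

from modelscope.utils.check_requirements import check_nlp

check_nlp()  # raises NLPModuleNotFoundError when torch or tokenizers is absent
from modelscope.models.nlp import SbertForSentenceSimilarity  # noqa: E402
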
diff --git a/modelscope/utils/config.py b/modelscope/utils/config.py
index df9e38fd..79307f17 100644
--- a/modelscope/utils/config.py
+++ b/modelscope/utils/config.py
@@ -17,9 +17,10 @@ from typing import Dict
 import addict
 from yapf.yapflib.yapf_api import FormatCode
 
+from modelscope.utils.import_utils import (import_modules,
+                                           import_modules_from_file,
+                                           validate_py_syntax)
 from modelscope.utils.logger import get_logger
-from modelscope.utils.pymod import (import_modules, import_modules_from_file,
-                                    validate_py_syntax)
 
 if platform.system() == 'Windows':
     import regex as re  # type: ignore
diff --git a/modelscope/utils/constant.py b/modelscope/utils/constant.py
index 3ce3ab98..b6b9afbf 100644
--- a/modelscope/utils/constant.py
+++ b/modelscope/utils/constant.py
@@ -97,5 +97,18 @@ class ModelFile(object):
     TORCH_MODEL_BIN_FILE = 'pytorch_model.bin'
 
 
+class Requirements(object):
+    """Requirement names for each module
+    """
+    protobuf = 'protobuf'
+    sentencepiece = 'sentencepiece'
+    sklearn = 'sklearn'
+    scipy = 'scipy'
+    timm = 'timm'
+    tokenizers = 'tokenizers'
+    tf = 'tf'
+    torch = 'torch'
+
+
 TENSORFLOW = 'tensorflow'
 PYTORCH = 'pytorch'
diff --git a/modelscope/utils/import_utils.py b/modelscope/utils/import_utils.py
new file mode 100644
index 00000000..e4192082
--- /dev/null
+++ b/modelscope/utils/import_utils.py
@@ -0,0 +1,324 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+# Part of the implementation is borrowed from huggingface/transformers.
+import ast
+import functools
+import importlib.util
+import os
+import os.path as osp
+import sys
+import types
+from collections import OrderedDict
+from functools import wraps
+from importlib import import_module
+from itertools import chain
+from types import ModuleType
+from typing import Any
+
+import json
+from packaging import version
+
+from modelscope.utils.constant import Fields
+from modelscope.utils.logger import get_logger
+
+if sys.version_info < (3, 8):
+    import importlib_metadata
+else:
+    import importlib.metadata as importlib_metadata
+
+logger = get_logger()
+
+
+def import_modules_from_file(py_file: str):
+    """ Import module from a certain file
+
+    Args:
+        py_file: path to a python file to be imported
+
+    Return:
+
+    """
+    dirname, basefile = os.path.split(py_file)
+    if dirname == '':
+        dirname = './'
+    module_name = osp.splitext(basefile)[0]
+    sys.path.insert(0, dirname)
+    validate_py_syntax(py_file)
+    mod = import_module(module_name)
+    sys.path.pop(0)
+    return module_name, mod
+
+
+def import_modules(imports, allow_failed_imports=False):
+    """Import modules from the given list of strings.
+
+    Args:
+        imports (list | str | None): The given module names to be imported.
+        allow_failed_imports (bool): If True, the failed imports will return
+            None. Otherwise, an ImportError is raised. Default: False.
+
+    Returns:
+        list[module] | module | None: The imported modules.
+
+    Examples:
+        >>> osp, sys = import_modules(
+        ...     ['os.path', 'sys'])
+        >>> import os.path as osp_
+        >>> import sys as sys_
+        >>> assert osp == osp_
+        >>> assert sys == sys_
+    """
+    if not imports:
+        return
+    single_import = False
+    if isinstance(imports, str):
+        single_import = True
+        imports = [imports]
+    if not isinstance(imports, list):
+        raise TypeError(
+            f'custom_imports must be a list but got type {type(imports)}')
+    imported = []
+    for imp in imports:
+        if not isinstance(imp, str):
+            raise TypeError(
+                f'{imp} is of type {type(imp)} and cannot be imported.')
+        try:
+            imported_tmp = import_module(imp)
+        except ImportError:
+            if allow_failed_imports:
+                logger.warning(f'{imp} failed to import and is ignored.')
+                imported_tmp = None
+            else:
+                raise ImportError
+        imported.append(imported_tmp)
+    if single_import:
+        imported = imported[0]
+    return imported
+
+
+def validate_py_syntax(filename):
+    with open(filename, 'r', encoding='utf-8') as f:
+        # Setting encoding explicitly to resolve coding issue on windows
+        content = f.read()
+    try:
+        ast.parse(content)
+    except SyntaxError as e:
+        raise SyntaxError('There are syntax errors in config '
+                          f'file {filename}: {e}')
+
+
+# following code borrows implementation from huggingface/transformers
+ENV_VARS_TRUE_VALUES = {'1', 'ON', 'YES', 'TRUE'}
+ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({'AUTO'})
+USE_TF = os.environ.get('USE_TF', 'AUTO').upper()
+USE_TORCH = os.environ.get('USE_TORCH', 'AUTO').upper()
+_torch_version = 'N/A'
+if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
+    _torch_available = importlib.util.find_spec('torch') is not None
+    if _torch_available:
+        try:
+            _torch_version = importlib_metadata.version('torch')
+            logger.info(f'PyTorch version {_torch_version} available.')
+        except importlib_metadata.PackageNotFoundError:
+            _torch_available = False
+else:
+    logger.info('Disabling PyTorch because USE_TF is set')
+    _torch_available = False
+
+_tf_version = 'N/A'
+if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
+    _tf_available = importlib.util.find_spec('tensorflow') is not None
+    if _tf_available:
+        candidates = (
+            'tensorflow',
+            'tensorflow-cpu',
+            'tensorflow-gpu',
+            'tf-nightly',
+            'tf-nightly-cpu',
+            'tf-nightly-gpu',
+            'intel-tensorflow',
+            'intel-tensorflow-avx512',
+            'tensorflow-rocm',
+            'tensorflow-macos',
+        )
+        _tf_version = None
+        # For the metadata, we have to look for both tensorflow and tensorflow-cpu
+        for pkg in candidates:
+            try:
+                _tf_version = importlib_metadata.version(pkg)
+                break
+            except importlib_metadata.PackageNotFoundError:
+                pass
+        _tf_available = _tf_version is not None
+    if _tf_available:
+        if version.parse(_tf_version) < version.parse('2'):
+            pass
+        else:
+            logger.info(f'TensorFlow version {_tf_version} available.')
+else:
+    logger.info('Disabling Tensorflow because USE_TORCH is set')
+    _tf_available = False
+
+_timm_available = importlib.util.find_spec('timm') is not None
+try:
+    _timm_version = importlib_metadata.version('timm')
+    logger.debug(f'Successfully imported timm version {_timm_version}')
+except importlib_metadata.PackageNotFoundError:
+    _timm_available = False
+
+
+def is_scipy_available():
+    return importlib.util.find_spec('scipy') is not None
+
+
+def is_sklearn_available():
+    if importlib.util.find_spec('sklearn') is None:
+        return False
+    return is_scipy_available() and importlib.util.find_spec('sklearn.metrics')
+
+
+def is_sentencepiece_available():
+    return importlib.util.find_spec('sentencepiece') is not None
+
+
+def is_protobuf_available():
+    if importlib.util.find_spec('google') is None:
+        return False
+    return importlib.util.find_spec('google.protobuf') is not None
+
+
+def is_tokenizers_available():
+    return importlib.util.find_spec('tokenizers') is not None
+
+
+def is_timm_available():
+    return _timm_available
+
+
+def is_torch_available():
+    return _torch_available
+
+
+def is_torch_cuda_available():
+    if is_torch_available():
+        import torch
+
+        return torch.cuda.is_available()
+    else:
+        return False
+
+
+def is_tf_available():
+    return _tf_available
+
+
+# docstyle-ignore
+PROTOBUF_IMPORT_ERROR = """
+{0} requires the protobuf library but it was not found in your environment. Checkout the instructions on the
+installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and
+follow the ones that match your environment.
+"""
+
+# docstyle-ignore
+SENTENCEPIECE_IMPORT_ERROR = """
+{0} requires the SentencePiece library but it was not found in your environment. Checkout the instructions on the
+installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones
+that match your environment.
+"""
+
+# docstyle-ignore
+SKLEARN_IMPORT_ERROR = """
+{0} requires the scikit-learn library but it was not found in your environment. You can install it with:
+```
+pip install -U scikit-learn
+```
+In a notebook or a colab, you can install it by executing a cell with
+```
+!pip install -U scikit-learn
+```
+"""
+
+# docstyle-ignore
+TENSORFLOW_IMPORT_ERROR = """
+{0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the
+installation page: https://www.tensorflow.org/install and follow the ones that match your environment.
+"""
+
+# docstyle-ignore
+TIMM_IMPORT_ERROR = """
+{0} requires the timm library but it was not found in your environment. You can install it with pip:
+`pip install timm`
+"""
+
+# docstyle-ignore
+TOKENIZERS_IMPORT_ERROR = """
+{0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with:
+```
+pip install tokenizers
+```
+In a notebook or a colab, you can install it by executing a cell with
+```
+!pip install tokenizers
+```
+"""
+
+# docstyle-ignore
+PYTORCH_IMPORT_ERROR = """
+{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the
+installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
+"""
+
+# docstyle-ignore
+SCIPY_IMPORT_ERROR = """
+{0} requires the scipy library but it was not found in your environment. You can install it with pip:
+`pip install scipy`
+"""
+
+REQUIREMENTS_MAAPING = OrderedDict([
+    ('protobuf', (is_protobuf_available, PROTOBUF_IMPORT_ERROR)),
+    ('sentencepiece', (is_sentencepiece_available,
+                       SENTENCEPIECE_IMPORT_ERROR)),
+    ('sklearn', (is_sklearn_available, SKLEARN_IMPORT_ERROR)),
+    ('tf', (is_tf_available, TENSORFLOW_IMPORT_ERROR)),
+    ('timm', (is_timm_available, TIMM_IMPORT_ERROR)),
+    ('tokenizers', (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)),
+    ('torch', (is_torch_available, PYTORCH_IMPORT_ERROR)),
+    ('scipy', (is_scipy_available, SCIPY_IMPORT_ERROR)),
+])
+
+
+def requires(obj, requirements):
+    if not isinstance(requirements, (list, tuple)):
+        requirements = [requirements]
+    if isinstance(obj, str):
+        name = obj
+    else:
+        name = obj.__name__ if hasattr(obj,
+                                       '__name__') else obj.__class__.__name__
+    checks = (REQUIREMENTS_MAAPING[req] for req in requirements)
+    failed = [msg.format(name) for available, msg in checks if not available()]
+    if failed:
+        raise ImportError(''.join(failed))
+
+
+def torch_required(func):
+    # Chose a different decorator name than in tests so it's clear they are not the same.
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        if is_torch_available():
+            return func(*args, **kwargs)
+        else:
+            raise ImportError(f'Method `{func.__name__}` requires PyTorch.')
+
+    return wrapper
+
+
+def tf_required(func):
+    # Chose a different decorator name than in tests so it's clear they are not the same.
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        if is_tf_available():
+            return func(*args, **kwargs)
+        else:
+            raise ImportError(f'Method `{func.__name__}` requires TF.')
+
+    return wrapper
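The torch_required and tf_required decorators guard single functions rather than whole modules. A small sketch of applying one of them; the decorated function is made up for illustration:

from modelscope.utils.import_utils import torch_required


@torch_required
def count_parameters(model):
    # only runs when torch can be imported; otherwise the wrapper raises ImportError
    return sum(p.numel() for p in model.parameters())
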
diff --git a/modelscope/utils/pymod.py b/modelscope/utils/pymod.py
deleted file mode 100644
index 6db6798d..00000000
--- a/modelscope/utils/pymod.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
-
-import ast
-import os
-import os.path as osp
-import sys
-import types
-from importlib import import_module
-
-from modelscope.utils.logger import get_logger
-
-logger = get_logger()
-
-
-def import_modules_from_file(py_file: str):
-    """ Import module from a certrain file
-
-    Args:
-        py_file: path to a python file to be imported
-
-    Return:
-
-    """
-    dirname, basefile = os.path.split(py_file)
-    if dirname == '':
-        dirname == './'
-    module_name = osp.splitext(basefile)[0]
-    sys.path.insert(0, dirname)
-    validate_py_syntax(py_file)
-    mod = import_module(module_name)
-    sys.path.pop(0)
-    return module_name, mod
-
-
-def import_modules(imports, allow_failed_imports=False):
-    """Import modules from the given list of strings.
-
-    Args:
-        imports (list | str | None): The given module names to be imported.
-        allow_failed_imports (bool): If True, the failed imports will return
-            None. Otherwise, an ImportError is raise. Default: False.
-
-    Returns:
-        list[module] | module | None: The imported modules.
-
-    Examples:
-        >>> osp, sys = import_modules(
-        ...     ['os.path', 'sys'])
-        >>> import os.path as osp_
-        >>> import sys as sys_
-        >>> assert osp == osp_
-        >>> assert sys == sys_
-    """
-    if not imports:
-        return
-    single_import = False
-    if isinstance(imports, str):
-        single_import = True
-        imports = [imports]
-    if not isinstance(imports, list):
-        raise TypeError(
-            f'custom_imports must be a list but got type {type(imports)}')
-    imported = []
-    for imp in imports:
-        if not isinstance(imp, str):
-            raise TypeError(
-                f'{imp} is of type {type(imp)} and cannot be imported.')
-        try:
-            imported_tmp = import_module(imp)
-        except ImportError:
-            if allow_failed_imports:
-                logger.warning(f'{imp} failed to import and is ignored.')
-                imported_tmp = None
-            else:
-                raise ImportError
-        imported.append(imported_tmp)
-    if single_import:
-        imported = imported[0]
-    return imported
-
-
-def validate_py_syntax(filename):
-    with open(filename, 'r', encoding='utf-8') as f:
-        # Setting encoding explicitly to resolve coding issue on windows
-        content = f.read()
-    try:
-        ast.parse(content)
-    except SyntaxError as e:
-        raise SyntaxError('There are syntax errors in config '
-                          f'file {filename}: {e}')
diff --git a/modelscope/utils/registry.py b/modelscope/utils/registry.py
index 8009b084..9b37252b 100644
--- a/modelscope/utils/registry.py
+++ b/modelscope/utils/registry.py
@@ -1,7 +1,9 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 
 import inspect
+from typing import List, Tuple, Union
 
+from modelscope.utils.import_utils import requires
 from modelscope.utils.logger import get_logger
 
 default_group = 'default'
@@ -52,9 +54,14 @@ class Registry(object):
     def _register_module(self,
                          group_key=default_group,
                          module_name=None,
-                         module_cls=None):
+                         module_cls=None,
+                         requirements=None):
         assert isinstance(group_key,
                           str), 'group_key is required and must be str'
+
+        if requirements is not None:
+            requires(module_cls, requirements)
+
         if group_key not in self._modules:
             self._modules[group_key] = dict()
 
@@ -86,7 +93,8 @@ class Registry(object):
     def register_module(self,
                         group_key: str = default_group,
                         module_name: str = None,
-                        module_cls: type = None):
+                        module_cls: type = None,
+                        requirements: Union[List, Tuple] = None):
         """ Register module
 
         Example:
@@ -110,17 +118,18 @@ class Registry(object):
             default group name is 'default'
             module_name: Module name
             module_cls: Module class object
+            requirements: Module necessary requirements
         """
         if not (module_name is None or isinstance(module_name, str)):
             raise TypeError(f'module_name must be either of None, str,'
                             f'got {type(module_name)}')
-
         if module_cls is not None:
             self._register_module(
                 group_key=group_key,
                 module_name=module_name,
-                module_cls=module_cls)
+                module_cls=module_cls,
+                requirements=requirements)
             return module_cls
 
         # if module_cls is None, should return a decorator function
@@ -128,7 +137,8 @@ class Registry(object):
             self._register_module(
                 group_key=group_key,
                 module_name=module_name,
-                module_cls=module_cls)
+                module_cls=module_cls,
+                requirements=requirements)
             return module_cls
 
         return _register
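With the registry hook above, a module can declare its dependencies at registration time, and requires() turns a missing dependency into an ImportError carrying the aggregated install hints. A sketch against the new signature; the task and module names are invented for illustration:

from modelscope.models.builder import MODELS
from modelscope.utils.constant import Requirements


@MODELS.register_module(
    'example-task',
    module_name='example-model',
    requirements=(Requirements.torch, Requirements.tokenizers))
class ExampleModel:
    pass
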
diff --git a/requirements.txt b/requirements.txt
index b9b4a1c4..c6e294ba 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1 @@
 -r requirements/runtime.txt
--r requirements/pipeline.txt
--r requirements/multi-modal.txt
--r requirements/nlp.txt
--r requirements/audio.txt
--r requirements/cv.txt
diff --git a/requirements/audio.txt b/requirements/audio.txt
index 1f5984ca..4c009d27 100644
--- a/requirements/audio.txt
+++ b/requirements/audio.txt
@@ -1,10 +1,5 @@
 #tts
 h5py
-https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/pytorch_wavelets-1.3.0-py3-none-any.whl
-https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.2-cp36-cp36m-linux_x86_64.whl; python_version=='3.6'
-https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.2-cp37-cp37m-linux_x86_64.whl; python_version=='3.7'
-https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.2-cp38-cp38-linux_x86_64.whl; python_version=='3.8'
-https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/TTS/requirements/ttsfrd-0.0.2-cp39-cp39-linux_x86_64.whl; python_version=='3.9'
 inflect
 keras
 librosa
@@ -14,6 +9,7 @@ nara_wpe
 numpy
 protobuf>3,<=3.20
 ptflops
+pytorch_wavelets==1.3.0
 PyWavelets>=1.0.0
 scikit-learn
 SoundFile>0.10
@@ -24,4 +20,5 @@ torch
 torchaudio
 torchvision
 tqdm
+ttsfrd==0.0.2
 unidecode
diff --git a/requirements/multi-modal.txt b/requirements/multi-modal.txt
index ad641b63..b96bdd01 100644
--- a/requirements/multi-modal.txt
+++ b/requirements/multi-modal.txt
@@ -1,8 +1,6 @@
-datasets
-einops
+fairseq==maas
 ftfy>=6.0.3
-https://jirenmr.oss-cn-zhangjiakou.aliyuncs.com/ofa/fairseq-maas-py3-none-any.whl
-https://jirenmr.oss-cn-zhangjiakou.aliyuncs.com/ofa/ofa-0.0.2-py3-none-any.whl
+ofa==0.0.2
 pycocoevalcap>=1.2
 pycocotools>=2.0.4
 rouge_score
diff --git a/requirements/nlp.txt b/requirements/nlp.txt
index 58dbe839..2a34f3cf 100644
--- a/requirements/nlp.txt
+++ b/requirements/nlp.txt
@@ -1 +1 @@
-https://alinlp.alibaba-inc.com/pypi/sofa-1.0.4.2-py3-none-any.whl
+sofa==1.0.4.2
diff --git a/requirements/pipeline.txt b/requirements/pipeline.txt
deleted file mode 100644
index 64500a6b..00000000
--- a/requirements/pipeline.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-#https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/package/whl/easynlp-0.0.4-py2.py3-none-any.whl
-# tensorflow
-#--find-links https://download.pytorch.org/whl/torch_stable.html
-# torch<1.10,>=1.8.0
-# torchaudio
-# torchvision
diff --git a/requirements/runtime.txt b/requirements/runtime.txt
index 6580de53..1fcce7ff 100644
--- a/requirements/runtime.txt
+++ b/requirements/runtime.txt
@@ -1,16 +1,18 @@
 addict
 datasets
 easydict
+einops
 filelock>=3.3.0
 numpy
-opencv-python-headless
+opencv-python
 Pillow>=6.2.0
+protobuf>3,<=3.20
 pyyaml
 requests
-requests==2.27.1
 scipy
-setuptools==58.0.4
+setuptools
 tokenizers<=0.10.3
+torch
 tqdm>=4.64.0
-transformers<=4.16.2
+transformers<=4.16.2,>=4.10.3
 yapf
diff --git a/setup.py b/setup.py
index b027c4cb..3b40ac8b 100644
--- a/setup.py
+++ b/setup.py
@@ -5,6 +5,8 @@ import shutil
 import subprocess
 
 from setuptools import find_packages, setup
+
+from modelscope.utils.constant import Fields
 
 
 def readme():
     with open('README.md', encoding='utf-8') as f:
@@ -169,6 +171,16 @@ if __name__ == '__main__':
     pack_resource()
     os.chdir('package')
     install_requires, deps_link = parse_requirements('requirements.txt')
+    extra_requires = {}
+    all_requires = []
+    for field in dir(Fields):
+        if field.startswith('_'):
+            continue
+        extra_requires[field], _ = parse_requirements(
+            f'requirements/{field}.txt')
+        all_requires.extend(extra_requires[field])
+    extra_requires['all'] = all_requires
+
     setup(
         name='model-scope',
         version=get_version(),
@@ -193,5 +205,6 @@ if __name__ == '__main__':
         license='Apache License 2.0',
         tests_require=parse_requirements('requirements/tests.txt'),
         install_requires=install_requires,
+        extras_require=extra_requires,
         dependency_links=deps_link,
         zip_safe=False)
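Once extras_require is populated this way, each field's dependencies become an optional extra that can be installed on demand; the extra names come from the attributes of Fields, and the distribution name below assumes the name used in the setup() call above:

pip install "model-scope[nlp]"    # one field's dependencies
pip install "model-scope[all]"    # every field
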
diff --git a/tests/utils/test_check_requirements.py b/tests/utils/test_check_requirements.py
new file mode 100644
index 00000000..2ad19e82
--- /dev/null
+++ b/tests/utils/test_check_requirements.py
@@ -0,0 +1,22 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+import unittest
+from typing import List, Union
+
+from modelscope.utils.check_requirements import NLPModuleNotFoundError, get_msg
+from modelscope.utils.constant import Fields
+
+
+class ImportUtilsTest(unittest.TestCase):
+
+    def test_type_module_not_found(self):
+        with self.assertRaises(NLPModuleNotFoundError) as ctx:
+            try:
+                import not_found
+            except ModuleNotFoundError as e:
+                raise NLPModuleNotFoundError(e)
+        self.assertTrue(get_msg(Fields.nlp) in ctx.exception.msg.msg)
+
+
+if __name__ == '__main__':
+    unittest.main()