diff --git a/modelscope/exporters/torch_model_exporter.py b/modelscope/exporters/torch_model_exporter.py index 1d332591..9e34f769 100644 --- a/modelscope/exporters/torch_model_exporter.py +++ b/modelscope/exporters/torch_model_exporter.py @@ -17,7 +17,7 @@ from modelscope.utils.regress_test_utils import (compare_arguments_nested, numpify_tensor_nested) from .base import Exporter -logger = get_logger(__name__) +logger = get_logger() class TorchModelExporter(Exporter): diff --git a/modelscope/models/base/base_torch_head.py b/modelscope/models/base/base_torch_head.py index faee4296..fb69be4d 100644 --- a/modelscope/models/base/base_torch_head.py +++ b/modelscope/models/base/base_torch_head.py @@ -6,7 +6,7 @@ import torch from modelscope.models.base.base_head import Head from modelscope.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() class TorchHead(Head, torch.nn.Module): diff --git a/modelscope/models/base/base_torch_model.py b/modelscope/models/base/base_torch_model.py index ff059f7b..b5515b25 100644 --- a/modelscope/models/base/base_torch_model.py +++ b/modelscope/models/base/base_torch_model.py @@ -10,7 +10,7 @@ from modelscope.utils.hub import parse_label_mapping from modelscope.utils.logger import get_logger from .base_model import Model -logger = get_logger(__name__) +logger = get_logger() class TorchModel(Model, torch.nn.Module): diff --git a/modelscope/models/multi_modal/mplug/configuration_mplug.py b/modelscope/models/multi_modal/mplug/configuration_mplug.py index 946ebb82..9900ff7c 100644 --- a/modelscope/models/multi_modal/mplug/configuration_mplug.py +++ b/modelscope/models/multi_modal/mplug/configuration_mplug.py @@ -23,7 +23,7 @@ from transformers.utils import logging from modelscope.utils.constant import Tasks -logger = logging.get_logger(__name__) +logger = logging.get_logger() class MPlugConfig(PretrainedConfig): diff --git a/modelscope/models/multi_modal/mplug/modeling_mplug.py 
b/modelscope/models/multi_modal/mplug/modeling_mplug.py index 1d003f5c..42eaadc8 100755 --- a/modelscope/models/multi_modal/mplug/modeling_mplug.py +++ b/modelscope/models/multi_modal/mplug/modeling_mplug.py @@ -46,7 +46,7 @@ from modelscope.utils.constant import ModelFile transformers.logging.set_verbosity_error() -logger = logging.get_logger(__name__) +logger = logging.get_logger() CONFIG_NAME = 'config.yaml' diff --git a/modelscope/models/multi_modal/ofa/configuration_mmspeech.py b/modelscope/models/multi_modal/ofa/configuration_mmspeech.py index 37be12e9..4793ee7f 100644 --- a/modelscope/models/multi_modal/ofa/configuration_mmspeech.py +++ b/modelscope/models/multi_modal/ofa/configuration_mmspeech.py @@ -17,7 +17,7 @@ import warnings from transformers import PretrainedConfig from transformers.utils import logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class MMSpeechConfig(PretrainedConfig): diff --git a/modelscope/models/multi_modal/ofa/configuration_ofa.py b/modelscope/models/multi_modal/ofa/configuration_ofa.py index 2edc651e..e82b542e 100644 --- a/modelscope/models/multi_modal/ofa/configuration_ofa.py +++ b/modelscope/models/multi_modal/ofa/configuration_ofa.py @@ -17,7 +17,7 @@ import warnings from transformers import PretrainedConfig from transformers.utils import logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() OFA_PRETRAINED_CONFIG_ARCHIVE_MAP = { 'ofa-medium': 'https://huggingface.co/ofa-base/resolve/main/config.json', diff --git a/modelscope/models/multi_modal/ofa/modeling_mmspeech.py b/modelscope/models/multi_modal/ofa/modeling_mmspeech.py index 07d5b7e8..7c76f0bc 100644 --- a/modelscope/models/multi_modal/ofa/modeling_mmspeech.py +++ b/modelscope/models/multi_modal/ofa/modeling_mmspeech.py @@ -44,7 +44,7 @@ from .generate import utils from .modeling_ofa import (Embedding, OFADecoder, OFAModel, OFAPreTrainedModel, _expand_mask, shift_tokens_right) -logger = logging.get_logger(__name__) 
+logger = logging.get_logger() _CHECKPOINT_FOR_DOC = 'mmspeech-base' _CONFIG_FOR_DOC = 'MMSpeechConfig' diff --git a/modelscope/models/multi_modal/ofa/modeling_ofa.py b/modelscope/models/multi_modal/ofa/modeling_ofa.py index 69005ef0..25e866bc 100644 --- a/modelscope/models/multi_modal/ofa/modeling_ofa.py +++ b/modelscope/models/multi_modal/ofa/modeling_ofa.py @@ -38,7 +38,7 @@ from .resnet import ResNet from .utils.utils import DropPath from .vit import vit_base, vit_huge, vit_large, vit_large_336 -logger = logging.get_logger(__name__) +logger = logging.get_logger() _CHECKPOINT_FOR_DOC = 'ofa-base' _CONFIG_FOR_DOC = 'OFAConfig' diff --git a/modelscope/models/multi_modal/ofa/tokenization_ofa.py b/modelscope/models/multi_modal/ofa/tokenization_ofa.py index fd50505c..77de7a1d 100644 --- a/modelscope/models/multi_modal/ofa/tokenization_ofa.py +++ b/modelscope/models/multi_modal/ofa/tokenization_ofa.py @@ -24,7 +24,7 @@ from transformers.utils import logging from modelscope.utils.constant import ModelFile -logger = logging.get_logger(__name__) +logger = logging.get_logger() VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} diff --git a/modelscope/models/multi_modal/ofa/tokenization_ofa_fast.py b/modelscope/models/multi_modal/ofa/tokenization_ofa_fast.py index db11370d..50ad481e 100644 --- a/modelscope/models/multi_modal/ofa/tokenization_ofa_fast.py +++ b/modelscope/models/multi_modal/ofa/tokenization_ofa_fast.py @@ -23,7 +23,7 @@ from transformers.utils import logging from modelscope.utils.constant import ModelFile from .tokenization_ofa import OFATokenizer, OFATokenizerZH -logger = logging.get_logger(__name__) +logger = logging.get_logger() VOCAB_FILES_NAMES = { 'vocab_file': 'vocab.json', diff --git a/modelscope/models/nlp/T5/backbone.py b/modelscope/models/nlp/T5/backbone.py index e8abfbae..9b405449 100644 --- a/modelscope/models/nlp/T5/backbone.py +++ b/modelscope/models/nlp/T5/backbone.py @@ -41,7 +41,7 @@ from modelscope.utils.constant 
import Tasks from modelscope.utils.logger import get_logger from .configuration import T5Config -logger = get_logger(__name__) +logger = get_logger() ################################################### diff --git a/modelscope/models/nlp/T5/configuration.py b/modelscope/models/nlp/T5/configuration.py index 1f9a965e..d64793ad 100644 --- a/modelscope/models/nlp/T5/configuration.py +++ b/modelscope/models/nlp/T5/configuration.py @@ -20,7 +20,7 @@ from transformers.onnx import OnnxSeq2SeqConfigWithPast from modelscope.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() class T5Config(PretrainedConfig): diff --git a/modelscope/models/nlp/T5/text2text_generation.py b/modelscope/models/nlp/T5/text2text_generation.py index 0275ecb9..0b695589 100644 --- a/modelscope/models/nlp/T5/text2text_generation.py +++ b/modelscope/models/nlp/T5/text2text_generation.py @@ -31,7 +31,7 @@ from modelscope.utils.logger import get_logger from .backbone import T5PreTrainedModel, T5Stack from .configuration import T5Config -logger = get_logger(__name__) +logger = get_logger() # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ diff --git a/modelscope/models/nlp/bert/backbone.py b/modelscope/models/nlp/bert/backbone.py index bd432509..82c576d0 100755 --- a/modelscope/models/nlp/bert/backbone.py +++ b/modelscope/models/nlp/bert/backbone.py @@ -36,7 +36,7 @@ from modelscope.utils.logger import get_logger from modelscope.utils.nlp.utils import parse_labels_in_order from .configuration import BertConfig -logger = get_logger(__name__) +logger = get_logger() _CONFIG_FOR_DOC = 'BertConfig' diff --git a/modelscope/models/nlp/bert/configuration.py b/modelscope/models/nlp/bert/configuration.py index 1e2cef95..6a8441c4 100644 --- a/modelscope/models/nlp/bert/configuration.py +++ b/modelscope/models/nlp/bert/configuration.py @@ -22,7 +22,7 @@ from transformers.onnx import OnnxConfig 
from modelscope.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() class BertConfig(PretrainedConfig): diff --git a/modelscope/models/nlp/bert/fill_mask.py b/modelscope/models/nlp/bert/fill_mask.py index 1f44365c..8ce6f9b9 100644 --- a/modelscope/models/nlp/bert/fill_mask.py +++ b/modelscope/models/nlp/bert/fill_mask.py @@ -28,7 +28,7 @@ from modelscope.utils.constant import Tasks from .backbone import BertModel, BertPreTrainedModel from .configuration import BertConfig -logger = logging.get_logger(__name__) +logger = logging.get_logger() class BertPredictionHeadTransform(nn.Module): diff --git a/modelscope/models/nlp/bert/text_classification.py b/modelscope/models/nlp/bert/text_classification.py index ff4a2418..32aab7b2 100644 --- a/modelscope/models/nlp/bert/text_classification.py +++ b/modelscope/models/nlp/bert/text_classification.py @@ -27,7 +27,7 @@ from modelscope.utils import logger as logging from modelscope.utils.constant import Tasks from .backbone import BertModel, BertPreTrainedModel -logger = logging.get_logger(__name__) +logger = logging.get_logger() @MODELS.register_module(Tasks.text_classification, module_name=Models.bert) diff --git a/modelscope/models/nlp/bert/text_ranking.py b/modelscope/models/nlp/bert/text_ranking.py index b5ac8d7e..0d1ca1fd 100644 --- a/modelscope/models/nlp/bert/text_ranking.py +++ b/modelscope/models/nlp/bert/text_ranking.py @@ -12,7 +12,7 @@ from modelscope.utils.constant import Tasks from .backbone import BertModel from .text_classification import BertForSequenceClassification -logger = logging.get_logger(__name__) +logger = logging.get_logger() @MODELS.register_module(Tasks.text_ranking, module_name=Models.bert) diff --git a/modelscope/models/nlp/bert/token_classification.py b/modelscope/models/nlp/bert/token_classification.py index 15ea3231..b1b26a37 100644 --- a/modelscope/models/nlp/bert/token_classification.py +++ b/modelscope/models/nlp/bert/token_classification.py @@ -27,7 +27,7 @@ 
from modelscope.utils import logger as logging from modelscope.utils.constant import Tasks from .backbone import BertModel, BertPreTrainedModel -logger = logging.get_logger(__name__) +logger = logging.get_logger() @MODELS.register_module(Tasks.token_classification, module_name=Models.bert) diff --git a/modelscope/models/nlp/deberta_v2/backbone.py b/modelscope/models/nlp/deberta_v2/backbone.py index 0daa8c7d..11f27a20 100644 --- a/modelscope/models/nlp/deberta_v2/backbone.py +++ b/modelscope/models/nlp/deberta_v2/backbone.py @@ -33,7 +33,7 @@ from modelscope.utils import logger as logging from modelscope.utils.constant import Tasks from .configuration import DebertaV2Config -logger = logging.get_logger(__name__) +logger = logging.get_logger() # Copied from transformers.models.deberta.modeling_deberta.ContextPooler diff --git a/modelscope/models/nlp/deberta_v2/configuration.py b/modelscope/models/nlp/deberta_v2/configuration.py index 7921ca2f..351621f6 100644 --- a/modelscope/models/nlp/deberta_v2/configuration.py +++ b/modelscope/models/nlp/deberta_v2/configuration.py @@ -18,7 +18,7 @@ from transformers import PretrainedConfig from modelscope.utils import logger as logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class DebertaV2Config(PretrainedConfig): diff --git a/modelscope/models/nlp/deberta_v2/tokenization_fast.py b/modelscope/models/nlp/deberta_v2/tokenization_fast.py index 913ea5bd..c37b18d9 100644 --- a/modelscope/models/nlp/deberta_v2/tokenization_fast.py +++ b/modelscope/models/nlp/deberta_v2/tokenization_fast.py @@ -28,7 +28,7 @@ if is_sentencepiece_available(): else: DebertaV2Tokenizer = None -logger = logging.get_logger(__name__) +logger = logging.get_logger() VOCAB_FILES_NAMES = { 'vocab_file': 'spm.model', diff --git a/modelscope/models/nlp/gpt3/configuration.py b/modelscope/models/nlp/gpt3/configuration.py index 66e8b836..9c98cae8 100644 --- a/modelscope/models/nlp/gpt3/configuration.py +++ 
b/modelscope/models/nlp/gpt3/configuration.py @@ -17,7 +17,7 @@ import torch from transformers.configuration_utils import PretrainedConfig from transformers.utils import logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class GPT3Config(PretrainedConfig): diff --git a/modelscope/models/nlp/gpt_moe/configuration.py b/modelscope/models/nlp/gpt_moe/configuration.py index dfab93c6..7dd43ec5 100644 --- a/modelscope/models/nlp/gpt_moe/configuration.py +++ b/modelscope/models/nlp/gpt_moe/configuration.py @@ -17,7 +17,7 @@ import torch from transformers.configuration_utils import PretrainedConfig from transformers.utils import logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class GPTMoEConfig(PretrainedConfig): diff --git a/modelscope/models/nlp/palm_v2/configuration.py b/modelscope/models/nlp/palm_v2/configuration.py index 3b9e51fb..48e0e20b 100644 --- a/modelscope/models/nlp/palm_v2/configuration.py +++ b/modelscope/models/nlp/palm_v2/configuration.py @@ -19,7 +19,7 @@ from transformers.configuration_utils import PretrainedConfig from modelscope.utils import logger as logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class PalmConfig(PretrainedConfig): diff --git a/modelscope/models/nlp/palm_v2/text_generation.py b/modelscope/models/nlp/palm_v2/text_generation.py index f1c8e414..5bb446b5 100644 --- a/modelscope/models/nlp/palm_v2/text_generation.py +++ b/modelscope/models/nlp/palm_v2/text_generation.py @@ -760,7 +760,7 @@ class Translator(object): def __init__(self, model, dataset: str = 'cnn'): super().__init__() - self.logger = logging.get_logger(__name__) + self.logger = logging.get_logger() self.args = model.config self.args.dataset = dataset self.model = model.palm diff --git a/modelscope/models/nlp/plug/configuration.py b/modelscope/models/nlp/plug/configuration.py index 44b13a7f..c60458c8 100644 --- a/modelscope/models/nlp/plug/configuration.py +++ 
b/modelscope/models/nlp/plug/configuration.py @@ -21,7 +21,7 @@ from transformers import PretrainedConfig from modelscope.utils import logger as logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class PlugNLUConfig(PretrainedConfig): diff --git a/modelscope/models/nlp/plug/distributed_plug.py b/modelscope/models/nlp/plug/distributed_plug.py index 23b83078..679bfc1b 100644 --- a/modelscope/models/nlp/plug/distributed_plug.py +++ b/modelscope/models/nlp/plug/distributed_plug.py @@ -17,7 +17,7 @@ from modelscope.utils.torch_utils import set_random_seed_mpu from . import PlugModel from .configuration import PlugNLGConfig -logger = get_logger(__name__) +logger = get_logger() class DistributedPlug(TorchModel): diff --git a/modelscope/models/nlp/ponet/backbone.py b/modelscope/models/nlp/ponet/backbone.py index 22114f28..731e6516 100644 --- a/modelscope/models/nlp/ponet/backbone.py +++ b/modelscope/models/nlp/ponet/backbone.py @@ -36,7 +36,7 @@ from modelscope.utils.constant import Tasks from modelscope.utils.logger import get_logger from .configuration import PoNetConfig -logger = get_logger(__name__) +logger = get_logger() is_pytorch_12plus = LooseVersion(torch.__version__) >= LooseVersion('1.12.0') diff --git a/modelscope/models/nlp/ponet/configuration.py b/modelscope/models/nlp/ponet/configuration.py index 7dfaba48..b1ac0459 100644 --- a/modelscope/models/nlp/ponet/configuration.py +++ b/modelscope/models/nlp/ponet/configuration.py @@ -18,7 +18,7 @@ from transformers import PretrainedConfig from modelscope.utils import logger as logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class PoNetConfig(PretrainedConfig): diff --git a/modelscope/models/nlp/ponet/fill_mask.py b/modelscope/models/nlp/ponet/fill_mask.py index fb09efc0..591b1041 100644 --- a/modelscope/models/nlp/ponet/fill_mask.py +++ b/modelscope/models/nlp/ponet/fill_mask.py @@ -26,7 +26,7 @@ from modelscope.utils.constant import Tasks from 
modelscope.utils.logger import get_logger from .backbone import PoNetModel, PoNetPreTrainedModel -logger = get_logger(__name__) +logger = get_logger() class PoNetPredictionHeadTransform(nn.Module): diff --git a/modelscope/models/nlp/ponet/tokenization.py b/modelscope/models/nlp/ponet/tokenization.py index 2da91545..8cf9a035 100644 --- a/modelscope/models/nlp/ponet/tokenization.py +++ b/modelscope/models/nlp/ponet/tokenization.py @@ -24,7 +24,7 @@ from transformers.tokenization_utils import BatchEncoding, EncodedInput from modelscope.utils.constant import ModelFile from modelscope.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() VOCAB_FILES_NAMES = {'vocab_file': ModelFile.VOCAB_FILE} diff --git a/modelscope/models/nlp/space/configuration.py b/modelscope/models/nlp/space/configuration.py index 0da2d629..8f125b03 100644 --- a/modelscope/models/nlp/space/configuration.py +++ b/modelscope/models/nlp/space/configuration.py @@ -20,7 +20,7 @@ from modelscope.models.nlp.structbert import SbertConfig from modelscope.utils import logger as logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class SpaceConfig(SbertConfig): diff --git a/modelscope/models/nlp/space/model/tokenization_space.py b/modelscope/models/nlp/space/model/tokenization_space.py index e90c2b5a..cc57eb03 100644 --- a/modelscope/models/nlp/space/model/tokenization_space.py +++ b/modelscope/models/nlp/space/model/tokenization_space.py @@ -19,7 +19,7 @@ from transformers import BasicTokenizer, BertTokenizer, WordpieceTokenizer from modelscope.utils import logger as logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class SpaceTokenizer(BertTokenizer): diff --git a/modelscope/models/nlp/structbert/adv_utils.py b/modelscope/models/nlp/structbert/adv_utils.py index 91a4cb82..eee44199 100644 --- a/modelscope/models/nlp/structbert/adv_utils.py +++ b/modelscope/models/nlp/structbert/adv_utils.py @@ -18,7 +18,7 @@ from torch 
import nn from modelscope.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() def _symmetric_kl_div(logits1, logits2, attention_mask=None): diff --git a/modelscope/models/nlp/structbert/backbone.py b/modelscope/models/nlp/structbert/backbone.py index 9d50dc1f..0ba3dbb7 100755 --- a/modelscope/models/nlp/structbert/backbone.py +++ b/modelscope/models/nlp/structbert/backbone.py @@ -39,7 +39,7 @@ from modelscope.utils.logger import get_logger from modelscope.utils.nlp.utils import parse_labels_in_order from .configuration import SbertConfig -logger = get_logger(__name__) +logger = get_logger() class SbertEmbeddings(nn.Module): diff --git a/modelscope/models/nlp/structbert/configuration.py b/modelscope/models/nlp/structbert/configuration.py index 8f095f9d..fcd4c653 100644 --- a/modelscope/models/nlp/structbert/configuration.py +++ b/modelscope/models/nlp/structbert/configuration.py @@ -19,7 +19,7 @@ from transformers import PretrainedConfig from modelscope.utils import logger as logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class SbertConfig(PretrainedConfig): diff --git a/modelscope/models/nlp/structbert/fill_mask.py b/modelscope/models/nlp/structbert/fill_mask.py index ded32020..3554d0c7 100644 --- a/modelscope/models/nlp/structbert/fill_mask.py +++ b/modelscope/models/nlp/structbert/fill_mask.py @@ -29,7 +29,7 @@ from modelscope.utils.constant import Tasks from .backbone import SbertModel, SbertPreTrainedModel from .configuration import SbertConfig -logger = logging.get_logger(__name__) +logger = logging.get_logger() class SbertPredictionHeadTransform(nn.Module): diff --git a/modelscope/models/nlp/structbert/text_classification.py b/modelscope/models/nlp/structbert/text_classification.py index ab5b127e..f0f0c440 100644 --- a/modelscope/models/nlp/structbert/text_classification.py +++ b/modelscope/models/nlp/structbert/text_classification.py @@ -29,7 +29,7 @@ from .adv_utils import compute_adv_loss from 
.backbone import SbertModel, SbertPreTrainedModel from .configuration import SbertConfig -logger = logging.get_logger(__name__) +logger = logging.get_logger() @MODELS.register_module( diff --git a/modelscope/models/nlp/structbert/token_classification.py b/modelscope/models/nlp/structbert/token_classification.py index 677dcf31..ab46fc83 100644 --- a/modelscope/models/nlp/structbert/token_classification.py +++ b/modelscope/models/nlp/structbert/token_classification.py @@ -29,7 +29,7 @@ from .adv_utils import compute_adv_loss from .backbone import SbertModel, SbertPreTrainedModel from .configuration import SbertConfig -logger = logging.get_logger(__name__) +logger = logging.get_logger() @MODELS.register_module( diff --git a/modelscope/models/nlp/task_models/task_model.py b/modelscope/models/nlp/task_models/task_model.py index 8c83517a..0c02f8d2 100644 --- a/modelscope/models/nlp/task_models/task_model.py +++ b/modelscope/models/nlp/task_models/task_model.py @@ -15,7 +15,7 @@ from modelscope.utils.constant import Fields, Tasks from modelscope.utils.file_utils import func_receive_dict_inputs from modelscope.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() __all__ = ['EncoderDecoderTaskModelBase', 'SingleBackboneTaskModelBase'] diff --git a/modelscope/models/nlp/veco/backbone.py b/modelscope/models/nlp/veco/backbone.py index 98d8c30a..93ccead6 100644 --- a/modelscope/models/nlp/veco/backbone.py +++ b/modelscope/models/nlp/veco/backbone.py @@ -26,7 +26,7 @@ from modelscope.utils import logger as logging from modelscope.utils.constant import Tasks from .configuration import VecoConfig -logger = logging.get_logger(__name__) +logger = logging.get_logger() VECO_PRETRAINED_MODEL_ARCHIVE_LIST = [] diff --git a/modelscope/models/nlp/veco/configuration.py b/modelscope/models/nlp/veco/configuration.py index 396755dc..844314a9 100644 --- a/modelscope/models/nlp/veco/configuration.py +++ b/modelscope/models/nlp/veco/configuration.py @@ -21,7 +21,7 
@@ from transformers import RobertaConfig from modelscope.utils import logger as logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class VecoConfig(RobertaConfig): diff --git a/modelscope/pipelines/nlp/text_classification_pipeline.py b/modelscope/pipelines/nlp/text_classification_pipeline.py index 845e8315..75ab9ba7 100644 --- a/modelscope/pipelines/nlp/text_classification_pipeline.py +++ b/modelscope/pipelines/nlp/text_classification_pipeline.py @@ -12,7 +12,7 @@ from modelscope.preprocessors import Preprocessor from modelscope.utils.constant import Fields, Tasks from modelscope.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() @PIPELINES.register_module( diff --git a/modelscope/preprocessors/base.py b/modelscope/preprocessors/base.py index c2d5062a..d9bf8209 100644 --- a/modelscope/preprocessors/base.py +++ b/modelscope/preprocessors/base.py @@ -13,7 +13,7 @@ from modelscope.utils.hub import read_config, snapshot_download from modelscope.utils.logger import get_logger from .builder import build_preprocessor -logger = get_logger(__name__) +logger = get_logger() PREPROCESSOR_MAP = { # nlp diff --git a/modelscope/preprocessors/nlp/text_classification_preprocessor.py b/modelscope/preprocessors/nlp/text_classification_preprocessor.py index ef38594f..e62221ef 100644 --- a/modelscope/preprocessors/nlp/text_classification_preprocessor.py +++ b/modelscope/preprocessors/nlp/text_classification_preprocessor.py @@ -14,7 +14,7 @@ from modelscope.utils.logger import get_logger from .transformers_tokenizer import NLPTokenizer from .utils import labels_to_id, parse_text_and_label -logger = get_logger(__name__) +logger = get_logger() class TextClassificationPreprocessorBase(Preprocessor): diff --git a/modelscope/preprocessors/nlp/text_generation_preprocessor.py b/modelscope/preprocessors/nlp/text_generation_preprocessor.py index 71665fab..a5e1d192 100644 --- 
a/modelscope/preprocessors/nlp/text_generation_preprocessor.py +++ b/modelscope/preprocessors/nlp/text_generation_preprocessor.py @@ -15,7 +15,7 @@ from modelscope.utils.logger import get_logger from .transformers_tokenizer import NLPTokenizer from .utils import parse_text_and_label -logger = get_logger(__name__) +logger = get_logger() class TextGenerationPreprocessorBase(Preprocessor): diff --git a/modelscope/preprocessors/nlp/token_classification_preprocessor.py b/modelscope/preprocessors/nlp/token_classification_preprocessor.py index eb94e85b..bf240bbd 100644 --- a/modelscope/preprocessors/nlp/token_classification_preprocessor.py +++ b/modelscope/preprocessors/nlp/token_classification_preprocessor.py @@ -16,7 +16,7 @@ from modelscope.utils.type_assert import type_assert from .transformers_tokenizer import NLPTokenizer from .utils import parse_text_and_label -logger = get_logger(__name__) +logger = get_logger() @PREPROCESSORS.register_module( diff --git a/modelscope/trainers/hooks/checkpoint_hook.py b/modelscope/trainers/hooks/checkpoint_hook.py index 20082723..d5925dbe 100644 --- a/modelscope/trainers/hooks/checkpoint_hook.py +++ b/modelscope/trainers/hooks/checkpoint_hook.py @@ -70,7 +70,7 @@ class CheckpointHook(Hook): os.makedirs(self.save_dir) if not hasattr(trainer, 'logger'): - self.logger = get_logger(__name__) + self.logger = get_logger() else: self.logger = trainer.logger diff --git a/modelscope/trainers/hooks/lr_scheduler_hook.py b/modelscope/trainers/hooks/lr_scheduler_hook.py index ed018fef..421f14b2 100644 --- a/modelscope/trainers/hooks/lr_scheduler_hook.py +++ b/modelscope/trainers/hooks/lr_scheduler_hook.py @@ -99,7 +99,7 @@ class PlateauLrSchedulerHook(LrSchedulerHook): def before_run(self, trainer): super().before_run(trainer) if not hasattr(trainer, 'logger'): - self.logger = get_logger(__name__) + self.logger = get_logger() else: self.logger = trainer.logger diff --git a/modelscope/trainers/optimizer/child_tuning_adamw_optimizer.py 
b/modelscope/trainers/optimizer/child_tuning_adamw_optimizer.py index d004071f..74215801 100644 --- a/modelscope/trainers/optimizer/child_tuning_adamw_optimizer.py +++ b/modelscope/trainers/optimizer/child_tuning_adamw_optimizer.py @@ -24,7 +24,7 @@ from torch.optim import Optimizer from modelscope.utils.logger import get_logger from .builder import OPTIMIZERS, default_group -logger = get_logger(__name__) +logger = get_logger() __all__ = ['calculate_fisher', 'ChildTuningAdamW'] diff --git a/modelscope/utils/checkpoint.py b/modelscope/utils/checkpoint.py index e21c3dcc..64f60a17 100644 --- a/modelscope/utils/checkpoint.py +++ b/modelscope/utils/checkpoint.py @@ -18,7 +18,7 @@ from modelscope.utils.config import JSONIteratorEncoder from modelscope.utils.constant import ConfigFields, ModelFile from modelscope.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() storage = LocalStorage() diff --git a/modelscope/utils/hub.py b/modelscope/utils/hub.py index 7841e1fa..20fb0e20 100644 --- a/modelscope/utils/hub.py +++ b/modelscope/utils/hub.py @@ -14,7 +14,7 @@ from modelscope.utils.constant import (DEFAULT_MODEL_REVISION, ConfigFields, ModelFile) from .logger import get_logger -logger = get_logger(__name__) +logger = get_logger() def create_model_if_not_exist( diff --git a/modelscope/utils/import_utils.py b/modelscope/utils/import_utils.py index 74b2d8e9..09dbe11b 100644 --- a/modelscope/utils/import_utils.py +++ b/modelscope/utils/import_utils.py @@ -21,8 +21,6 @@ from modelscope.utils.ast_utils import (INDEX_KEY, MODULE_KEY, REQUIREMENT_KEY, from modelscope.utils.error import * # noqa from modelscope.utils.logger import get_logger -logger = get_logger(__name__) - if sys.version_info < (3, 8): import importlib_metadata else: diff --git a/modelscope/utils/logger.py b/modelscope/utils/logger.py index 994bd719..6a3c1d6f 100644 --- a/modelscope/utils/logger.py +++ b/modelscope/utils/logger.py @@ -1,5 +1,6 @@ # Copyright (c) Alibaba, Inc. 
and its affiliates. +import importlib.util import logging from typing import Optional @@ -24,11 +25,27 @@ def get_logger(log_file: Optional[str] = None, if logger_name in init_loggers: return logger + # handle duplicate logs to the console + # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler (NOTSET) + # to the root logger. As logger.propagate is True by default, this root + # level handler causes logging messages from rank>0 processes to + # unexpectedly show up on the console, creating much unwanted clutter. + # To fix this issue, we set the root logger's StreamHandler, if any, to log + # at the ERROR level. + for handler in logger.root.handlers: + if type(handler) is logging.StreamHandler: + handler.setLevel(logging.ERROR) + stream_handler = logging.StreamHandler() handlers = [stream_handler] - # TODO @wenmeng.zwm add logger setting for distributed environment - if log_file is not None: + if importlib.util.find_spec('torch') is not None: + from modelscope.utils.torch_utils import is_master + is_worker0 = is_master() + else: + is_worker0 = True + + if is_worker0 and log_file is not None: file_handler = logging.FileHandler(log_file, file_mode) handlers.append(file_handler) @@ -39,7 +56,11 @@ def get_logger(log_file: Optional[str] = None, handler.setLevel(log_level) logger.addHandler(handler) - logger.setLevel(log_level) + if is_worker0: + logger.setLevel(log_level) + else: + logger.setLevel(logging.ERROR) + init_loggers[logger_name] = True return logger diff --git a/modelscope/utils/test_utils.py b/modelscope/utils/test_utils.py index 5109db11..8ffec100 100644 --- a/modelscope/utils/test_utils.py +++ b/modelscope/utils/test_utils.py @@ -230,6 +230,8 @@ class DistributedTestCase(unittest.TestCase): tmp_env = copy.deepcopy(os.environ) tmp_env['PYTHONPATH'] = ':'.join( (tmp_env.get('PYTHONPATH', ''), script_dir)).lstrip(':') + # avoid distributed test hang + tmp_env['NCCL_P2P_DISABLE'] = '1' script_params = '--save_all_ranks=%s --save_file=%s' % (save_all_ranks,
tmp_res_file) script_cmd = '%s %s %s' % (dist_start_cmd, tmp_run_file, script_params)