
logger.warn->logger.warning
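Context for the rename: in Python's standard logging module, `Logger.warn` is only a deprecated alias of `Logger.warning`, and recent CPython versions emit a `DeprecationWarning` when it is called. A minimal sketch using plain stdlib logging (independent of whichever `logger` object the fastNLP modules actually import):

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger("demo")

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        logger.warn("old spelling")          # deprecated alias of warning()
        logger.warning("preferred spelling")  # supported call

    # On CPython 3.4+ the alias issues a DeprecationWarning behind the scenes.
    print([w.category.__name__ for w in caught])  # -> ['DeprecationWarning']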

tags/v1.0.0alpha
x54-729, 2 years ago
parent commit 44d2a574ae
21 changed files with 38 additions and 38 deletions
  1. fastNLP/core/metrics/accuracy.py (+1, -1)
  2. fastNLP/core/metrics/classify_f1_pre_rec_metric.py (+1, -1)
  3. fastNLP/core/metrics/span_f1_pre_rec_metric.py (+1, -1)
  4. fastNLP/core/utils/utils.py (+1, -1)
  5. fastNLP/embeddings/torch/static_embedding.py (+2, -2)
  6. fastNLP/io/embed_loader.py (+2, -2)
  7. fastNLP/io/loader/classification.py (+1, -1)
  8. fastNLP/io/loader/matching.py (+3, -3)
  9. fastNLP/io/pipe/matching.py (+2, -2)
  10. fastNLP/io/pipe/utils.py (+1, -1)
  11. fastNLP/modules/mix_modules/utils.py (+1, -1)
  12. fastNLP/transformers/torch/configuration_utils.py (+1, -1)
  13. fastNLP/transformers/torch/generation_beam_search.py (+1, -1)
  14. fastNLP/transformers/torch/generation_utils.py (+7, -7)
  15. fastNLP/transformers/torch/models/auto/auto_factory.py (+1, -1)
  16. fastNLP/transformers/torch/models/auto/configuration_auto.py (+1, -1)
  17. fastNLP/transformers/torch/models/auto/modeling_auto.py (+2, -2)
  18. fastNLP/transformers/torch/models/bart/modeling_bart.py (+1, -1)
  19. fastNLP/transformers/torch/models/bert/modeling_bert.py (+1, -1)
  20. fastNLP/transformers/torch/models/cpt/modeling_cpt.py (+1, -1)
  21. fastNLP/transformers/torch/tokenization_utils_base.py (+6, -6)
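All 38 line changes in this commit are the same one-token rename, so a change like this is normally applied mechanically rather than by hand. The sketch below is a hypothetical way to script such a bulk rewrite; the function name and the in-place rewrite strategy are illustrative assumptions, not a record of how this commit was actually produced.

    # Hypothetical bulk-rename helper (illustrative only, not from this commit).
    import pathlib

    def rename_warn_calls(root: str = "fastNLP") -> int:
        """Rewrite `logger.warn(` to `logger.warning(` under `root`; return files changed."""
        changed = 0
        for path in pathlib.Path(root).rglob("*.py"):
            text = path.read_text(encoding="utf-8")
            # Existing `logger.warning(` calls are left untouched because the
            # search string requires an opening parenthesis right after `warn`.
            new_text = text.replace("logger.warn(", "logger.warning(")
            if new_text != text:
                path.write_text(new_text, encoding="utf-8")
                changed += 1
        return changed

    if __name__ == "__main__":
        print(rename_warn_calls(), "files rewritten")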

fastNLP/core/metrics/accuracy.py (+1, -1)

@@ -69,7 +69,7 @@ class Accuracy(Metric):
elif pred.ndim == target.ndim + 1:
pred = pred.argmax(axis=-1)
if seq_len is None and target.ndim > 1:
logger.warn("You are not passing `seq_len` to exclude pad when calculate accuracy.")
logger.warning("You are not passing `seq_len` to exclude pad when calculate accuracy.")

else:
raise RuntimeError(f"when pred have size:{pred.shape}, target should have size: {pred.shape} or "


fastNLP/core/metrics/classify_f1_pre_rec_metric.py (+1, -1)

@@ -156,7 +156,7 @@ class ClassifyFPreRecMetric(Metric):
elif pred.ndim == target.ndim + 1:
pred = pred.argmax(axis=-1)
if seq_len is None and target.ndim > 1:
logger.warn("You are not passing `seq_len` to exclude pad when calculate accuracy.")
logger.warning("You are not passing `seq_len` to exclude pad when calculate accuracy.")
else:
raise RuntimeError(f"when pred have "
f"size:{pred.shape}, target should have size: {pred.shape} or "


fastNLP/core/metrics/span_f1_pre_rec_metric.py (+1, -1)

@@ -39,7 +39,7 @@ def _check_tag_vocab_and_encoding_type(tag_vocab: Union[Vocabulary, dict], encod
f"encoding_type."
tags = tags.replace(tag, '') # 删除该值
if tags: # 如果不为空,说明出现了未使用的tag
logger.warn(f"Tag:{tags} in encoding type:{encoding_type} is not presented in your Vocabulary. Check your "
logger.warning(f"Tag:{tags} in encoding type:{encoding_type} is not presented in your Vocabulary. Check your "
"encoding_type.")




fastNLP/core/utils/utils.py (+1, -1)

@@ -554,7 +554,7 @@ def deprecated(help_message: Optional[str] = None):
def wrapper(*args, **kwargs):
func_hash = hash(deprecated_function)
if func_hash not in _emitted_deprecation_warnings:
- logger.warn(warning_msg, category=FutureWarning, stacklevel=2)
+ logger.warning(warning_msg, category=FutureWarning, stacklevel=2)
_emitted_deprecation_warnings.add(func_hash)
return deprecated_function(*args, **kwargs)



fastNLP/embeddings/torch/static_embedding.py (+2, -2)

@@ -286,7 +286,7 @@ class StaticEmbedding(TokenEmbedding):
if word in vocab:
index = vocab.to_index(word)
if index in matrix:
logger.warn(f"Word has more than one vector in embedding file. Set logger level to "
logger.warning(f"Word has more than one vector in embedding file. Set logger level to "
f"DEBUG for detail.")
logger.debug(f"Word:{word} occurs again in line:{idx}(starts from 0)")
matrix[index] = torch.from_numpy(np.fromstring(' '.join(nums), sep=' ', dtype=dtype, count=dim))
@@ -295,7 +295,7 @@ class StaticEmbedding(TokenEmbedding):
found_count += 1
except Exception as e:
if error == 'ignore':
logger.warn("Error occurred at the {} line.".format(idx))
logger.warning("Error occurred at the {} line.".format(idx))
else:
logger.error("Error occurred at the {} line.".format(idx))
raise e


fastNLP/io/embed_loader.py (+2, -2)

@@ -91,7 +91,7 @@ class EmbedLoader:
hit_flags[index] = True
except Exception as e:
if error == 'ignore':
logger.warn("Error occurred at the {} line.".format(idx))
logger.warning("Error occurred at the {} line.".format(idx))
else:
logging.error("Error occurred at the {} line.".format(idx))
raise e
@@ -156,7 +156,7 @@ class EmbedLoader:
found_pad = True
except Exception as e:
if error == 'ignore':
logger.warn("Error occurred at the {} line.".format(idx))
logger.warning("Error occurred at the {} line.".format(idx))
pass
else:
logging.error("Error occurred at the {} line.".format(idx))


fastNLP/io/loader/classification.py (+1, -1)

@@ -345,7 +345,7 @@ class SST2Loader(Loader):
with open(path, 'r', encoding='utf-8') as f:
f.readline() # 跳过header
if 'test' in os.path.split(path)[1]:
logger.warn("SST2's test file has no target.")
logger.warning("SST2's test file has no target.")
for line in f:
line = line.strip()
if line:


fastNLP/io/loader/matching.py (+3, -3)

@@ -55,7 +55,7 @@ class MNLILoader(Loader):
with open(path, 'r', encoding='utf-8') as f:
f.readline() # 跳过header
if path.endswith("test_matched.tsv") or path.endswith('test_mismatched.tsv'):
logger.warn("MNLI's test file has no target.")
logger.warning("MNLI's test file has no target.")
for line in f:
line = line.strip()
if line:
@@ -227,7 +227,7 @@ class QNLILoader(JsonLoader):
with open(path, 'r', encoding='utf-8') as f:
f.readline() # 跳过header
if path.endswith("test.tsv"):
logger.warn("QNLI's test file has no target.")
logger.warning("QNLI's test file has no target.")
for line in f:
line = line.strip()
if line:
@@ -289,7 +289,7 @@ class RTELoader(Loader):
with open(path, 'r', encoding='utf-8') as f:
f.readline() # 跳过header
if path.endswith("test.tsv"):
logger.warn("RTE's test file has no target.")
logger.warning("RTE's test file has no target.")
for line in f:
line = line.strip()
if line:


fastNLP/io/pipe/matching.py (+2, -2)

@@ -146,7 +146,7 @@ class MatchingBertPipe(Pipe):
warn_msg = f"There are {len(target_vocab._no_create_word)} target labels" \
f" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} " \
f"data set but not in train data set!."
- logger.warn(warn_msg)
+ logger.warning(warn_msg)
print(warn_msg)
has_target_datasets = [dataset for name, dataset in data_bundle.datasets.items() if
@@ -291,7 +291,7 @@ class MatchingPipe(Pipe):
warn_msg = f"There are {len(target_vocab._no_create_word)} target labels" \
f" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} " \
f"data set but not in train data set!."
- logger.warn(warn_msg)
+ logger.warning(warn_msg)
print(warn_msg)
has_target_datasets = [dataset for name, dataset in data_bundle.datasets.items() if


fastNLP/io/pipe/utils.py (+1, -1)

@@ -138,7 +138,7 @@ def _indexize(data_bundle, input_field_names='words', target_field_names='target
f" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} " \
f"data set but not in train data set!.\n" \
f"These label(s) are {tgt_vocab._no_create_word}"
- logger.warn(warn_msg)
+ logger.warning(warn_msg)
# log.warning(warn_msg)
tgt_vocab.index_dataset(*[ds for ds in data_bundle.datasets.values() if ds.has_field(target_field_name)], field_name=target_field_name)
data_bundle.set_vocab(tgt_vocab, target_field_name)


fastNLP/modules/mix_modules/utils.py (+1, -1)

@@ -112,7 +112,7 @@ def _jittor2torch(jittor_var: 'jittor.Var', device: Optional[Union[str, int]] =
# 如果outputs有_grad键,可以实现求导
no_gradient = not jittor_var.requires_grad if no_gradient is None else no_gradient
if no_gradient == False:
logger.warn("The result tensor will not keep gradients due to differences between jittor and pytorch.")
logger.warning("The result tensor will not keep gradients due to differences between jittor and pytorch.")
jittor_numpy = jittor_var.numpy()
if not np.issubdtype(jittor_numpy.dtype, np.inexact):
no_gradient = True


fastNLP/transformers/torch/configuration_utils.py (+1, -1)

@@ -327,7 +327,7 @@ class PretrainedConfig:

# Deal with gradient checkpointing
if kwargs.get("gradient_checkpointing", False):
- logger.warn(
+ logger.warning(
"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
"Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the "
"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."


fastNLP/transformers/torch/generation_beam_search.py (+1, -1)

@@ -195,7 +195,7 @@ class BeamSearchScorer(BeamScorer):
)

if "max_length" in kwargs:
- logger.warn(
+ logger.warning(
"Passing `max_length` to BeamSearchScorer is deprecated and has no effect."
"`max_length` should be passed directly to `beam_search(...)`, `beam_sample(...)`"
",or `group_beam_search(...)`."


fastNLP/transformers/torch/generation_utils.py (+7, -7)

@@ -872,7 +872,7 @@ class GenerationMixin:
max_length = self.config.max_length
elif max_length is not None and max_new_tokens is not None:
# Both are set, this is odd, raise a warning
- logger.warn(
+ logger.warning(
"Both `max_length` and `max_new_tokens` have been set but they serve the same purpose.", UserWarning
)

@@ -1239,7 +1239,7 @@ class GenerationMixin:
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
- logger.warn(
+ logger.warning(
"`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
@@ -1475,7 +1475,7 @@ class GenerationMixin:
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
- logger.warn(
+ logger.warning(
"`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
@@ -1726,13 +1726,13 @@ class GenerationMixin:
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
- logger.warn(
+ logger.warning(
"`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
if len(stopping_criteria) == 0:
logger.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning)
logger.warning("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning)
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
@@ -2030,7 +2030,7 @@ class GenerationMixin:
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
- logger.warn(
+ logger.warning(
"`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
@@ -2325,7 +2325,7 @@ class GenerationMixin:
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
- logger.warn(
+ logger.warning(
"`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)


fastNLP/transformers/torch/models/auto/auto_factory.py (+1, -1)

@@ -401,7 +401,7 @@ class _BaseAutoModelClass:
"the option `trust_remote_code=True` to remove this error."
)
if kwargs.get("revision", None) is None:
- logger.warn(
+ logger.warning(
"Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure "
"no malicious code has been contributed in a newer revision."
)


fastNLP/transformers/torch/models/auto/configuration_auto.py (+1, -1)

@@ -130,7 +130,7 @@ class _LazyLoadAllMappings(OrderedDict):
def _initialize(self):
if self._initialized:
return
- # logger.warn(
+ # logger.warning(
# "ALL_PRETRAINED_CONFIG_ARCHIVE_MAP is deprecated and will be removed in v5 of Transformers. "
# "It does not contain all available model checkpoints, far from it. Checkout hf.co/models for that.",
# FutureWarning,


fastNLP/transformers/torch/models/auto/modeling_auto.py (+2, -2)

@@ -306,7 +306,7 @@ AutoModelForSpeechSeq2Seq = auto_class_update(
class AutoModelWithLMHead(_AutoModelWithLMHead):
@classmethod
def from_config(cls, config):
- logger.warn(
+ logger.warning(
"The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
"`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
"`AutoModelForSeq2SeqLM` for encoder-decoder models.",
@@ -316,7 +316,7 @@ class AutoModelWithLMHead(_AutoModelWithLMHead):

@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
- logger.warn(
+ logger.warning(
"The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
"`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
"`AutoModelForSeq2SeqLM` for encoder-decoder models.",


fastNLP/transformers/torch/models/bart/modeling_bart.py (+1, -1)

@@ -513,7 +513,7 @@ class BartPretrainedModel(PreTrainedModel):

class PretrainedBartModel(BartPretrainedModel):
def __init_subclass__(self):
- logger.warn(
+ logger.warning(
"The class `PretrainedBartModel` has been depreciated, please use `BartPretrainedModel` instead.",
FutureWarning,
)


fastNLP/transformers/torch/models/bert/modeling_bert.py (+1, -1)

@@ -1374,7 +1374,7 @@ class BertForNextSentencePrediction(BertPreTrainedModel):
"""

if "next_sentence_label" in kwargs:
- logger.warn(
+ logger.warning(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)


fastNLP/transformers/torch/models/cpt/modeling_cpt.py (+1, -1)

@@ -724,7 +724,7 @@ class CPTDecoder(CPTPretrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:

if use_cache:
- logger.warn(
+ logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)


fastNLP/transformers/torch/tokenization_utils_base.py (+6, -6)

@@ -312,7 +312,7 @@ class BatchEncoding(UserDict):
"""
if not self._encodings:
raise ValueError("words() is not available when using Python-based tokenizers")
- logger.warn(
+ logger.warning(
"`BatchEncoding.words()` property is deprecated and should be replaced with the identical, "
"but more self-explanatory `BatchEncoding.word_ids()` property.",
FutureWarning,
@@ -1601,7 +1601,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin):
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not "
"supported for this tokenizer. Use a model identifier or the path to a directory instead."
)
- logger.warn(
+ logger.warning(
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and "
"won't be possible anymore in v5. Use a model identifier or the path to a directory instead.",
FutureWarning,
@@ -2163,7 +2163,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin):
# Get padding strategy
if padding is False and old_pad_to_max_length:
if verbose:
- logger.warn(
+ logger.warning(
"The `pad_to_max_length` argument is deprecated and will be removed in a future version, "
"use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or "
"use `padding='max_length'` to pad to a max length. In this case, you can give a specific "
@@ -2184,7 +2184,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin):
"To pad to max length, use `padding='max_length'`."
)
if old_pad_to_max_length is not False:
logger.warn("Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.")
logger.warning("Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.")
padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(padding, PaddingStrategy):
padding_strategy = PaddingStrategy(padding)
@@ -2196,7 +2196,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin):
# Get truncation strategy
if truncation is False and old_truncation_strategy != "do_not_truncate":
if verbose:
- logger.warn(
+ logger.warning(
"The `truncation_strategy` argument is deprecated and will be removed in a future version, "
"use `truncation=True` to truncate examples to a max length. You can give a specific "
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the "
@@ -3352,7 +3352,7 @@ model_inputs["labels"] = labels["input_ids"]
See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice.
For a more complete example, see the implementation of `prepare_seq2seq_batch`.
"""
- logger.warn(formatted_warning, FutureWarning)
+ logger.warning(formatted_warning, FutureWarning)
# mBART-specific kwargs that should be ignored by other models.
kwargs.pop("src_lang", None)
kwargs.pop("tgt_lang", None)

