Browse Source

Change deprecated `logger.warn` calls to `logger.warning`

tags/v0.4.10
Yige Xu 5 years ago
parent
commit
5d3a00f7c5
7 changed files with 20 additions and 19 deletions
  1. +2
    -2
      fastNLP/embeddings/bert_embedding.py
  2. +1
    -1
      fastNLP/io/file_reader.py
  3. +1
    -1
      fastNLP/io/pipe/classification.py
  4. +2
    -2
      fastNLP/io/pipe/matching.py
  5. +4
    -3
      fastNLP/io/pipe/utils.py
  6. +5
    -5
      fastNLP/models/bert.py
  7. +5
    -5
      fastNLP/modules/encoder/bert.py

+ 2
- 2
fastNLP/embeddings/bert_embedding.py View File

@@ -72,8 +72,8 @@ class BertEmbedding(ContextualEmbedding):

if model_dir_or_name.lower() in PRETRAINED_BERT_MODEL_DIR:
if 'cn' in model_dir_or_name.lower() and pool_method not in ('first', 'last'):
logger.warn("For Chinese bert, pooled_method should choose from 'first', 'last' in order to achieve"
" faster speed.")
logger.warning("For Chinese bert, pooled_method should choose from 'first', 'last' in order to achieve"
" faster speed.")
warnings.warn("For Chinese bert, pooled_method should choose from 'first', 'last' in order to achieve"
" faster speed.")


+ 1
- 1
fastNLP/io/file_reader.py View File

@@ -111,7 +111,7 @@ def _read_conll(path, encoding='utf-8', indexes=None, dropna=True):
yield line_idx, res
except Exception as e:
if dropna:
logger.warn('Invalid instance which ends at line: {} has been dropped.'.format(line_idx))
logger.warning('Invalid instance which ends at line: {} has been dropped.'.format(line_idx))
continue
raise ValueError('Invalid instance which ends at line: {}'.format(line_idx))
elif line.startswith('#'):


+ 1
- 1
fastNLP/io/pipe/classification.py View File

@@ -387,7 +387,7 @@ class SST2Pipe(_CLSPipe):
f" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} " \
f"data set but not in train data set!."
warnings.warn(warn_msg)
logger.warn(warn_msg)
logger.warning(warn_msg)
datasets = []
for name, dataset in data_bundle.datasets.items():
if dataset.has_field(Const.TARGET):


+ 2
- 2
fastNLP/io/pipe/matching.py View File

@@ -121,7 +121,7 @@ class MatchingBertPipe(Pipe):
f" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} " \
f"data set but not in train data set!."
warnings.warn(warn_msg)
logger.warn(warn_msg)
logger.warning(warn_msg)

has_target_datasets = [dataset for name, dataset in data_bundle.datasets.items() if
dataset.has_field(Const.TARGET)]
@@ -258,7 +258,7 @@ class MatchingPipe(Pipe):
f" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} " \
f"data set but not in train data set!."
warnings.warn(warn_msg)
logger.warn(warn_msg)
logger.warning(warn_msg)

has_target_datasets = [dataset for name, dataset in data_bundle.datasets.items() if
dataset.has_field(Const.TARGET)]


+ 4
- 3
fastNLP/io/pipe/utils.py View File

@@ -130,11 +130,12 @@ def _indexize(data_bundle, input_field_names=Const.INPUT, target_field_names=Con
if ('train' not in name) and (ds.has_field(target_field_name))]
)
if len(tgt_vocab._no_create_word) > 0:
warn_msg = f"There are {len(tgt_vocab._no_create_word)} target labels" \
warn_msg = f"There are {len(tgt_vocab._no_create_word)} `{target_field_name}` labels" \
f" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} " \
f"data set but not in train data set!."
f"data set but not in train data set!.\n" \
f"These label(s) are {tgt_vocab._no_create_word}"
warnings.warn(warn_msg)
logger.warn(warn_msg)
logger.warning(warn_msg)
tgt_vocab.index_dataset(*data_bundle.datasets.values(), field_name=target_field_name)
data_bundle.set_vocab(tgt_vocab, target_field_name)


+ 5
- 5
fastNLP/models/bert.py View File

@@ -65,7 +65,7 @@ class BertForSequenceClassification(BaseModel):
self.bert.model.include_cls_sep = True
warn_msg = "Bert for sequence classification excepts BertEmbedding `include_cls_sep` True, " \
"but got False. FastNLP has changed it to True."
logger.warn(warn_msg)
logger.warning(warn_msg)
warnings.warn(warn_msg)

def forward(self, words):
@@ -110,7 +110,7 @@ class BertForSentenceMatching(BaseModel):
self.bert.model.include_cls_sep = True
warn_msg = "Bert for sentence matching excepts BertEmbedding `include_cls_sep` True, " \
"but got False. FastNLP has changed it to True."
logger.warn(warn_msg)
logger.warning(warn_msg)
warnings.warn(warn_msg)

def forward(self, words):
@@ -156,7 +156,7 @@ class BertForMultipleChoice(BaseModel):
self.bert.model.include_cls_sep = True
warn_msg = "Bert for multiple choice excepts BertEmbedding `include_cls_sep` True, " \
"but got False. FastNLP has changed it to True."
logger.warn(warn_msg)
logger.warning(warn_msg)
warnings.warn(warn_msg)

def forward(self, words):
@@ -206,7 +206,7 @@ class BertForTokenClassification(BaseModel):
self.bert.model.include_cls_sep = False
warn_msg = "Bert for token classification excepts BertEmbedding `include_cls_sep` False, " \
"but got True. FastNLP has changed it to False."
logger.warn(warn_msg)
logger.warning(warn_msg)
warnings.warn(warn_msg)

def forward(self, words):
@@ -250,7 +250,7 @@ class BertForQuestionAnswering(BaseModel):
self.bert.model.include_cls_sep = True
warn_msg = "Bert for question answering excepts BertEmbedding `include_cls_sep` True, " \
"but got False. FastNLP has changed it to True."
logger.warn(warn_msg)
logger.warning(warn_msg)
warnings.warn(warn_msg)

def forward(self, words):


+ 5
- 5
fastNLP/modules/encoder/bert.py View File

@@ -488,10 +488,10 @@ class BertModel(nn.Module):

load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
if len(missing_keys) > 0:
logger.warn("Weights of {} not initialized from pretrained model: {}".format(
logger.warning("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.warn("Weights from pretrained model not used in {}: {}".format(
logger.warning("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))

logger.info(f"Load pre-trained BERT parameters from file {weights_path}.")
@@ -800,7 +800,7 @@ class BertTokenizer(object):
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warn(
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
@@ -824,8 +824,8 @@ class BertTokenizer(object):
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warn("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1


Loading…
Cancel
Save