From 945b30bb6174751130744231aa26119bf9bb2601 Mon Sep 17 00:00:00 2001 From: Xipeng Qiu Date: Sat, 17 Aug 2019 19:57:17 +0800 Subject: [PATCH 01/50] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b35776dc..476c129f 100644 --- a/README.md +++ b/README.md @@ -91,7 +91,7 @@ fastNLP 在 embeddings 模块中内置了几种不同的embedding:静态embedd ## 项目结构 -![](./docs/source/figures/workflow.png) + fastNLP的大致工作流程如上图所示,而项目结构如下: From 19bbaf11b6989a1a29384d5b1516bf934ccac296 Mon Sep 17 00:00:00 2001 From: yh Date: Tue, 27 Aug 2019 01:54:15 +0800 Subject: [PATCH 02/50] =?UTF-8?q?=E4=BD=BF=E7=94=A8=E6=9B=B4pytorch?= =?UTF-8?q?=E7=9A=84=E6=96=B9=E5=BC=8F=E5=A4=84=E7=90=86embedding=E4=B8=AD?= =?UTF-8?q?=E7=9A=84parameter?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/embeddings/bert_embedding.py | 2 +- fastNLP/embeddings/char_embedding.py | 14 ++++++-------- fastNLP/embeddings/elmo_embedding.py | 5 ++--- fastNLP/embeddings/static_embedding.py | 16 +++++++--------- 4 files changed, 16 insertions(+), 21 deletions(-) diff --git a/fastNLP/embeddings/bert_embedding.py b/fastNLP/embeddings/bert_embedding.py index 6a10c489..f3ef69dd 100644 --- a/fastNLP/embeddings/bert_embedding.py +++ b/fastNLP/embeddings/bert_embedding.py @@ -345,7 +345,7 @@ class _WordBertModel(nn.Module): self._wordpiece_pad_index = self.tokenzier.vocab['[PAD]'] # 需要用于生成word_piece print("Found(Or segment into word pieces) {} words out of {}.".format(found_count, len(vocab))) self.word_to_wordpieces = np.array(word_to_wordpieces) - self.word_pieces_lengths = nn.Parameter(torch.LongTensor(word_pieces_lengths), requires_grad=False) + self.register_buffer('word_pieces_lengths', torch.LongTensor(word_pieces_lengths)) print("Successfully generate word pieces.") def forward(self, words): diff --git a/fastNLP/embeddings/char_embedding.py b/fastNLP/embeddings/char_embedding.py index 520e85e6..ea0d4e93 100644 --- a/fastNLP/embeddings/char_embedding.py +++ b/fastNLP/embeddings/char_embedding.py @@ -82,10 +82,9 @@ class CNNCharEmbedding(TokenEmbedding): print(f"In total, there are {len(self.char_vocab)} distinct characters.") # 对vocab进行index max_word_len = max(map(lambda x: len(x[0]), vocab)) - self.words_to_chars_embedding = nn.Parameter(torch.full((len(vocab), max_word_len), - fill_value=self.char_pad_index, dtype=torch.long), - requires_grad=False) - self.word_lengths = nn.Parameter(torch.zeros(len(vocab)).long(), requires_grad=False) + self.register_buffer('words_to_chars_embedding', torch.full((len(vocab), max_word_len), + fill_value=self.char_pad_index, dtype=torch.long)) + self.register_buffer('word_lengths', torch.zeros(len(vocab)).long()) for word, index in vocab: # if index!=vocab.padding_idx: # 如果是pad的话,直接就为pad_value了。修改为不区分pad, 这样所有的也是同一个embed self.words_to_chars_embedding[index, :len(word)] = \ @@ -235,10 +234,9 @@ class LSTMCharEmbedding(TokenEmbedding): print(f"In total, there are {len(self.char_vocab)} distinct characters.") # 对vocab进行index self.max_word_len = max(map(lambda x: len(x[0]), vocab)) - self.words_to_chars_embedding = nn.Parameter(torch.full((len(vocab), self.max_word_len), - fill_value=self.char_pad_index, dtype=torch.long), - requires_grad=False) - self.word_lengths = nn.Parameter(torch.zeros(len(vocab)).long(), requires_grad=False) + self.register_buffer('words_to_chars_embedding', torch.full((len(vocab), self.max_word_len), + fill_value=self.char_pad_index, dtype=torch.long)) + 
self.register_buffer('word_lengths', torch.zeros(len(vocab)).long()) for word, index in vocab: # if index!=vocab.padding_idx: # 如果是pad的话,直接就为pad_value了. 修改为不区分pad与否 self.words_to_chars_embedding[index, :len(word)] = \ diff --git a/fastNLP/embeddings/elmo_embedding.py b/fastNLP/embeddings/elmo_embedding.py index 24cd052e..80178d21 100644 --- a/fastNLP/embeddings/elmo_embedding.py +++ b/fastNLP/embeddings/elmo_embedding.py @@ -240,10 +240,9 @@ class _ElmoModel(nn.Module): # 生成words到chars的映射 max_chars = config['char_cnn']['max_characters_per_token'] - self.words_to_chars_embedding = nn.Parameter(torch.full((len(vocab) + 2, max_chars), + self.register_buffer('words_to_chars_embedding', torch.full((len(vocab) + 2, max_chars), fill_value=len(char_vocab), - dtype=torch.long), - requires_grad=False) + dtype=torch.long)) for word, index in list(iter(vocab)) + [(BOS_TAG, len(vocab)), (EOS_TAG, len(vocab) + 1)]: if len(word) + 2 > max_chars: word = word[:max_chars - 2] diff --git a/fastNLP/embeddings/static_embedding.py b/fastNLP/embeddings/static_embedding.py index a75ad18f..b0141682 100644 --- a/fastNLP/embeddings/static_embedding.py +++ b/fastNLP/embeddings/static_embedding.py @@ -121,28 +121,27 @@ class StaticEmbedding(TokenEmbedding): embedding = self._load_with_vocab(model_path, vocab=lowered_vocab, init_method=init_method) else: embedding = self._randomly_init_embed(len(vocab), embedding_dim, init_method) - self.words_to_words = nn.Parameter(torch.arange(len(vocab)).long(), requires_grad=False) + self.register_buffer('words_to_words', torch.arange(len(vocab)).long()) if lowered_vocab.unknown: unknown_idx = lowered_vocab.unknown_idx else: unknown_idx = embedding.size(0) - 1 # 否则是最后一个为unknow - self.words_to_words = nn.Parameter(torch.arange(len(vocab)).long(), requires_grad=False) - words_to_words = nn.Parameter(torch.full((len(vocab),), fill_value=unknown_idx).long(), - requires_grad=False) + self.register_buffer('words_to_words', torch.arange(len(vocab)).long()) + words_to_words = torch.full((len(vocab),), fill_value=unknown_idx).long() for word, index in vocab: if word not in lowered_vocab: word = word.lower() if word not in lowered_vocab and lowered_vocab._is_word_no_create_entry(word): continue # 如果不需要创建entry,已经默认unknown了 words_to_words[index] = self.words_to_words[lowered_vocab.to_index(word)] - self.words_to_words = words_to_words + self.register_buffer('words_to_words', words_to_words) self._word_unk_index = lowered_vocab.unknown_idx # 替换一下unknown的index else: if model_path: embedding = self._load_with_vocab(model_path, vocab=vocab, init_method=init_method) else: embedding = self._randomly_init_embed(len(vocab), embedding_dim, init_method) - self.words_to_words = nn.Parameter(torch.arange(len(vocab)).long(), requires_grad=False) + self.register_buffer('words_to_words', torch.arange(len(vocab)).long()) if not self.only_norm_found_vector and normalize: embedding /= (torch.norm(embedding, dim=1, keepdim=True) + 1e-12) @@ -151,7 +150,7 @@ class StaticEmbedding(TokenEmbedding): index_in_truncated_vocab = truncated_words_to_words[i] truncated_words_to_words[i] = self.words_to_words[index_in_truncated_vocab] del self.words_to_words - self.words_to_words = nn.Parameter(truncated_words_to_words, requires_grad=False) + self.register_buffer('words_to_words', truncated_words_to_words) self.embedding = nn.Embedding(num_embeddings=embedding.shape[0], embedding_dim=embedding.shape[1], padding_idx=vocab.padding_idx, @@ -273,8 +272,7 @@ class StaticEmbedding(TokenEmbedding): vectors = torch.cat((vectors, 
torch.zeros(1, dim)), dim=0).contiguous() else: unknown_idx = vocab.unknown_idx - self.words_to_words = nn.Parameter(torch.full((len(vocab), ), fill_value=unknown_idx).long(), - requires_grad=False) + self.register_buffer('words_to_words', torch.full((len(vocab), ), fill_value=unknown_idx).long()) for index, (index_in_vocab, vec) in enumerate(matrix.items()): if vec is not None: From 04737a105d1d57c334ebb664cac64d4331a8593a Mon Sep 17 00:00:00 2001 From: ChenXin Date: Tue, 27 Aug 2019 20:46:05 +0800 Subject: [PATCH 03/50] update the doc-tool to show __init__ and class doc separately --- docs/count.py | 7 ++++--- docs/source/conf.py | 6 ++++-- docs/source/fastNLP.core.rst | 3 +-- docs/source/fastNLP.embeddings.rst | 1 + docs/source/fastNLP.io.rst | 1 + docs/source/fastNLP.models.biaffine_parser.rst | 1 - docs/source/fastNLP.models.cnn_text_classification.rst | 1 - docs/source/fastNLP.models.rst | 2 +- docs/source/fastNLP.models.sequence_labeling.rst | 1 - docs/source/fastNLP.models.snli.rst | 1 - docs/source/fastNLP.models.star_transformer.rst | 1 - docs/source/fastNLP.modules.decoder.rst | 1 - docs/source/fastNLP.modules.encoder.rst | 1 - docs/source/fastNLP.modules.rst | 2 +- docs/source/fastNLP.modules.utils.rst | 1 - docs/source/fastNLP.rst | 1 + 16 files changed, 14 insertions(+), 17 deletions(-) diff --git a/docs/count.py b/docs/count.py index 72868403..c75173ef 100644 --- a/docs/count.py +++ b/docs/count.py @@ -66,12 +66,13 @@ def create_rst_file(modules, name, children): fout.write(t + "\n") fout.write("\n") fout.write(".. automodule:: " + name + "\n") - if len(m.__all__) > 0: + if name != "fastNLP.core" and len(m.__all__) > 0: fout.write(" :members: " + ", ".join(m.__all__) + "\n") - fout.write(" :inherited-members:\n") + if not (name.startswith('fastNLP.models') or name.startswith('fastNLP.modules')): + fout.write(" :inherited-members:\n") fout.write("\n") if name in children: - fout.write("子模块\n------\n\n.. toctree::\n\n") + fout.write("子模块\n------\n\n.. toctree::\n :maxdepth: 1\n\n") for module in children[name]: fout.write(" " + module + "\n") diff --git a/docs/source/conf.py b/docs/source/conf.py index 83cb7185..7536ee32 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -168,10 +168,12 @@ texinfo_documents = [ # -- Extension configuration ------------------------------------------------- def maybe_skip_member(app, what, name, obj, skip, options): - if name.startswith("_"): - return True if obj.__doc__ is None: return True + if name == "__init__": + return False + if name.startswith("_"): + return True return False diff --git a/docs/source/fastNLP.core.rst b/docs/source/fastNLP.core.rst index 56de46e9..15fe29d5 100644 --- a/docs/source/fastNLP.core.rst +++ b/docs/source/fastNLP.core.rst @@ -2,13 +2,12 @@ fastNLP.core ============ .. automodule:: fastNLP.core - :members: DataSet, Instance, FieldArray, Padder, AutoPadder, EngChar2DPadder, Vocabulary, DataSetIter, BatchIter, TorchLoaderIter, Const, Tester, Trainer, cache_results, seq_len_to_mask, get_seq_len, logger, Callback, GradientClipCallback, EarlyStopCallback, FitlogCallback, EvaluateCallback, LRScheduler, ControlC, LRFinder, TensorboardCallback, WarmupCallback, SaveModelCallback, EchoCallback, TesterCallback, CallbackException, EarlyStopError, LossFunc, CrossEntropyLoss, L1Loss, BCELoss, NLLLoss, LossInForward, AccuracyMetric, SpanFPreRecMetric, ExtractiveQAMetric, Optimizer, SGD, Adam, AdamW, SequentialSampler, BucketSampler, RandomSampler, Sampler - :inherited-members: 子模块 ------ .. 
toctree:: + :maxdepth: 1 fastNLP.core.batch fastNLP.core.callback diff --git a/docs/source/fastNLP.embeddings.rst b/docs/source/fastNLP.embeddings.rst index 8376408c..b9e6a853 100644 --- a/docs/source/fastNLP.embeddings.rst +++ b/docs/source/fastNLP.embeddings.rst @@ -9,6 +9,7 @@ fastNLP.embeddings ------ .. toctree:: + :maxdepth: 1 fastNLP.embeddings.bert_embedding fastNLP.embeddings.char_embedding diff --git a/docs/source/fastNLP.io.rst b/docs/source/fastNLP.io.rst index 2aacb883..96df9d6c 100644 --- a/docs/source/fastNLP.io.rst +++ b/docs/source/fastNLP.io.rst @@ -9,6 +9,7 @@ fastNLP.io ------ .. toctree:: + :maxdepth: 1 fastNLP.io.data_bundle fastNLP.io.embed_loader diff --git a/docs/source/fastNLP.models.biaffine_parser.rst b/docs/source/fastNLP.models.biaffine_parser.rst index c3dbb0a5..395638fe 100644 --- a/docs/source/fastNLP.models.biaffine_parser.rst +++ b/docs/source/fastNLP.models.biaffine_parser.rst @@ -3,5 +3,4 @@ fastNLP.models.biaffine_parser .. automodule:: fastNLP.models.biaffine_parser :members: BiaffineParser, GraphParser - :inherited-members: diff --git a/docs/source/fastNLP.models.cnn_text_classification.rst b/docs/source/fastNLP.models.cnn_text_classification.rst index fe4bb157..e9ed7ee1 100644 --- a/docs/source/fastNLP.models.cnn_text_classification.rst +++ b/docs/source/fastNLP.models.cnn_text_classification.rst @@ -3,5 +3,4 @@ fastNLP.models.cnn_text_classification .. automodule:: fastNLP.models.cnn_text_classification :members: CNNText - :inherited-members: diff --git a/docs/source/fastNLP.models.rst b/docs/source/fastNLP.models.rst index 88854a79..fb782de1 100644 --- a/docs/source/fastNLP.models.rst +++ b/docs/source/fastNLP.models.rst @@ -3,12 +3,12 @@ fastNLP.models .. automodule:: fastNLP.models :members: CNNText, SeqLabeling, AdvSeqLabel, ESIM, StarTransEnc, STSeqLabel, STNLICls, STSeqCls, BiaffineParser, GraphParser - :inherited-members: 子模块 ------ .. toctree:: + :maxdepth: 1 fastNLP.models.biaffine_parser fastNLP.models.cnn_text_classification diff --git a/docs/source/fastNLP.models.sequence_labeling.rst b/docs/source/fastNLP.models.sequence_labeling.rst index b66e637e..f6551f8b 100644 --- a/docs/source/fastNLP.models.sequence_labeling.rst +++ b/docs/source/fastNLP.models.sequence_labeling.rst @@ -3,5 +3,4 @@ fastNLP.models.sequence_labeling .. automodule:: fastNLP.models.sequence_labeling :members: SeqLabeling, AdvSeqLabel - :inherited-members: diff --git a/docs/source/fastNLP.models.snli.rst b/docs/source/fastNLP.models.snli.rst index 8551051a..eed02139 100644 --- a/docs/source/fastNLP.models.snli.rst +++ b/docs/source/fastNLP.models.snli.rst @@ -3,5 +3,4 @@ fastNLP.models.snli .. automodule:: fastNLP.models.snli :members: ESIM - :inherited-members: diff --git a/docs/source/fastNLP.models.star_transformer.rst b/docs/source/fastNLP.models.star_transformer.rst index f4b5989e..80ab5b33 100644 --- a/docs/source/fastNLP.models.star_transformer.rst +++ b/docs/source/fastNLP.models.star_transformer.rst @@ -3,5 +3,4 @@ fastNLP.models.star_transformer .. automodule:: fastNLP.models.star_transformer :members: StarTransEnc, STNLICls, STSeqCls, STSeqLabel - :inherited-members: diff --git a/docs/source/fastNLP.modules.decoder.rst b/docs/source/fastNLP.modules.decoder.rst index b121f9e9..de6e0d9d 100644 --- a/docs/source/fastNLP.modules.decoder.rst +++ b/docs/source/fastNLP.modules.decoder.rst @@ -3,5 +3,4 @@ fastNLP.modules.decoder .. 
automodule:: fastNLP.modules.decoder :members: MLP, ConditionalRandomField, viterbi_decode, allowed_transitions - :inherited-members: diff --git a/docs/source/fastNLP.modules.encoder.rst b/docs/source/fastNLP.modules.encoder.rst index 6b44a192..fceabbdb 100644 --- a/docs/source/fastNLP.modules.encoder.rst +++ b/docs/source/fastNLP.modules.encoder.rst @@ -3,5 +3,4 @@ fastNLP.modules.encoder .. automodule:: fastNLP.modules.encoder :members: ConvolutionCharEncoder, LSTMCharEncoder, ConvMaxpool, LSTM, StarTransformer, TransformerEncoder, VarRNN, VarLSTM, VarGRU, MaxPool, MaxPoolWithMask, AvgPool, AvgPoolWithMask, MultiHeadAttention - :inherited-members: diff --git a/docs/source/fastNLP.modules.rst b/docs/source/fastNLP.modules.rst index 6134d0dd..b7c259ed 100644 --- a/docs/source/fastNLP.modules.rst +++ b/docs/source/fastNLP.modules.rst @@ -3,12 +3,12 @@ fastNLP.modules .. automodule:: fastNLP.modules :members: ConvolutionCharEncoder, LSTMCharEncoder, ConvMaxpool, LSTM, StarTransformer, TransformerEncoder, VarRNN, VarLSTM, VarGRU, MaxPool, MaxPoolWithMask, AvgPool, AvgPoolWithMask, MultiHeadAttention, MLP, ConditionalRandomField, viterbi_decode, allowed_transitions, TimestepDropout - :inherited-members: 子模块 ------ .. toctree:: + :maxdepth: 1 fastNLP.modules.decoder fastNLP.modules.encoder diff --git a/docs/source/fastNLP.modules.utils.rst b/docs/source/fastNLP.modules.utils.rst index e28ca35a..101a0f45 100644 --- a/docs/source/fastNLP.modules.utils.rst +++ b/docs/source/fastNLP.modules.utils.rst @@ -3,5 +3,4 @@ fastNLP.modules.utils .. automodule:: fastNLP.modules.utils :members: initial_parameter, summary - :inherited-members: diff --git a/docs/source/fastNLP.rst b/docs/source/fastNLP.rst index f22ea936..e01817f7 100644 --- a/docs/source/fastNLP.rst +++ b/docs/source/fastNLP.rst @@ -9,6 +9,7 @@ fastNLP ------ .. toctree:: + :maxdepth: 1 fastNLP.core fastNLP.embeddings From 169f519ffb0133b5f553d04c17c9f2cac0edebcb Mon Sep 17 00:00:00 2001 From: ChenXin Date: Tue, 27 Aug 2019 21:07:22 +0800 Subject: [PATCH 04/50] ignore the methods inherited from torch.nn.Embedding --- docs/count.py | 3 ++- docs/source/fastNLP.embeddings.bert_embedding.rst | 1 - docs/source/fastNLP.embeddings.char_embedding.rst | 1 - docs/source/fastNLP.embeddings.contextual_embedding.rst | 1 - docs/source/fastNLP.embeddings.elmo_embedding.rst | 1 - docs/source/fastNLP.embeddings.embedding.rst | 1 - docs/source/fastNLP.embeddings.rst | 1 - docs/source/fastNLP.embeddings.stack_embedding.rst | 1 - docs/source/fastNLP.embeddings.static_embedding.rst | 1 - docs/source/fastNLP.embeddings.utils.rst | 1 - docs/source/fastNLP.io.dataset_loader.rst | 6 ------ 11 files changed, 2 insertions(+), 16 deletions(-) delete mode 100644 docs/source/fastNLP.io.dataset_loader.rst diff --git a/docs/count.py b/docs/count.py index c75173ef..6a5d256b 100644 --- a/docs/count.py +++ b/docs/count.py @@ -68,7 +68,8 @@ def create_rst_file(modules, name, children): fout.write(".. 
automodule:: " + name + "\n") if name != "fastNLP.core" and len(m.__all__) > 0: fout.write(" :members: " + ", ".join(m.__all__) + "\n") - if not (name.startswith('fastNLP.models') or name.startswith('fastNLP.modules')): + short = name[len("fastNLP."):] + if not (short.startswith('models') or short.startswith('modules') or short.startswith('embeddings')): fout.write(" :inherited-members:\n") fout.write("\n") if name in children: diff --git a/docs/source/fastNLP.embeddings.bert_embedding.rst b/docs/source/fastNLP.embeddings.bert_embedding.rst index 51828cb0..1b59dc35 100644 --- a/docs/source/fastNLP.embeddings.bert_embedding.rst +++ b/docs/source/fastNLP.embeddings.bert_embedding.rst @@ -3,5 +3,4 @@ fastNLP.embeddings.bert_embedding .. automodule:: fastNLP.embeddings.bert_embedding :members: BertEmbedding, BertWordPieceEncoder - :inherited-members: diff --git a/docs/source/fastNLP.embeddings.char_embedding.rst b/docs/source/fastNLP.embeddings.char_embedding.rst index a9b129d8..bc8d64f9 100644 --- a/docs/source/fastNLP.embeddings.char_embedding.rst +++ b/docs/source/fastNLP.embeddings.char_embedding.rst @@ -3,5 +3,4 @@ fastNLP.embeddings.char_embedding .. automodule:: fastNLP.embeddings.char_embedding :members: CNNCharEmbedding, LSTMCharEmbedding - :inherited-members: diff --git a/docs/source/fastNLP.embeddings.contextual_embedding.rst b/docs/source/fastNLP.embeddings.contextual_embedding.rst index ee64c7a0..74e5f5be 100644 --- a/docs/source/fastNLP.embeddings.contextual_embedding.rst +++ b/docs/source/fastNLP.embeddings.contextual_embedding.rst @@ -3,5 +3,4 @@ fastNLP.embeddings.contextual_embedding .. automodule:: fastNLP.embeddings.contextual_embedding :members: ContextualEmbedding - :inherited-members: diff --git a/docs/source/fastNLP.embeddings.elmo_embedding.rst b/docs/source/fastNLP.embeddings.elmo_embedding.rst index 06cc13af..b8c6d41c 100644 --- a/docs/source/fastNLP.embeddings.elmo_embedding.rst +++ b/docs/source/fastNLP.embeddings.elmo_embedding.rst @@ -3,5 +3,4 @@ fastNLP.embeddings.elmo_embedding .. automodule:: fastNLP.embeddings.elmo_embedding :members: ElmoEmbedding - :inherited-members: diff --git a/docs/source/fastNLP.embeddings.embedding.rst b/docs/source/fastNLP.embeddings.embedding.rst index 4d5fcf46..6793446b 100644 --- a/docs/source/fastNLP.embeddings.embedding.rst +++ b/docs/source/fastNLP.embeddings.embedding.rst @@ -3,5 +3,4 @@ fastNLP.embeddings.embedding .. automodule:: fastNLP.embeddings.embedding :members: Embedding, TokenEmbedding - :inherited-members: diff --git a/docs/source/fastNLP.embeddings.rst b/docs/source/fastNLP.embeddings.rst index b9e6a853..f4f4a3e0 100644 --- a/docs/source/fastNLP.embeddings.rst +++ b/docs/source/fastNLP.embeddings.rst @@ -3,7 +3,6 @@ fastNLP.embeddings .. automodule:: fastNLP.embeddings :members: Embedding, TokenEmbedding, StaticEmbedding, ElmoEmbedding, BertEmbedding, BertWordPieceEncoder, StackEmbedding, LSTMCharEmbedding, CNNCharEmbedding, get_embeddings - :inherited-members: 子模块 ------ diff --git a/docs/source/fastNLP.embeddings.stack_embedding.rst b/docs/source/fastNLP.embeddings.stack_embedding.rst index 6af91623..a07d1ef5 100644 --- a/docs/source/fastNLP.embeddings.stack_embedding.rst +++ b/docs/source/fastNLP.embeddings.stack_embedding.rst @@ -3,5 +3,4 @@ fastNLP.embeddings.stack_embedding .. 
automodule:: fastNLP.embeddings.stack_embedding :members: StackEmbedding - :inherited-members: diff --git a/docs/source/fastNLP.embeddings.static_embedding.rst b/docs/source/fastNLP.embeddings.static_embedding.rst index 2df1c329..219ce0e5 100644 --- a/docs/source/fastNLP.embeddings.static_embedding.rst +++ b/docs/source/fastNLP.embeddings.static_embedding.rst @@ -3,5 +3,4 @@ fastNLP.embeddings.static_embedding .. automodule:: fastNLP.embeddings.static_embedding :members: StaticEmbedding - :inherited-members: diff --git a/docs/source/fastNLP.embeddings.utils.rst b/docs/source/fastNLP.embeddings.utils.rst index 13e5936b..077487c1 100644 --- a/docs/source/fastNLP.embeddings.utils.rst +++ b/docs/source/fastNLP.embeddings.utils.rst @@ -3,5 +3,4 @@ fastNLP.embeddings.utils .. automodule:: fastNLP.embeddings.utils :members: get_embeddings - :inherited-members: diff --git a/docs/source/fastNLP.io.dataset_loader.rst b/docs/source/fastNLP.io.dataset_loader.rst deleted file mode 100644 index c211ecf9..00000000 --- a/docs/source/fastNLP.io.dataset_loader.rst +++ /dev/null @@ -1,6 +0,0 @@ -fastNLP.io.dataset_loader -========================= - -.. automodule:: fastNLP.io.dataset_loader - :members: CSVLoader, JsonLoader - From fbbb2fcd8e6526143cd789f9bb7e370d966ac4c4 Mon Sep 17 00:00:00 2001 From: ChenXin Date: Tue, 27 Aug 2019 21:33:18 +0800 Subject: [PATCH 05/50] fix some bugs in docs --- fastNLP/core/callback.py | 21 ++++++++++++--------- fastNLP/io/data_bundle.py | 4 ++-- fastNLP/io/pipe/conll.py | 4 ++-- fastNLP/io/pipe/matching.py | 4 ++-- fastNLP/io/pipe/pipe.py | 2 +- fastNLP/io/pipe/utils.py | 4 ++-- 6 files changed, 21 insertions(+), 18 deletions(-) diff --git a/fastNLP/core/callback.py b/fastNLP/core/callback.py index 2c130061..dde9a31a 100644 --- a/fastNLP/core/callback.py +++ b/fastNLP/core/callback.py @@ -70,10 +70,11 @@ __all__ = [ ] import os +import sys +from copy import deepcopy import torch -from copy import deepcopy -import sys + from .utils import _save_model try: @@ -928,13 +929,15 @@ class WarmupCallback(Callback): class SaveModelCallback(Callback): """ 由于Trainer在训练过程中只会保存最佳的模型, 该callback可实现多种方式的结果存储。 - 会根据训练开始的时间戳在save_dir下建立文件夹,再在文件夹下存放多个模型 - -save_dir - -2019-07-03-15-06-36 - -epoch:0_step:20_{metric_key}:{evaluate_performance}.pt # metric是给定的metric_key, evaluate_performance是性能 - -epoch:1_step:40_{metric_key}:{evaluate_performance}.pt - -2019-07-03-15-10-00 - -epoch:0_step:20_{metric_key}:{evaluate_performance}.pt # metric是给定的metric_key, evaluate_perfomance是性能 + 会根据训练开始的时间戳在save_dir下建立文件夹,再在文件夹下存放多个模型:: + + -save_dir + -2019-07-03-15-06-36 + -epoch:0_step:20_{metric_key}:{evaluate_performance}.pt # metric是给定的metric_key, evaluate_performance是性能 + -epoch:1_step:40_{metric_key}:{evaluate_performance}.pt + -2019-07-03-15-10-00 + -epoch:0_step:20_{metric_key}:{evaluate_performance}.pt # metric是给定的metric_key, evaluate_perfomance是性能 + :param str save_dir: 将模型存放在哪个目录下,会在该目录下创建以时间戳命名的目录,并存放模型 :param int top: 保存dev表现top多少模型。-1为保存所有模型。 :param bool only_param: 是否只保存模型d饿权重。 diff --git a/fastNLP/io/data_bundle.py b/fastNLP/io/data_bundle.py index db60a86f..10f924f0 100644 --- a/fastNLP/io/data_bundle.py +++ b/fastNLP/io/data_bundle.py @@ -204,7 +204,7 @@ class DataBundle: 行的数据进行类型和维度推断本列的数据的类型和维度。 :param bool ignore_miss_dataset: 当某个field名称在某个dataset不存在时,如果为True,则直接忽略该DataSet; 如果为False,则报错 - :return self + :return: self """ for field_name in field_names: for name, dataset in self.datasets.items(): @@ -229,7 +229,7 @@ class DataBundle: 行的数据进行类型和维度推断本列的数据的类型和维度。 :param bool 
ignore_miss_dataset: 当某个field名称在某个dataset不存在时,如果为True,则直接忽略该DataSet; 如果为False,则报错 - :return self + :return: self """ for field_name in field_names: for name, dataset in self.datasets.items(): diff --git a/fastNLP/io/pipe/conll.py b/fastNLP/io/pipe/conll.py index 2efec8e0..eb7d4909 100644 --- a/fastNLP/io/pipe/conll.py +++ b/fastNLP/io/pipe/conll.py @@ -51,7 +51,7 @@ class _NERPipe(Pipe): "[AL-AIN, United, Arab, ...]", "[B-LOC, B-LOC, I-LOC, ...]" "[...]", "[...]" - :param DataBundle data_bundle: 传入的DataBundle中的DataSet必须包含raw_words和ner两个field,且两个field的内容均为List[str]。 + :param ~fastNLP.DataBundle data_bundle: 传入的DataBundle中的DataSet必须包含raw_words和ner两个field,且两个field的内容均为List[str]。 在传入DataBundle基础上原位修改。 :return: DataBundle """ @@ -244,7 +244,7 @@ class _CNNERPipe(Pipe): raw_chars列为List[str], 是未转换的原始数据; chars列为List[int],是转换为index的输入数据; target列是List[int],是转换为index的 target。返回的DataSet中被设置为input有chars, target, seq_len; 设置为target有target。 - :param DataBundle data_bundle: 传入的DataBundle中的DataSet必须包含raw_words和ner两个field,且两个field的内容均为List[str]。 + :param ~fastNLP.DataBundle data_bundle: 传入的DataBundle中的DataSet必须包含raw_words和ner两个field,且两个field的内容均为List[str]。 在传入DataBundle基础上原位修改。 :return: DataBundle """ diff --git a/fastNLP/io/pipe/matching.py b/fastNLP/io/pipe/matching.py index 699438c8..747e7b44 100644 --- a/fastNLP/io/pipe/matching.py +++ b/fastNLP/io/pipe/matching.py @@ -177,7 +177,7 @@ class MatchingPipe(Pipe): def _tokenize(self, data_bundle, field_names, new_field_names): """ - :param DataBundle data_bundle: DataBundle. + :param ~fastNLP.DataBundle data_bundle: DataBundle. :param list field_names: List[str], 需要tokenize的field名称 :param list new_field_names: List[str], tokenize之后field的名称,与field_names一一对应。 :return: 输入的DataBundle对象 @@ -199,7 +199,7 @@ class MatchingPipe(Pipe): "This site includes a...", "The Government Executive...", "not_entailment" "...", "..." 
- :param data_bundle: 通过loader读取得到的data_bundle,里面包含了数据集的原始数据内容 + :param ~fastNLP.DataBundle data_bundle: 通过loader读取得到的data_bundle,里面包含了数据集的原始数据内容 :return: data_bundle """ data_bundle = self._tokenize(data_bundle, [Const.RAW_WORDS(0), Const.RAW_WORDS(1)], diff --git a/fastNLP/io/pipe/pipe.py b/fastNLP/io/pipe/pipe.py index a1435fd3..12d9c1cb 100644 --- a/fastNLP/io/pipe/pipe.py +++ b/fastNLP/io/pipe/pipe.py @@ -15,7 +15,7 @@ class Pipe: """ 对输入的DataBundle进行处理,然后返回该DataBundle。 - :param data_bundle: 需要处理的DataBundle对象 + :param ~fastNLP.DataBundle data_bundle: 需要处理的DataBundle对象 :return: """ raise NotImplementedError diff --git a/fastNLP/io/pipe/utils.py b/fastNLP/io/pipe/utils.py index f32f58b7..ea7e0aa8 100644 --- a/fastNLP/io/pipe/utils.py +++ b/fastNLP/io/pipe/utils.py @@ -92,7 +92,7 @@ def _indexize(data_bundle, input_field_names=Const.INPUT, target_field_names=Con """ 在dataset中的field_name列建立词表,Const.TARGET列建立词表,并把词表加入到data_bundle中。 - :param data_bundle: + :param ~fastNLP.DataBundle data_bundle: :param: str,list input_field_names: :param: str,list target_field_names: 这一列的vocabulary没有unknown和padding :return: @@ -154,7 +154,7 @@ def _drop_empty_instance(data_bundle, field_name): """ 删除data_bundle的DataSet中存在的某个field为空的情况 - :param data_bundle: DataBundle + :param ~fastNLP.DataBundle data_bundle: :param str field_name: 对哪个field进行检查,如果为None,则任意field为空都会删掉 :return: 传入的DataBundle """ From 6201f661789e36c4e1e116846cc84d586aca2abd Mon Sep 17 00:00:00 2001 From: yh_cc Date: Wed, 28 Aug 2019 22:56:02 +0800 Subject: [PATCH 06/50] =?UTF-8?q?Trainer=E4=B8=AD=E4=BF=9D=E5=AD=98?= =?UTF-8?q?=E6=9C=80=E4=BD=B3=E6=A8=A1=E5=9E=8B=E5=AD=98=E5=9C=A8bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastNLP/core/trainer.py b/fastNLP/core/trainer.py index 290a89c1..61969c2e 100644 --- a/fastNLP/core/trainer.py +++ b/fastNLP/core/trainer.py @@ -718,7 +718,7 @@ class Trainer(object): self._save_model(self.model, "best_" + "_".join([self.model.__class__.__name__, self.metric_key, self.start_time])) elif self._load_best_model: - self._best_model_states = {name: param.cpu().clone() for name, param in self.model.named_parameters()} + self._best_model_states = {name: param.cpu().clone() for name, param in self.model.state_dict()} self.best_dev_perf = res self.best_dev_epoch = epoch self.best_dev_step = step From a46b8f129b88ef5b53692f18cf609ceeb31e48c0 Mon Sep 17 00:00:00 2001 From: yh_cc Date: Wed, 28 Aug 2019 23:06:13 +0800 Subject: [PATCH 07/50] =?UTF-8?q?Trainer=E4=B8=AD=E4=BF=9D=E5=AD=98?= =?UTF-8?q?=E6=9C=80=E4=BD=B3=E6=A8=A1=E5=9E=8B=E5=AD=98=E5=9C=A8bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastNLP/core/trainer.py b/fastNLP/core/trainer.py index 61969c2e..a47f108b 100644 --- a/fastNLP/core/trainer.py +++ b/fastNLP/core/trainer.py @@ -718,7 +718,7 @@ class Trainer(object): self._save_model(self.model, "best_" + "_".join([self.model.__class__.__name__, self.metric_key, self.start_time])) elif self._load_best_model: - self._best_model_states = {name: param.cpu().clone() for name, param in self.model.state_dict()} + self._best_model_states = {name: param.cpu().clone() for name, param in self.model.state_dict().items()} self.best_dev_perf = res self.best_dev_epoch = epoch self.best_dev_step = step From 
55e736bf4c9020ce404400b605d1c2febd8d0766 Mon Sep 17 00:00:00 2001 From: yh_cc Date: Wed, 28 Aug 2019 23:53:20 +0800 Subject: [PATCH 08/50] =?UTF-8?q?SpanFMetric=E5=A2=9E=E5=8A=A0=E5=AF=B9enc?= =?UTF-8?q?oding=5Ftype=E5=92=8Ctag=5Fvocab=E7=9A=84=E6=A3=80=E6=9F=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/metrics.py | 26 ++++++++++++++++++++++++++ test/core/test_metrics.py | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) diff --git a/fastNLP/core/metrics.py b/fastNLP/core/metrics.py index 1d1e3819..28d88fbc 100644 --- a/fastNLP/core/metrics.py +++ b/fastNLP/core/metrics.py @@ -23,6 +23,7 @@ from .utils import _get_func_signature from .utils import seq_len_to_mask from .vocabulary import Vocabulary from abc import abstractmethod +import warnings class MetricBase(object): @@ -492,6 +493,30 @@ def _bio_tag_to_spans(tags, ignore_labels=None): return [(span[0], (span[1][0], span[1][1] + 1)) for span in spans if span[0] not in ignore_labels] +def _check_tag_vocab_and_encoding_type(vocab:Vocabulary, encoding_type:str): + """ + 检查vocab中的tag是否与encoding_type是匹配的 + + :param vocab: target的Vocabulary + :param encoding_type: bio, bmes, bioes, bmeso + :return: + """ + tag_set = set() + for tag, idx in vocab: + if idx in (vocab.unknown_idx, vocab.padding_idx): + continue + tag = tag[:1] + tag_set.add(tag) + tags = encoding_type + for tag in tag_set: + assert tag in tags, f"{tag} is not a valid tag in encoding type:{encoding_type}. Please check your " \ + f"encoding_type." + tags = tags.replace(tag, '') # 删除该值 + if tags: # 如果不为空,说明出现了未使用的tag + warnings.warn(f"Tag:{tags} in encoding type:{encoding_type} is not presented in your Vocabulary. Check your " + "encoding_type.") + + class SpanFPreRecMetric(MetricBase): r""" 别名::class:`fastNLP.SpanFPreRecMetric` :class:`fastNLP.core.metrics.SpanFPreRecMetric` @@ -546,6 +571,7 @@ class SpanFPreRecMetric(MetricBase): raise ValueError("f_type only supports `micro` or `macro`', got {}.".format(f_type)) self.encoding_type = encoding_type + _check_tag_vocab_and_encoding_type(tag_vocab, encoding_type) if self.encoding_type == 'bmes': self.tag_to_span_func = _bmes_tag_to_spans elif self.encoding_type == 'bio': diff --git a/test/core/test_metrics.py b/test/core/test_metrics.py index 236066d6..5a7c55cf 100644 --- a/test/core/test_metrics.py +++ b/test/core/test_metrics.py @@ -338,6 +338,41 @@ class SpanF1PreRecMetric(unittest.TestCase): for key, value in expected_metric.items(): self.assertAlmostEqual(value, metric_value[key], places=5) + def test_encoding_type(self): + # 检查传入的tag_vocab与encoding_type不符合时,是否会报错 + vocabs = {} + import random + from itertools import product + for encoding_type in ['bio', 'bioes', 'bmeso']: + vocab = Vocabulary(unknown=None, padding=None) + for i in range(random.randint(10, 100)): + label = str(random.randint(1, 10)) + for tag in encoding_type: + if tag!='o': + vocab.add_word(f'{tag}-{label}') + else: + vocab.add_word('o') + vocabs[encoding_type] = vocab + for e1, e2 in product(['bio', 'bioes', 'bmeso'], ['bio', 'bioes', 'bmeso']): + with self.subTest(e1=e1, e2=e2): + if e1==e2: + metric = SpanFPreRecMetric(vocabs[e1], encoding_type=e2) + else: + s2 = set(e2) + s2.update(set(e1)) + if s2==set(e2): + continue + with self.assertRaises(AssertionError): + metric = SpanFPreRecMetric(vocabs[e1], encoding_type=e2) + for encoding_type in ['bio', 'bioes', 'bmeso']: + with self.assertRaises(AssertionError): + metric = SpanFPreRecMetric(vocabs[encoding_type], 
encoding_type='bmes') + + with self.assertWarns(Warning): + vocab = Vocabulary(unknown=None, padding=None).add_word_lst(list('bmes')) + metric = SpanFPreRecMetric(vocab, encoding_type='bmeso') + vocab = Vocabulary().add_word_lst(list('bmes')) + metric = SpanFPreRecMetric(vocab, encoding_type='bmeso') class TestUsefulFunctions(unittest.TestCase): # 测试metrics.py中一些看上去挺有用的函数 From cbe5b347e54ce5181887743c62b06aabcd00b778 Mon Sep 17 00:00:00 2001 From: yh_cc Date: Wed, 28 Aug 2019 23:53:53 +0800 Subject: [PATCH 09/50] =?UTF-8?q?SpanFMetric=E5=A2=9E=E5=8A=A0=E5=AF=B9enc?= =?UTF-8?q?oding=5Ftype=E5=92=8Ctag=5Fvocab=E7=9A=84=E6=A3=80=E6=9F=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastNLP/core/metrics.py b/fastNLP/core/metrics.py index 28d88fbc..0dc601a3 100644 --- a/fastNLP/core/metrics.py +++ b/fastNLP/core/metrics.py @@ -505,7 +505,7 @@ def _check_tag_vocab_and_encoding_type(vocab:Vocabulary, encoding_type:str): for tag, idx in vocab: if idx in (vocab.unknown_idx, vocab.padding_idx): continue - tag = tag[:1] + tag = tag[:1].lower() tag_set.add(tag) tags = encoding_type for tag in tag_set: From 5d8a8c98c6997fda7afa236de8523c0c1916201d Mon Sep 17 00:00:00 2001 From: xuyige Date: Thu, 29 Aug 2019 01:43:04 +0800 Subject: [PATCH 10/50] 1. delete io/data_loader dir; 2. delete model/enas*; 3. delete legacy dir; 4. delete DateSetLoader and relevant codes; 5. fix a test code error in core/test_dataset.py; 6. delete io.BaseLoader and relevant code. --- fastNLP/io/__init__.py | 1 - fastNLP/io/data_bundle.py | 197 +---------- fastNLP/io/data_loader/__init__.py | 39 --- fastNLP/io/data_loader/conll.py | 109 ------ fastNLP/io/data_loader/imdb.py | 99 ------ fastNLP/io/data_loader/matching.py | 248 ------------- fastNLP/io/data_loader/mnli.py | 62 ---- fastNLP/io/data_loader/mtl.py | 68 ---- fastNLP/io/data_loader/people_daily.py | 85 ----- fastNLP/io/data_loader/qnli.py | 47 --- fastNLP/io/data_loader/quora.py | 34 -- fastNLP/io/data_loader/rte.py | 47 --- fastNLP/io/data_loader/snli.py | 46 --- fastNLP/io/data_loader/sst.py | 180 ---------- fastNLP/io/data_loader/yelp.py | 132 ------- fastNLP/io/dataset_loader.py | 121 ------- fastNLP/io/embed_loader.py | 13 +- fastNLP/io/model_io.py | 4 +- fastNLP/models/enas_controller.py | 228 ------------ fastNLP/models/enas_model.py | 393 --------------------- fastNLP/models/enas_trainer.py | 384 -------------------- fastNLP/models/enas_utils.py | 58 ---- legacy/api/README.md | 44 --- legacy/api/__init__.py | 2 - legacy/api/api.py | 463 ------------------------- legacy/api/converter.py | 181 ---------- legacy/api/examples.py | 56 --- legacy/api/pipeline.py | 33 -- legacy/api/processor.py | 428 ----------------------- legacy/api/utils.py | 134 ------- legacy/automl/__init__.py | 0 legacy/automl/enas_controller.py | 223 ------------ legacy/automl/enas_model.py | 388 --------------------- legacy/automl/enas_trainer.py | 383 -------------------- legacy/automl/enas_utils.py | 53 --- legacy/component/__init__.py | 1 - legacy/component/bert_tokenizer.py | 378 -------------------- test/core/test_dataset.py | 5 +- test/io/test_data_loader.py | 15 - test/io/test_dataset_loader.py | 77 ---- 40 files changed, 14 insertions(+), 5445 deletions(-) delete mode 100644 fastNLP/io/data_loader/__init__.py delete mode 100644 fastNLP/io/data_loader/conll.py delete mode 100644 fastNLP/io/data_loader/imdb.py delete mode 100644 
fastNLP/io/data_loader/matching.py delete mode 100644 fastNLP/io/data_loader/mnli.py delete mode 100644 fastNLP/io/data_loader/mtl.py delete mode 100644 fastNLP/io/data_loader/people_daily.py delete mode 100644 fastNLP/io/data_loader/qnli.py delete mode 100644 fastNLP/io/data_loader/quora.py delete mode 100644 fastNLP/io/data_loader/rte.py delete mode 100644 fastNLP/io/data_loader/snli.py delete mode 100644 fastNLP/io/data_loader/sst.py delete mode 100644 fastNLP/io/data_loader/yelp.py delete mode 100644 fastNLP/io/dataset_loader.py delete mode 100644 fastNLP/models/enas_controller.py delete mode 100644 fastNLP/models/enas_model.py delete mode 100644 fastNLP/models/enas_trainer.py delete mode 100644 fastNLP/models/enas_utils.py delete mode 100644 legacy/api/README.md delete mode 100644 legacy/api/__init__.py delete mode 100644 legacy/api/api.py delete mode 100644 legacy/api/converter.py delete mode 100644 legacy/api/examples.py delete mode 100644 legacy/api/pipeline.py delete mode 100644 legacy/api/processor.py delete mode 100644 legacy/api/utils.py delete mode 100644 legacy/automl/__init__.py delete mode 100644 legacy/automl/enas_controller.py delete mode 100644 legacy/automl/enas_model.py delete mode 100644 legacy/automl/enas_trainer.py delete mode 100644 legacy/automl/enas_utils.py delete mode 100644 legacy/component/__init__.py delete mode 100644 legacy/component/bert_tokenizer.py delete mode 100644 test/io/test_data_loader.py delete mode 100644 test/io/test_dataset_loader.py diff --git a/fastNLP/io/__init__.py b/fastNLP/io/__init__.py index 8ed1956a..251b7292 100644 --- a/fastNLP/io/__init__.py +++ b/fastNLP/io/__init__.py @@ -82,7 +82,6 @@ __all__ = [ from .embed_loader import EmbedLoader from .data_bundle import DataBundle -from .dataset_loader import CSVLoader, JsonLoader from .model_io import ModelLoader, ModelSaver from .loader import * diff --git a/fastNLP/io/data_bundle.py b/fastNLP/io/data_bundle.py index 10f924f0..969730a3 100644 --- a/fastNLP/io/data_bundle.py +++ b/fastNLP/io/data_bundle.py @@ -6,112 +6,10 @@ __all__ = [ 'DataBundle', ] -import _pickle as pickle -import os -from typing import Union, Dict - from ..core.dataset import DataSet from ..core.vocabulary import Vocabulary -class BaseLoader(object): - """ - 各个 Loader 的基类,提供了 API 的参考。 - - """ - - def __init__(self): - super(BaseLoader, self).__init__() - - @staticmethod - def load_lines(data_path): - """ - 按行读取,舍弃每行两侧空白字符,返回list of str - - :param data_path: 读取数据的路径 - """ - with open(data_path, "r", encoding="utf=8") as f: - text = f.readlines() - return [line.strip() for line in text] - - @classmethod - def load(cls, data_path): - """ - 先按行读取,去除一行两侧空白,再提取每行的字符。返回list of list of str - - :param data_path: - """ - with open(data_path, "r", encoding="utf-8") as f: - text = f.readlines() - return [[word for word in sent.strip()] for sent in text] - - @classmethod - def load_with_cache(cls, data_path, cache_path): - """缓存版的load - """ - if os.path.isfile(cache_path) and os.path.getmtime(data_path) < os.path.getmtime(cache_path): - with open(cache_path, 'rb') as f: - return pickle.load(f) - else: - obj = cls.load(data_path) - with open(cache_path, 'wb') as f: - pickle.dump(obj, f) - return obj - - -def _download_from_url(url, path): - try: - from tqdm.auto import tqdm - except: - from ..core.utils import _pseudo_tqdm as tqdm - import requests - - """Download file""" - r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}, stream=True) - chunk_size = 16 * 1024 - total_size = int(r.headers.get('Content-length', 0)) - 
with open(path, "wb") as file, \ - tqdm(total=total_size, unit='B', unit_scale=1, desc=path.split('/')[-1]) as t: - for chunk in r.iter_content(chunk_size): - if chunk: - file.write(chunk) - t.update(len(chunk)) - - -def _uncompress(src, dst): - import zipfile - import gzip - import tarfile - import os - - def unzip(src, dst): - with zipfile.ZipFile(src, 'r') as f: - f.extractall(dst) - - def ungz(src, dst): - with gzip.open(src, 'rb') as f, open(dst, 'wb') as uf: - length = 16 * 1024 # 16KB - buf = f.read(length) - while buf: - uf.write(buf) - buf = f.read(length) - - def untar(src, dst): - with tarfile.open(src, 'r:gz') as f: - f.extractall(dst) - - fn, ext = os.path.splitext(src) - _, ext_2 = os.path.splitext(fn) - if ext == '.zip': - unzip(src, dst) - elif ext == '.gz' and ext_2 != '.tar': - ungz(src, dst) - elif (ext == '.gz' and ext_2 == '.tar') or ext_2 == '.tgz': - untar(src, dst) - else: - raise ValueError('unsupported file {}'.format(src)) - - class DataBundle: """ 经过处理的数据信息,包括一系列数据集(比如:分开的训练集、验证集和测试集)以及各个field对应的vocabulary。该对象一般由fastNLP中各种 @@ -154,7 +52,7 @@ class DataBundle: self.datasets[name] = dataset return self - def get_dataset(self, name:str)->DataSet: + def get_dataset(self, name: str) -> DataSet: """ 获取名为name的dataset @@ -163,7 +61,7 @@ class DataBundle: """ return self.datasets[name] - def delete_dataset(self, name:str): + def delete_dataset(self, name: str): """ 删除名为name的DataSet @@ -173,7 +71,7 @@ class DataBundle: self.datasets.pop(name, None) return self - def get_vocab(self, field_name:str)->Vocabulary: + def get_vocab(self, field_name: str) -> Vocabulary: """ 获取field名为field_name对应的vocab @@ -182,7 +80,7 @@ class DataBundle: """ return self.vocabs[field_name] - def delete_vocab(self, field_name:str): + def delete_vocab(self, field_name: str): """ 删除vocab :param str field_name: @@ -312,90 +210,3 @@ class DataBundle: return _str -class DataSetLoader: - """ - 别名::class:`fastNLP.io.DataSetLoader` :class:`fastNLP.io.dataset_loader.DataSetLoader` - - 定义了各种 DataSetLoader 所需的API 接口,开发者应该继承它实现各种的 DataSetLoader。 - - 开发者至少应该编写如下内容: - - - _load 函数:从一个数据文件中读取数据到一个 :class:`~fastNLP.DataSet` - - load 函数(可以使用基类的方法):从一个或多个数据文件中读取数据到一个或多个 :class:`~fastNLP.DataSet` - - process 函数:一个或多个从数据文件中读取数据,并处理成可以训练的一个或多个 :class:`~fastNLP.DataSet` - - **process 函数中可以 调用load 函数或 _load 函数** - - """ - URL = '' - DATA_DIR = '' - - ROOT_DIR = '.fastnlp/datasets/' - UNCOMPRESS = True - - def _download(self, url: str, pdir: str, uncompress=True) -> str: - """ - - 从 ``url`` 下载数据到 ``path``, 如果 ``uncompress`` 为 ``True`` ,自动解压。 - - :param url: 下载的网站 - :param pdir: 下载到的目录 - :param uncompress: 是否自动解压缩 - :return: 数据的存放路径 - """ - fn = os.path.basename(url) - path = os.path.join(pdir, fn) - """check data exists""" - if not os.path.exists(path): - os.makedirs(pdir, exist_ok=True) - _download_from_url(url, path) - if uncompress: - dst = os.path.join(pdir, 'data') - if not os.path.exists(dst): - _uncompress(path, dst) - return dst - return path - - def download(self): - return self._download( - self.URL, - os.path.join(self.ROOT_DIR, self.DATA_DIR), - uncompress=self.UNCOMPRESS) - - def load(self, paths: Union[str, Dict[str, str]]) -> Union[DataSet, Dict[str, DataSet]]: - """ - 从指定一个或多个路径中的文件中读取数据,返回一个或多个数据集 :class:`~fastNLP.DataSet` 。 - 如果处理多个路径,传入的 dict 中的 key 与返回的 dict 中的 key 保存一致。 - - :param Union[str, Dict[str, str]] paths: 文件路径 - :return: :class:`~fastNLP.DataSet` 类的对象或存储多个 :class:`~fastNLP.DataSet` 的字典 - """ - if isinstance(paths, str): - return self._load(paths) - return {name: self._load(path) for name, 
path in paths.items()} - - def _load(self, path: str) -> DataSet: - """从指定路径的文件中读取数据,返回 :class:`~fastNLP.DataSet` 类型的对象 - - :param str path: 文件路径 - :return: 一个 :class:`~fastNLP.DataSet` 类型的对象 - """ - raise NotImplementedError - - def process(self, paths: Union[str, Dict[str, str]], **options) -> DataBundle: - """ - 对于特定的任务和数据集,读取并处理数据,返回处理DataInfo类对象或字典。 - - 从指定一个或多个路径中的文件中读取数据,DataInfo对象中可以包含一个或多个数据集 。 - 如果处理多个路径,传入的 dict 的 key 与返回DataInfo中的 dict 中的 key 保存一致。 - - 返回的 :class:`DataBundle` 对象有如下属性: - - - vocabs: 由从数据集中获取的词表组成的字典,每个词表 - - datasets: 一个dict,包含一系列 :class:`~fastNLP.DataSet` 类型的对象。其中 field 的命名参考 :mod:`~fastNLP.core.const` - - :param paths: 原始数据读取的路径 - :param options: 根据不同的任务和数据集,设计自己的参数 - :return: 返回一个 DataBundle - """ - raise NotImplementedError diff --git a/fastNLP/io/data_loader/__init__.py b/fastNLP/io/data_loader/__init__.py deleted file mode 100644 index 8a9dd60b..00000000 --- a/fastNLP/io/data_loader/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -"""undocumented -.. warning:: - - 本模块在 `0.5.0版本` 中被废弃,由 :mod:`~fastNLP.io.loader` 和 :mod:`~fastNLP.io.pipe` 模块替代。 - -用于读数据集的模块, 可以读取文本分类、序列标注、Matching任务的数据集 - -这些模块的具体介绍如下,您可以通过阅读 :doc:`教程` 来进行了解。 -""" -__all__ = [ - 'ConllLoader', - 'Conll2003Loader', - 'IMDBLoader', - 'MatchingLoader', - 'SNLILoader', - 'MNLILoader', - 'MTL16Loader', - 'PeopleDailyCorpusLoader', - 'QNLILoader', - 'QuoraLoader', - 'RTELoader', - 'SSTLoader', - 'SST2Loader', - 'YelpLoader', -] - - -from .conll import ConllLoader, Conll2003Loader -from .imdb import IMDBLoader -from .matching import MatchingLoader -from .mnli import MNLILoader -from .mtl import MTL16Loader -from .people_daily import PeopleDailyCorpusLoader -from .qnli import QNLILoader -from .quora import QuoraLoader -from .rte import RTELoader -from .snli import SNLILoader -from .sst import SSTLoader, SST2Loader -from .yelp import YelpLoader diff --git a/fastNLP/io/data_loader/conll.py b/fastNLP/io/data_loader/conll.py deleted file mode 100644 index 31a90881..00000000 --- a/fastNLP/io/data_loader/conll.py +++ /dev/null @@ -1,109 +0,0 @@ - -from ...core.dataset import DataSet -from ...core.instance import Instance -from ..data_bundle import DataSetLoader -from ..file_reader import _read_conll -from typing import Union, Dict -from ..utils import check_loader_paths -from ..data_bundle import DataBundle - -class ConllLoader(DataSetLoader): - """ - 别名::class:`fastNLP.io.ConllLoader` :class:`fastNLP.io.data_loader.ConllLoader` - - 该ConllLoader支持读取的数据格式: 以空行隔开两个sample,除了分割行,每一行用空格或者制表符隔开不同的元素。如下例所示: - - Example:: - - # 文件中的内容 - Nadim NNP B-NP B-PER - Ladki NNP I-NP I-PER - - AL-AIN NNP B-NP B-LOC - United NNP B-NP B-LOC - Arab NNP I-NP I-LOC - Emirates NNPS I-NP I-LOC - 1996-12-06 CD I-NP O - ... 
- - # 如果用以下的参数读取,返回的DataSet将包含raw_words和pos两个field, 这两个field的值分别取自于第0列与第1列 - dataset = ConllLoader(headers=['raw_words', 'pos'], indexes=[0, 1])._load('/path/to/train.conll') - # 如果用以下的参数读取,返回的DataSet将包含raw_words和ner两个field, 这两个field的值分别取自于第0列与第2列 - dataset = ConllLoader(headers=['raw_words', 'ner'], indexes=[0, 3])._load('/path/to/train.conll') - # 如果用以下的参数读取,返回的DataSet将包含raw_words, pos和ner三个field - dataset = ConllLoader(headers=['raw_words', 'pos', 'ner'], indexes=[0, 1, 3])._load('/path/to/train.conll') - - dataset = ConllLoader(headers=['raw_words', 'pos'], indexes=[0, 1])._load('/path/to/train.conll')中DataSet的raw_words - 列与pos列的内容都是List[str] - - 数据中以"-DOCSTART-"开头的行将被忽略,因为该符号在conll 2003中被用为文档分割符。 - - :param list headers: 每一列数据的名称,需为List or Tuple of str。``header`` 与 ``indexes`` 一一对应 - :param list indexes: 需要保留的数据列下标,从0开始。若为 ``None`` ,则所有列都保留。Default: ``None`` - :param bool dropna: 是否忽略非法数据,若 ``False`` ,遇到非法数据时抛出 ``ValueError`` 。Default: ``True`` - """ - - def __init__(self, headers, indexes=None, dropna=True): - super(ConllLoader, self).__init__() - if not isinstance(headers, (list, tuple)): - raise TypeError( - 'invalid headers: {}, should be list of strings'.format(headers)) - self.headers = headers - self.dropna = dropna - if indexes is None: - self.indexes = list(range(len(self.headers))) - else: - if len(indexes) != len(headers): - raise ValueError - self.indexes = indexes - - def _load(self, path): - """ - 传入的一个文件路径,将该文件读入DataSet中,field由Loader初始化时指定的headers决定。 - - :param str path: 文件的路径 - :return: DataSet - """ - ds = DataSet() - for idx, data in _read_conll(path, indexes=self.indexes, dropna=self.dropna): - ins = {h: data[i] for i, h in enumerate(self.headers)} - ds.append(Instance(**ins)) - return ds - - def load(self, paths: Union[str, Dict[str, str]]) -> DataBundle: - """ - 从指定一个或多个路径中的文件中读取数据,返回:class:`~fastNLP.io.DataBundle` 。 - - 读取的field根据ConllLoader初始化时传入的headers决定。 - - :param Union[str, Dict[str, str]] paths: - :return: :class:`~fastNLP.DataSet` 类的对象或 :class:`~fastNLP.io.DataBundle` 的字典 - """ - paths = check_loader_paths(paths) - datasets = {name: self._load(path) for name, path in paths.items()} - data_bundle = DataBundle(datasets=datasets) - return data_bundle - - -class Conll2003Loader(ConllLoader): - """ - 别名::class:`fastNLP.io.Conll2003Loader` :class:`fastNLP.io.data_loader.Conll2003Loader` - - 该Loader用以读取Conll2003数据,conll2003的数据可以在https://github.com/davidsbatista/NER-datasets/tree/master/CONLL2003 - 找到。数据中以"-DOCSTART-"开头的行将被忽略,因为该符号在conll 2003中被用为文档分割符。 - - 返回的DataSet将具有以下['raw_words', 'pos', 'chunks', 'ner']四个field, 每个field中的内容都是List[str]。 - - .. csv-table:: Conll2003Loader处理之 :header: "raw_words", "words", "target", "seq_len" - - "[Nadim, Ladki]", "[1, 2]", "[1, 2]", 2 - "[AL-AIN, United, Arab, ...]", "[3, 4, 5,...]", "[3, 4]", 5 - "[...]", "[...]", "[...]", . 
- - """ - - def __init__(self): - headers = [ - 'raw_words', 'pos', 'chunks', 'ner', - ] - super(Conll2003Loader, self).__init__(headers=headers) diff --git a/fastNLP/io/data_loader/imdb.py b/fastNLP/io/data_loader/imdb.py deleted file mode 100644 index c9dda76e..00000000 --- a/fastNLP/io/data_loader/imdb.py +++ /dev/null @@ -1,99 +0,0 @@ - -from typing import Union, Dict - -from ..embed_loader import EmbeddingOption, EmbedLoader -from ..data_bundle import DataSetLoader, DataBundle -from ...core.vocabulary import VocabularyOption, Vocabulary -from ...core.dataset import DataSet -from ...core.instance import Instance -from ...core.const import Const - -from ..utils import get_tokenizer - - -class IMDBLoader(DataSetLoader): - """ - 别名::class:`fastNLP.io.IMDBLoader` :class:`fastNLP.io.data_loader.IMDBLoader` - - 读取IMDB数据集,DataSet包含以下fields: - - words: list(str), 需要分类的文本 - - target: str, 文本的标签 - - """ - - def __init__(self): - super(IMDBLoader, self).__init__() - self.tokenizer = get_tokenizer() - - def _load(self, path): - dataset = DataSet() - with open(path, 'r', encoding="utf-8") as f: - for line in f: - line = line.strip() - if not line: - continue - parts = line.split('\t') - target = parts[0] - words = self.tokenizer(parts[1].lower()) - dataset.append(Instance(words=words, target=target)) - - if len(dataset) == 0: - raise RuntimeError(f"{path} has no valid data.") - - return dataset - - def process(self, - paths: Union[str, Dict[str, str]], - src_vocab_opt: VocabularyOption = None, - tgt_vocab_opt: VocabularyOption = None, - char_level_op=False): - - datasets = {} - info = DataBundle() - for name, path in paths.items(): - dataset = self.load(path) - datasets[name] = dataset - - def wordtochar(words): - chars = [] - for word in words: - word = word.lower() - for char in word: - chars.append(char) - chars.append('') - chars.pop() - return chars - - if char_level_op: - for dataset in datasets.values(): - dataset.apply_field(wordtochar, field_name="words", new_field_name='chars') - - datasets["train"], datasets["dev"] = datasets["train"].split(0.1, shuffle=False) - - src_vocab = Vocabulary() if src_vocab_opt is None else Vocabulary(**src_vocab_opt) - src_vocab.from_dataset(datasets['train'], field_name='words') - - src_vocab.index_dataset(*datasets.values(), field_name='words') - - tgt_vocab = Vocabulary(unknown=None, padding=None) \ - if tgt_vocab_opt is None else Vocabulary(**tgt_vocab_opt) - tgt_vocab.from_dataset(datasets['train'], field_name='target') - tgt_vocab.index_dataset(*datasets.values(), field_name='target') - - info.vocabs = { - Const.INPUT: src_vocab, - Const.TARGET: tgt_vocab - } - - info.datasets = datasets - - for name, dataset in info.datasets.items(): - dataset.set_input(Const.INPUT) - dataset.set_target(Const.TARGET) - - return info - - - diff --git a/fastNLP/io/data_loader/matching.py b/fastNLP/io/data_loader/matching.py deleted file mode 100644 index 41c9a98d..00000000 --- a/fastNLP/io/data_loader/matching.py +++ /dev/null @@ -1,248 +0,0 @@ -import os - -from typing import Union, Dict, List - -from ...core.const import Const -from ...core.vocabulary import Vocabulary -from ..data_bundle import DataBundle, DataSetLoader -from ..file_utils import _get_base_url, cached_path, PRETRAINED_BERT_MODEL_DIR -from ...modules.encoder.bert import BertTokenizer - - -class MatchingLoader(DataSetLoader): - """ - 别名::class:`fastNLP.io.MatchingLoader` :class:`fastNLP.io.data_loader.MatchingLoader` - - 读取Matching任务的数据集 - - :param dict paths: key是数据集名称(如train、dev、test),value是对应的文件名 - 
""" - - def __init__(self, paths: dict=None): - self.paths = paths - - def _load(self, path): - """ - :param str path: 待读取数据集的路径名 - :return: fastNLP.DataSet ds: 返回一个DataSet对象,里面必须包含3个field:其中两个分别为两个句子 - 的原始字符串文本,第三个为标签 - """ - raise NotImplementedError - - def process(self, paths: Union[str, Dict[str, str]], dataset_name: str=None, - to_lower=False, seq_len_type: str=None, bert_tokenizer: str=None, - cut_text: int = None, get_index=True, auto_pad_length: int=None, - auto_pad_token: str='', set_input: Union[list, str, bool]=True, - set_target: Union[list, str, bool]=True, concat: Union[str, list, bool]=None, - extra_split: List[str]=None, ) -> DataBundle: - """ - :param paths: str或者Dict[str, str]。如果是str,则为数据集所在的文件夹或者是全路径文件名:如果是文件夹, - 则会从self.paths里面找对应的数据集名称与文件名。如果是Dict,则为数据集名称(如train、dev、test)和 - 对应的全路径文件名。 - :param str dataset_name: 如果在paths里传入的是一个数据集的全路径文件名,那么可以用dataset_name来定义 - 这个数据集的名字,如果不定义则默认为train。 - :param bool to_lower: 是否将文本自动转为小写。默认值为False。 - :param str seq_len_type: 提供的seq_len类型,支持 ``seq_len`` :提供一个数字作为句子长度; ``mask`` : - 提供一个0/1的mask矩阵作为句子长度; ``bert`` :提供segment_type_id(第一个句子为0,第二个句子为1)和 - attention mask矩阵(0/1的mask矩阵)。默认值为None,即不提供seq_len - :param str bert_tokenizer: bert tokenizer所使用的词表所在的文件夹路径 - :param int cut_text: 将长于cut_text的内容截掉。默认为None,即不截。 - :param bool get_index: 是否需要根据词表将文本转为index - :param int auto_pad_length: 是否需要将文本自动pad到一定长度(超过这个长度的文本将会被截掉),默认为不会自动pad - :param str auto_pad_token: 自动pad的内容 - :param set_input: 如果为True,则会自动将相关的field(名字里含有Const.INPUT的)设置为input,如果为False - 则不会将任何field设置为input。如果传入str或者List[str],则会根据传入的内容将相对应的field设置为input, - 于此同时其他field不会被设置为input。默认值为True。 - :param set_target: set_target将控制哪些field可以被设置为target,用法与set_input一致。默认值为True。 - :param concat: 是否需要将两个句子拼接起来。如果为False则不会拼接。如果为True则会在两个句子之间插入一个。 - 如果传入一个长度为4的list,则分别表示插在第一句开始前、第一句结束后、第二句开始前、第二句结束后的标识符。如果 - 传入字符串 ``bert`` ,则会采用bert的拼接方式,等价于['[CLS]', '[SEP]', '', '[SEP]']. 
- :param extra_split: 额外的分隔符,即除了空格之外的用于分词的字符。 - :return: - """ - if isinstance(set_input, str): - set_input = [set_input] - if isinstance(set_target, str): - set_target = [set_target] - if isinstance(set_input, bool): - auto_set_input = set_input - else: - auto_set_input = False - if isinstance(set_target, bool): - auto_set_target = set_target - else: - auto_set_target = False - if isinstance(paths, str): - if os.path.isdir(paths): - path = {n: os.path.join(paths, self.paths[n]) for n in self.paths.keys()} - else: - path = {dataset_name if dataset_name is not None else 'train': paths} - else: - path = paths - - data_info = DataBundle() - for data_name in path.keys(): - data_info.datasets[data_name] = self._load(path[data_name]) - - for data_name, data_set in data_info.datasets.items(): - if auto_set_input: - data_set.set_input(Const.INPUTS(0), Const.INPUTS(1)) - if auto_set_target: - if Const.TARGET in data_set.get_field_names(): - data_set.set_target(Const.TARGET) - - if extra_split is not None: - for data_name, data_set in data_info.datasets.items(): - data_set.apply(lambda x: ' '.join(x[Const.INPUTS(0)]), new_field_name=Const.INPUTS(0)) - data_set.apply(lambda x: ' '.join(x[Const.INPUTS(1)]), new_field_name=Const.INPUTS(1)) - - for s in extra_split: - data_set.apply(lambda x: x[Const.INPUTS(0)].replace(s, ' ' + s + ' '), - new_field_name=Const.INPUTS(0)) - data_set.apply(lambda x: x[Const.INPUTS(0)].replace(s, ' ' + s + ' '), - new_field_name=Const.INPUTS(0)) - - _filt = lambda x: x - data_set.apply(lambda x: list(filter(_filt, x[Const.INPUTS(0)].split(' '))), - new_field_name=Const.INPUTS(0), is_input=auto_set_input) - data_set.apply(lambda x: list(filter(_filt, x[Const.INPUTS(1)].split(' '))), - new_field_name=Const.INPUTS(1), is_input=auto_set_input) - _filt = None - - if to_lower: - for data_name, data_set in data_info.datasets.items(): - data_set.apply(lambda x: [w.lower() for w in x[Const.INPUTS(0)]], new_field_name=Const.INPUTS(0), - is_input=auto_set_input) - data_set.apply(lambda x: [w.lower() for w in x[Const.INPUTS(1)]], new_field_name=Const.INPUTS(1), - is_input=auto_set_input) - - if bert_tokenizer is not None: - if bert_tokenizer.lower() in PRETRAINED_BERT_MODEL_DIR: - PRETRAIN_URL = _get_base_url('bert') - model_name = PRETRAINED_BERT_MODEL_DIR[bert_tokenizer] - model_url = PRETRAIN_URL + model_name - model_dir = cached_path(model_url, name='embedding') - # 检查是否存在 - elif os.path.isdir(bert_tokenizer): - model_dir = bert_tokenizer - else: - raise ValueError(f"Cannot recognize BERT tokenizer from {bert_tokenizer}.") - - words_vocab = Vocabulary(padding='[PAD]', unknown='[UNK]') - with open(os.path.join(model_dir, 'vocab.txt'), 'r') as f: - lines = f.readlines() - lines = [line.strip() for line in lines] - words_vocab.add_word_lst(lines) - words_vocab.build_vocab() - - tokenizer = BertTokenizer.from_pretrained(model_dir) - - for data_name, data_set in data_info.datasets.items(): - for fields in data_set.get_field_names(): - if Const.INPUT in fields: - data_set.apply(lambda x: tokenizer.tokenize(' '.join(x[fields])), new_field_name=fields, - is_input=auto_set_input) - - if isinstance(concat, bool): - concat = 'default' if concat else None - if concat is not None: - if isinstance(concat, str): - CONCAT_MAP = {'bert': ['[CLS]', '[SEP]', '', '[SEP]'], - 'default': ['', '', '', '']} - if concat.lower() in CONCAT_MAP: - concat = CONCAT_MAP[concat] - else: - concat = 4 * [concat] - assert len(concat) == 4, \ - f'Please choose a list with 4 symbols which at the beginning of first 
sentence ' \ - f'the end of first sentence, the begin of second sentence, and the end of second' \ - f'sentence. Your input is {concat}' - - for data_name, data_set in data_info.datasets.items(): - data_set.apply(lambda x: [concat[0]] + x[Const.INPUTS(0)] + [concat[1]] + [concat[2]] + - x[Const.INPUTS(1)] + [concat[3]], new_field_name=Const.INPUT) - data_set.apply(lambda x: [w for w in x[Const.INPUT] if len(w) > 0], new_field_name=Const.INPUT, - is_input=auto_set_input) - - if seq_len_type is not None: - if seq_len_type == 'seq_len': # - for data_name, data_set in data_info.datasets.items(): - for fields in data_set.get_field_names(): - if Const.INPUT in fields: - data_set.apply(lambda x: len(x[fields]), - new_field_name=fields.replace(Const.INPUT, Const.INPUT_LEN), - is_input=auto_set_input) - elif seq_len_type == 'mask': - for data_name, data_set in data_info.datasets.items(): - for fields in data_set.get_field_names(): - if Const.INPUT in fields: - data_set.apply(lambda x: [1] * len(x[fields]), - new_field_name=fields.replace(Const.INPUT, Const.INPUT_LEN), - is_input=auto_set_input) - elif seq_len_type == 'bert': - for data_name, data_set in data_info.datasets.items(): - if Const.INPUT not in data_set.get_field_names(): - raise KeyError(f'Field ``{Const.INPUT}`` not in {data_name} data set: ' - f'got {data_set.get_field_names()}') - data_set.apply(lambda x: [0] * (len(x[Const.INPUTS(0)]) + 2) + [1] * (len(x[Const.INPUTS(1)]) + 1), - new_field_name=Const.INPUT_LENS(0), is_input=auto_set_input) - data_set.apply(lambda x: [1] * len(x[Const.INPUT_LENS(0)]), - new_field_name=Const.INPUT_LENS(1), is_input=auto_set_input) - - if auto_pad_length is not None: - cut_text = min(auto_pad_length, cut_text if cut_text is not None else auto_pad_length) - - if cut_text is not None: - for data_name, data_set in data_info.datasets.items(): - for fields in data_set.get_field_names(): - if (Const.INPUT in fields) or ((Const.INPUT_LEN in fields) and (seq_len_type != 'seq_len')): - data_set.apply(lambda x: x[fields][: cut_text], new_field_name=fields, - is_input=auto_set_input) - - data_set_list = [d for n, d in data_info.datasets.items()] - assert len(data_set_list) > 0, f'There are NO data sets in data info!' 
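
The three ``seq_len_type`` options handled above reduce to the following framework-free sketch; the helper name and sample tokens are illustrative only, not part of fastNLP:

```python
def make_seq_len_features(words1, words2, seq_len_type):
    """Mirror of the 'seq_len' / 'mask' / 'bert' branches above (illustrative only)."""
    if seq_len_type == 'seq_len':
        return {'seq_len1': len(words1), 'seq_len2': len(words2)}
    if seq_len_type == 'mask':
        return {'mask1': [1] * len(words1), 'mask2': [1] * len(words2)}
    if seq_len_type == 'bert':
        # [CLS] sent1 [SEP] sent2 [SEP] -> 0/1 segment ids plus an all-ones attention mask
        segment_ids = [0] * (len(words1) + 2) + [1] * (len(words2) + 1)
        return {'segment_ids': segment_ids, 'attention_mask': [1] * len(segment_ids)}
    raise ValueError(f'unsupported seq_len_type: {seq_len_type}')

print(make_seq_len_features(['a', 'cat'], ['an', 'animal'], 'bert'))
```
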
- - if bert_tokenizer is None: - words_vocab = Vocabulary(padding=auto_pad_token) - words_vocab = words_vocab.from_dataset(*[d for n, d in data_info.datasets.items() if 'train' in n], - field_name=[n for n in data_set_list[0].get_field_names() - if (Const.INPUT in n)], - no_create_entry_dataset=[d for n, d in data_info.datasets.items() - if 'train' not in n]) - target_vocab = Vocabulary(padding=None, unknown=None) - target_vocab = target_vocab.from_dataset(*[d for n, d in data_info.datasets.items() if 'train' in n], - field_name=Const.TARGET) - data_info.vocabs = {Const.INPUT: words_vocab, Const.TARGET: target_vocab} - - if get_index: - for data_name, data_set in data_info.datasets.items(): - for fields in data_set.get_field_names(): - if Const.INPUT in fields: - data_set.apply(lambda x: [words_vocab.to_index(w) for w in x[fields]], new_field_name=fields, - is_input=auto_set_input) - - if Const.TARGET in data_set.get_field_names(): - data_set.apply(lambda x: target_vocab.to_index(x[Const.TARGET]), new_field_name=Const.TARGET, - is_input=auto_set_input, is_target=auto_set_target) - - if auto_pad_length is not None: - if seq_len_type == 'seq_len': - raise RuntimeError(f'the sequence will be padded with the length {auto_pad_length}, ' - f'so the seq_len_type cannot be `{seq_len_type}`!') - for data_name, data_set in data_info.datasets.items(): - for fields in data_set.get_field_names(): - if Const.INPUT in fields: - data_set.apply(lambda x: x[fields] + [words_vocab.to_index(words_vocab.padding)] * - (auto_pad_length - len(x[fields])), new_field_name=fields, - is_input=auto_set_input) - elif (Const.INPUT_LEN in fields) and (seq_len_type != 'seq_len'): - data_set.apply(lambda x: x[fields] + [0] * (auto_pad_length - len(x[fields])), - new_field_name=fields, is_input=auto_set_input) - - for data_name, data_set in data_info.datasets.items(): - if isinstance(set_input, list): - data_set.set_input(*[inputs for inputs in set_input if inputs in data_set.get_field_names()]) - if isinstance(set_target, list): - data_set.set_target(*[target for target in set_target if target in data_set.get_field_names()]) - - return data_info diff --git a/fastNLP/io/data_loader/mnli.py b/fastNLP/io/data_loader/mnli.py deleted file mode 100644 index 65863f3d..00000000 --- a/fastNLP/io/data_loader/mnli.py +++ /dev/null @@ -1,62 +0,0 @@ - -from ...core.const import Const - -from .matching import MatchingLoader -from ..dataset_loader import CSVLoader - - -class MNLILoader(MatchingLoader, CSVLoader): - """ - 别名::class:`fastNLP.io.MNLILoader` :class:`fastNLP.io.data_loader.MNLILoader` - - 读取MNLI数据集,读取的DataSet包含fields:: - - words1: list(str),第一句文本, premise - - words2: list(str), 第二句文本, hypothesis - - target: str, 真实标签 - - 数据来源: - """ - - def __init__(self, paths: dict=None): - paths = paths if paths is not None else { - 'train': 'train.tsv', - 'dev_matched': 'dev_matched.tsv', - 'dev_mismatched': 'dev_mismatched.tsv', - 'test_matched': 'test_matched.tsv', - 'test_mismatched': 'test_mismatched.tsv', - # 'test_0.9_matched': 'multinli_0.9_test_matched_unlabeled.txt', - # 'test_0.9_mismatched': 'multinli_0.9_test_mismatched_unlabeled.txt', - - # test_0.9_mathed与mismatched是MNLI0.9版本的(数据来源:kaggle) - } - MatchingLoader.__init__(self, paths=paths) - CSVLoader.__init__(self, sep='\t') - self.fields = { - 'sentence1_binary_parse': Const.INPUTS(0), - 'sentence2_binary_parse': Const.INPUTS(1), - 'gold_label': Const.TARGET, - } - - def _load(self, path): - ds = CSVLoader._load(self, path) - - for k, v in self.fields.items(): - if k in 
ds.get_field_names(): - ds.rename_field(k, v) - - if Const.TARGET in ds.get_field_names(): - if ds[0][Const.TARGET] == 'hidden': - ds.delete_field(Const.TARGET) - - parentheses_table = str.maketrans({'(': None, ')': None}) - - ds.apply(lambda ins: ins[Const.INPUTS(0)].translate(parentheses_table).strip().split(), - new_field_name=Const.INPUTS(0)) - ds.apply(lambda ins: ins[Const.INPUTS(1)].translate(parentheses_table).strip().split(), - new_field_name=Const.INPUTS(1)) - if Const.TARGET in ds.get_field_names(): - ds.drop(lambda x: x[Const.TARGET] == '-') - return ds diff --git a/fastNLP/io/data_loader/mtl.py b/fastNLP/io/data_loader/mtl.py deleted file mode 100644 index 923aadfb..00000000 --- a/fastNLP/io/data_loader/mtl.py +++ /dev/null @@ -1,68 +0,0 @@ - -from typing import Union, Dict - -from ..data_bundle import DataBundle -from ..dataset_loader import CSVLoader -from ...core.vocabulary import Vocabulary, VocabularyOption -from ...core.const import Const -from ..utils import check_loader_paths - - -class MTL16Loader(CSVLoader): - """ - 别名::class:`fastNLP.io.MTL16Loader` :class:`fastNLP.io.data_loader.MTL16Loader` - - 读取MTL16数据集,DataSet包含以下fields: - - words: list(str), 需要分类的文本 - - target: str, 文本的标签 - - 数据来源:https://pan.baidu.com/s/1c2L6vdA - - """ - - def __init__(self): - super(MTL16Loader, self).__init__(headers=(Const.TARGET, Const.INPUT), sep='\t') - - def _load(self, path): - dataset = super(MTL16Loader, self)._load(path) - dataset.apply(lambda x: x[Const.INPUT].lower().split(), new_field_name=Const.INPUT) - if len(dataset) == 0: - raise RuntimeError(f"{path} has no valid data.") - - return dataset - - def process(self, - paths: Union[str, Dict[str, str]], - src_vocab_opt: VocabularyOption = None, - tgt_vocab_opt: VocabularyOption = None,): - - paths = check_loader_paths(paths) - datasets = {} - info = DataBundle() - for name, path in paths.items(): - dataset = self.load(path) - datasets[name] = dataset - - src_vocab = Vocabulary() if src_vocab_opt is None else Vocabulary(**src_vocab_opt) - src_vocab.from_dataset(datasets['train'], field_name=Const.INPUT) - src_vocab.index_dataset(*datasets.values(), field_name=Const.INPUT) - - tgt_vocab = Vocabulary(unknown=None, padding=None) \ - if tgt_vocab_opt is None else Vocabulary(**tgt_vocab_opt) - tgt_vocab.from_dataset(datasets['train'], field_name=Const.TARGET) - tgt_vocab.index_dataset(*datasets.values(), field_name=Const.TARGET) - - info.vocabs = { - Const.INPUT: src_vocab, - Const.TARGET: tgt_vocab - } - - info.datasets = datasets - - for name, dataset in info.datasets.items(): - dataset.set_input(Const.INPUT) - dataset.set_target(Const.TARGET) - - return info diff --git a/fastNLP/io/data_loader/people_daily.py b/fastNLP/io/data_loader/people_daily.py deleted file mode 100644 index afd66744..00000000 --- a/fastNLP/io/data_loader/people_daily.py +++ /dev/null @@ -1,85 +0,0 @@ - -from ..data_bundle import DataSetLoader -from ...core.dataset import DataSet -from ...core.instance import Instance -from ...core.const import Const - - -class PeopleDailyCorpusLoader(DataSetLoader): - """ - 别名::class:`fastNLP.io.PeopleDailyCorpusLoader` :class:`fastNLP.io.data_loader.PeopleDailyCorpusLoader` - - 读取人民日报数据集 - """ - - def __init__(self, pos=True, ner=True): - super(PeopleDailyCorpusLoader, self).__init__() - self.pos = pos - self.ner = ner - - def _load(self, data_path): - with open(data_path, "r", encoding="utf-8") as f: - sents = f.readlines() - examples = [] - for sent in sents: - if len(sent) <= 2: - continue - inside_ne = False - 
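
The bracket-to-BILOU-style conversion performed in the word loop below can be summarized by this stand-alone sketch; the function name and sample sentence are invented for illustration, and each token keeps its ``/POS`` suffix exactly as in the loader:

```python
def bracket_to_bilou(words):
    """Toy version of the bracket handling in the loop below (sample input is invented)."""
    tokens, pos_tags, ner_tags = [], [], []
    inside_ne = False
    for word in words:
        if '[' in word and ']' in word:
            ner_tag = 'U'                      # single word forming a whole named entity
        elif '[' in word:
            inside_ne = True
            ner_tag = 'B'
            word = word[1:]
        elif ']' in word:
            ner_tag = 'L'
            word = word[:word.index(']')]
            inside_ne = False
        else:
            ner_tag = 'I' if inside_ne else 'O'
        token, pos = word.split('/')[:2]
        tokens.append(token)
        pos_tags.append(pos)
        ner_tags.append(ner_tag)
    return tokens, pos_tags, ner_tags

print(bracket_to_bilou(['[北京/ns', '大学/n]nt', '的/u', '学生/n']))
# (['北京', '大学', '的', '学生'], ['ns', 'n', 'u', 'n'], ['B', 'L', 'O', 'O'])
```
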
sent_pos_tag = [] - sent_words = [] - sent_ner = [] - words = sent.strip().split()[1:] - for word in words: - if "[" in word and "]" in word: - ner_tag = "U" - print(word) - elif "[" in word: - inside_ne = True - ner_tag = "B" - word = word[1:] - elif "]" in word: - ner_tag = "L" - word = word[:word.index("]")] - if inside_ne is True: - inside_ne = False - else: - raise RuntimeError("only ] appears!") - else: - if inside_ne is True: - ner_tag = "I" - else: - ner_tag = "O" - tmp = word.split("/") - token, pos = tmp[0], tmp[1] - sent_ner.append(ner_tag) - sent_pos_tag.append(pos) - sent_words.append(token) - example = [sent_words] - if self.pos is True: - example.append(sent_pos_tag) - if self.ner is True: - example.append(sent_ner) - examples.append(example) - return self.convert(examples) - - def convert(self, data): - """ - - :param data: python 内置对象 - :return: 一个 :class:`~fastNLP.DataSet` 类型的对象 - """ - data_set = DataSet() - for item in data: - sent_words = item[0] - if self.pos is True and self.ner is True: - instance = Instance( - words=sent_words, pos_tags=item[1], ner=item[2]) - elif self.pos is True: - instance = Instance(words=sent_words, pos_tags=item[1]) - elif self.ner is True: - instance = Instance(words=sent_words, ner=item[1]) - else: - instance = Instance(words=sent_words) - data_set.append(instance) - data_set.apply(lambda ins: len(ins[Const.INPUT]), new_field_name=Const.INPUT_LEN) - return data_set diff --git a/fastNLP/io/data_loader/qnli.py b/fastNLP/io/data_loader/qnli.py deleted file mode 100644 index 84b0f3d6..00000000 --- a/fastNLP/io/data_loader/qnli.py +++ /dev/null @@ -1,47 +0,0 @@ - -from ...core.const import Const - -from .matching import MatchingLoader -from ..dataset_loader import CSVLoader - - -class QNLILoader(MatchingLoader, CSVLoader): - """ - 别名::class:`fastNLP.io.QNLILoader` :class:`fastNLP.io.data_loader.QNLILoader` - - 读取QNLI数据集,读取的DataSet包含fields:: - - words1: list(str),第一句文本, premise - - words2: list(str), 第二句文本, hypothesis - - target: str, 真实标签 - - 数据来源: - """ - - def __init__(self, paths: dict=None): - paths = paths if paths is not None else { - 'train': 'train.tsv', - 'dev': 'dev.tsv', - 'test': 'test.tsv' # test set has not label - } - MatchingLoader.__init__(self, paths=paths) - self.fields = { - 'question': Const.INPUTS(0), - 'sentence': Const.INPUTS(1), - 'label': Const.TARGET, - } - CSVLoader.__init__(self, sep='\t') - - def _load(self, path): - ds = CSVLoader._load(self, path) - - for k, v in self.fields.items(): - if k in ds.get_field_names(): - ds.rename_field(k, v) - for fields in ds.get_all_fields(): - if Const.INPUT in fields: - ds.apply(lambda x: x[fields].strip().split(), new_field_name=fields) - - return ds diff --git a/fastNLP/io/data_loader/quora.py b/fastNLP/io/data_loader/quora.py deleted file mode 100644 index d0ee41ec..00000000 --- a/fastNLP/io/data_loader/quora.py +++ /dev/null @@ -1,34 +0,0 @@ - -from ...core.const import Const - -from .matching import MatchingLoader -from ..dataset_loader import CSVLoader - - -class QuoraLoader(MatchingLoader, CSVLoader): - """ - 别名::class:`fastNLP.io.QuoraLoader` :class:`fastNLP.io.data_loader.QuoraLoader` - - 读取MNLI数据集,读取的DataSet包含fields:: - - words1: list(str),第一句文本, premise - - words2: list(str), 第二句文本, hypothesis - - target: str, 真实标签 - - 数据来源: - """ - - def __init__(self, paths: dict=None): - paths = paths if paths is not None else { - 'train': 'train.tsv', - 'dev': 'dev.tsv', - 'test': 'test.tsv', - } - MatchingLoader.__init__(self, paths=paths) - CSVLoader.__init__(self, sep='\t', 
headers=(Const.TARGET, Const.INPUTS(0), Const.INPUTS(1), 'pairID')) - - def _load(self, path): - ds = CSVLoader._load(self, path) - return ds diff --git a/fastNLP/io/data_loader/rte.py b/fastNLP/io/data_loader/rte.py deleted file mode 100644 index f8c5e2fc..00000000 --- a/fastNLP/io/data_loader/rte.py +++ /dev/null @@ -1,47 +0,0 @@ - -from ...core.const import Const - -from .matching import MatchingLoader -from ..dataset_loader import CSVLoader - - -class RTELoader(MatchingLoader, CSVLoader): - """ - 别名::class:`fastNLP.io.RTELoader` :class:`fastNLP.io.data_loader.RTELoader` - - 读取RTE数据集,读取的DataSet包含fields:: - - words1: list(str),第一句文本, premise - - words2: list(str), 第二句文本, hypothesis - - target: str, 真实标签 - - 数据来源: - """ - - def __init__(self, paths: dict=None): - paths = paths if paths is not None else { - 'train': 'train.tsv', - 'dev': 'dev.tsv', - 'test': 'test.tsv' # test set has not label - } - MatchingLoader.__init__(self, paths=paths) - self.fields = { - 'sentence1': Const.INPUTS(0), - 'sentence2': Const.INPUTS(1), - 'label': Const.TARGET, - } - CSVLoader.__init__(self, sep='\t') - - def _load(self, path): - ds = CSVLoader._load(self, path) - - for k, v in self.fields.items(): - if k in ds.get_field_names(): - ds.rename_field(k, v) - for fields in ds.get_all_fields(): - if Const.INPUT in fields: - ds.apply(lambda x: x[fields].strip().split(), new_field_name=fields) - - return ds diff --git a/fastNLP/io/data_loader/snli.py b/fastNLP/io/data_loader/snli.py deleted file mode 100644 index 1db0ac5b..00000000 --- a/fastNLP/io/data_loader/snli.py +++ /dev/null @@ -1,46 +0,0 @@ - -from ...core.const import Const - -from .matching import MatchingLoader -from ..dataset_loader import JsonLoader - - -class SNLILoader(MatchingLoader, JsonLoader): - """ - 别名::class:`fastNLP.io.SNLILoader` :class:`fastNLP.io.data_loader.SNLILoader` - - 读取SNLI数据集,读取的DataSet包含fields:: - - words1: list(str),第一句文本, premise - - words2: list(str), 第二句文本, hypothesis - - target: str, 真实标签 - - 数据来源: https://nlp.stanford.edu/projects/snli/snli_1.0.zip - """ - - def __init__(self, paths: dict=None): - fields = { - 'sentence1_binary_parse': Const.INPUTS(0), - 'sentence2_binary_parse': Const.INPUTS(1), - 'gold_label': Const.TARGET, - } - paths = paths if paths is not None else { - 'train': 'snli_1.0_train.jsonl', - 'dev': 'snli_1.0_dev.jsonl', - 'test': 'snli_1.0_test.jsonl'} - MatchingLoader.__init__(self, paths=paths) - JsonLoader.__init__(self, fields=fields) - - def _load(self, path): - ds = JsonLoader._load(self, path) - - parentheses_table = str.maketrans({'(': None, ')': None}) - - ds.apply(lambda ins: ins[Const.INPUTS(0)].translate(parentheses_table).strip().split(), - new_field_name=Const.INPUTS(0)) - ds.apply(lambda ins: ins[Const.INPUTS(1)].translate(parentheses_table).strip().split(), - new_field_name=Const.INPUTS(1)) - ds.drop(lambda x: x[Const.TARGET] == '-') - return ds diff --git a/fastNLP/io/data_loader/sst.py b/fastNLP/io/data_loader/sst.py deleted file mode 100644 index 2034fc2b..00000000 --- a/fastNLP/io/data_loader/sst.py +++ /dev/null @@ -1,180 +0,0 @@ - -from typing import Union, Dict -from nltk import Tree - -from ..data_bundle import DataBundle, DataSetLoader -from ..dataset_loader import CSVLoader -from ...core.vocabulary import VocabularyOption, Vocabulary -from ...core.dataset import DataSet -from ...core.const import Const -from ...core.instance import Instance -from ..utils import check_loader_paths, get_tokenizer - - -class SSTLoader(DataSetLoader): - """ - 别名::class:`fastNLP.io.SSTLoader` 
:class:`fastNLP.io.data_loader.SSTLoader` - - 读取SST数据集, DataSet包含fields:: - - words: list(str) 需要分类的文本 - target: str 文本的标签 - - 数据来源: https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip - - :param subtree: 是否将数据展开为子树,扩充数据量. Default: ``False`` - :param fine_grained: 是否使用SST-5标准,若 ``False`` , 使用SST-2。Default: ``False`` - """ - - URL = 'https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip' - DATA_DIR = 'sst/' - - def __init__(self, subtree=False, fine_grained=False): - self.subtree = subtree - - tag_v = {'0': 'very negative', '1': 'negative', '2': 'neutral', - '3': 'positive', '4': 'very positive'} - if not fine_grained: - tag_v['0'] = tag_v['1'] - tag_v['4'] = tag_v['3'] - self.tag_v = tag_v - self.tokenizer = get_tokenizer() - - def _load(self, path): - """ - - :param str path: 存储数据的路径 - :return: 一个 :class:`~fastNLP.DataSet` 类型的对象 - """ - datalist = [] - with open(path, 'r', encoding='utf-8') as f: - datas = [] - for l in f: - datas.extend([(s, self.tag_v[t]) - for s, t in self._get_one(l, self.subtree)]) - ds = DataSet() - for words, tag in datas: - ds.append(Instance(words=words, target=tag)) - return ds - - def _get_one(self, data, subtree): - tree = Tree.fromstring(data) - if subtree: - return [(self.tokenizer(' '.join(t.leaves())), t.label()) for t in tree.subtrees() ] - return [(self.tokenizer(' '.join(tree.leaves())), tree.label())] - - def process(self, - paths, train_subtree=True, - src_vocab_op: VocabularyOption = None, - tgt_vocab_op: VocabularyOption = None,): - paths = check_loader_paths(paths) - input_name, target_name = 'words', 'target' - src_vocab = Vocabulary() if src_vocab_op is None else Vocabulary(**src_vocab_op) - tgt_vocab = Vocabulary(unknown=None, padding=None) \ - if tgt_vocab_op is None else Vocabulary(**tgt_vocab_op) - - info = DataBundle() - origin_subtree = self.subtree - self.subtree = train_subtree - info.datasets['train'] = self._load(paths['train']) - self.subtree = origin_subtree - for n, p in paths.items(): - if n != 'train': - info.datasets[n] = self._load(p) - - src_vocab.from_dataset( - info.datasets['train'], - field_name=input_name, - no_create_entry_dataset=[ds for n, ds in info.datasets.items() if n != 'train']) - tgt_vocab.from_dataset(info.datasets['train'], field_name=target_name) - - src_vocab.index_dataset( - *info.datasets.values(), - field_name=input_name, new_field_name=input_name) - tgt_vocab.index_dataset( - *info.datasets.values(), - field_name=target_name, new_field_name=target_name) - info.vocabs = { - input_name: src_vocab, - target_name: tgt_vocab - } - - return info - - -class SST2Loader(CSVLoader): - """ - 别名::class:`fastNLP.io.SST2Loader` :class:`fastNLP.io.data_loader.SST2Loader` - - 数据来源 SST: https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8 - """ - - def __init__(self): - super(SST2Loader, self).__init__(sep='\t') - self.tokenizer = get_tokenizer() - self.field = {'sentence': Const.INPUT, 'label': Const.TARGET} - - def _load(self, path: str) -> DataSet: - ds = super(SST2Loader, self)._load(path) - for k, v in self.field.items(): - if k in ds.get_field_names(): - ds.rename_field(k, v) - ds.apply(lambda x: self.tokenizer(x[Const.INPUT]), new_field_name=Const.INPUT) - print("all count:", len(ds)) - return ds - - def process(self, - paths: Union[str, Dict[str, str]], - src_vocab_opt: VocabularyOption = None, - tgt_vocab_opt: VocabularyOption = None, - char_level_op=False): - - paths = check_loader_paths(paths) 
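
The subtree expansion removed with ``SSTLoader`` above (``_load`` / ``_get_one``) amounts to the following, assuming ``nltk`` is installed; the sample tree string is made up:

```python
from nltk import Tree

line = "(3 (2 A) (4 (3 charming) (2 film)))"   # PTB-style SST line (invented sample)
tree = Tree.fromstring(line)

whole = (tree.leaves(), tree.label())                           # subtree=False: one example
expanded = [(t.leaves(), t.label()) for t in tree.subtrees()]   # subtree=True: every subtree

print(whole)           # (['A', 'charming', 'film'], '3')
print(len(expanded))   # 5 -- each internal node contributes an extra training example
```
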
- datasets = {} - info = DataBundle() - for name, path in paths.items(): - dataset = self.load(path) - dataset.apply_field(lambda words:words.copy(), field_name='words', new_field_name='raw_words') - datasets[name] = dataset - - def wordtochar(words): - chars = [] - for word in words: - word = word.lower() - for char in word: - chars.append(char) - chars.append('') - chars.pop() - return chars - - input_name, target_name = Const.INPUT, Const.TARGET - info.vocabs={} - - # 就分隔为char形式 - if char_level_op: - for dataset in datasets.values(): - dataset.apply_field(wordtochar, field_name=Const.INPUT, new_field_name=Const.CHAR_INPUT) - src_vocab = Vocabulary() if src_vocab_opt is None else Vocabulary(**src_vocab_opt) - src_vocab.from_dataset(datasets['train'], field_name=Const.INPUT, no_create_entry_dataset=[ - dataset for name, dataset in datasets.items() if name!='train' - ]) - src_vocab.index_dataset(*datasets.values(), field_name=Const.INPUT) - - tgt_vocab = Vocabulary(unknown=None, padding=None) \ - if tgt_vocab_opt is None else Vocabulary(**tgt_vocab_opt) - tgt_vocab.from_dataset(datasets['train'], field_name=Const.TARGET) - tgt_vocab.index_dataset(*datasets.values(), field_name=Const.TARGET) - - info.vocabs = { - Const.INPUT: src_vocab, - Const.TARGET: tgt_vocab - } - - info.datasets = datasets - - for name, dataset in info.datasets.items(): - dataset.set_input(Const.INPUT) - dataset.set_target(Const.TARGET) - - return info - diff --git a/fastNLP/io/data_loader/yelp.py b/fastNLP/io/data_loader/yelp.py deleted file mode 100644 index f2bc60c8..00000000 --- a/fastNLP/io/data_loader/yelp.py +++ /dev/null @@ -1,132 +0,0 @@ - -import csv -from typing import Iterable - -from ...core.const import Const -from ...core.dataset import DataSet -from ...core.instance import Instance -from ...core.vocabulary import VocabularyOption, Vocabulary -from ..data_bundle import DataBundle, DataSetLoader -from typing import Union, Dict -from ..utils import check_loader_paths, get_tokenizer - - -class YelpLoader(DataSetLoader): - """ - 别名::class:`fastNLP.io.YelpLoader` :class:`fastNLP.io.data_loader.YelpLoader` - 读取Yelp_full/Yelp_polarity数据集, DataSet包含fields: - - words: list(str), 需要分类的文本 - - target: str, 文本的标签 - - chars:list(str),未index的字符列表 - - 数据集:yelp_full/yelp_polarity - - :param fine_grained: 是否使用SST-5标准,若 ``False`` , 使用SST-2。Default: ``False`` - :param lower: 是否需要自动转小写,默认为False。 - """ - - def __init__(self, fine_grained=False, lower=False): - super(YelpLoader, self).__init__() - tag_v = {'1.0': 'very negative', '2.0': 'negative', '3.0': 'neutral', - '4.0': 'positive', '5.0': 'very positive'} - if not fine_grained: - tag_v['1.0'] = tag_v['2.0'] - tag_v['5.0'] = tag_v['4.0'] - self.fine_grained = fine_grained - self.tag_v = tag_v - self.lower = lower - self.tokenizer = get_tokenizer() - - def _load(self, path): - ds = DataSet() - csv_reader = csv.reader(open(path, encoding='utf-8')) - all_count = 0 - real_count = 0 - for row in csv_reader: - all_count += 1 - if len(row) == 2: - target = self.tag_v[row[0] + ".0"] - words = clean_str(row[1], self.tokenizer, self.lower) - if len(words) != 0: - ds.append(Instance(words=words, target=target)) - real_count += 1 - print("all count:", all_count) - print("real count:", real_count) - return ds - - def process(self, paths: Union[str, Dict[str, str]], - train_ds: Iterable[str] = None, - src_vocab_op: VocabularyOption = None, - tgt_vocab_op: VocabularyOption = None, - char_level_op=False): - paths = check_loader_paths(paths) - info = DataBundle(datasets=self.load(paths)) - 
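
Both the SST-2 loader above and the Yelp loader below rely on the same ``char_level_op`` conversion. A minimal stand-alone sketch (the helper name and ``sep`` argument are illustrative; in the deleted code the separator appears as a literal empty string):

```python
def word_to_char(words, sep=''):
    """Lower-case each word, split it into characters, and insert a separator between words."""
    chars = []
    for word in words:
        chars.extend(word.lower())
        chars.append(sep)
    if chars:
        chars.pop()  # drop the trailing separator
    return chars

print(word_to_char(['Great', 'movie']))
# ['g', 'r', 'e', 'a', 't', '', 'm', 'o', 'v', 'i', 'e']
```
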
src_vocab = Vocabulary() if src_vocab_op is None else Vocabulary(**src_vocab_op) - tgt_vocab = Vocabulary(unknown=None, padding=None) \ - if tgt_vocab_op is None else Vocabulary(**tgt_vocab_op) - _train_ds = [info.datasets[name] - for name in train_ds] if train_ds else info.datasets.values() - - def wordtochar(words): - chars = [] - for word in words: - word = word.lower() - for char in word: - chars.append(char) - chars.append('') - chars.pop() - return chars - - input_name, target_name = Const.INPUT, Const.TARGET - info.vocabs = {} - # 就分隔为char形式 - if char_level_op: - for dataset in info.datasets.values(): - dataset.apply_field(wordtochar, field_name=Const.INPUT, new_field_name=Const.CHAR_INPUT) - else: - src_vocab.from_dataset(*_train_ds, field_name=input_name) - src_vocab.index_dataset(*info.datasets.values(), field_name=input_name, new_field_name=input_name) - info.vocabs[input_name] = src_vocab - - tgt_vocab.from_dataset(*_train_ds, field_name=target_name) - tgt_vocab.index_dataset( - *info.datasets.values(), - field_name=target_name, new_field_name=target_name) - - info.vocabs[target_name] = tgt_vocab - - info.datasets['train'], info.datasets['dev'] = info.datasets['train'].split(0.1, shuffle=False) - - for name, dataset in info.datasets.items(): - dataset.set_input(Const.INPUT) - dataset.set_target(Const.TARGET) - - return info - - -def clean_str(sentence, tokenizer, char_lower=False): - """ - heavily borrowed from github - https://github.com/LukeZhuang/Hierarchical-Attention-Network/blob/master/yelp-preprocess.ipynb - :param sentence: is a str - :return: - """ - if char_lower: - sentence = sentence.lower() - import re - nonalpnum = re.compile('[^0-9a-zA-Z?!\']+') - words = tokenizer(sentence) - words_collection = [] - for word in words: - if word in ['-lrb-', '-rrb-', '', '-r', '-l', 'b-']: - continue - tt = nonalpnum.split(word) - t = ''.join(tt) - if t != '': - words_collection.append(t) - - return words_collection - diff --git a/fastNLP/io/dataset_loader.py b/fastNLP/io/dataset_loader.py deleted file mode 100644 index fca0de69..00000000 --- a/fastNLP/io/dataset_loader.py +++ /dev/null @@ -1,121 +0,0 @@ -"""undocumented -.. warning:: - - 本模块将在 `0.5.0版本` 中被废弃,由 :mod:`~fastNLP.io.loader` 和 :mod:`~fastNLP.io.pipe` 模块替代。 - -dataset_loader模块实现了许多 DataSetLoader, 用于读取不同格式的数据, 并返回 `DataSet` , -得到的 :class:`~fastNLP.DataSet` 对象可以直接传入 :class:`~fastNLP.Trainer` 和 :class:`~fastNLP.Tester`, 用于模型的训练和测试。 -以SNLI数据集为例:: - - loader = SNLILoader() - train_ds = loader.load('path/to/train') - dev_ds = loader.load('path/to/dev') - test_ds = loader.load('path/to/test') - - # ... do stuff - -为 fastNLP 提供 DataSetLoader 的开发者请参考 :class:`~fastNLP.io.DataSetLoader` 的介绍。 - -""" -__all__ = [ - 'CSVLoader', - 'JsonLoader', -] - - -from .data_bundle import DataSetLoader -from .file_reader import _read_csv, _read_json -from ..core.dataset import DataSet -from ..core.instance import Instance - - -class JsonLoader(DataSetLoader): - """ - 别名::class:`fastNLP.io.JsonLoader` :class:`fastNLP.io.dataset_loader.JsonLoader` - - 读取json格式数据.数据必须按行存储,每行是一个包含各类属性的json对象 - - :param dict fields: 需要读入的json属性名称, 和读入后在DataSet中存储的field_name - ``fields`` 的 `key` 必须是json对象的属性名. ``fields`` 的 `value` 为读入后在DataSet存储的 `field_name` , - `value` 也可为 ``None`` , 这时读入后的 `field_name` 与json对象对应属性同名 - ``fields`` 可为 ``None`` , 这时,json对象所有属性都保存在DataSet中. Default: ``None`` - :param bool dropna: 是否忽略非法数据,若 ``True`` 则忽略,若 ``False`` ,在遇到非法数据时,抛出 ``ValueError`` . 
- Default: ``False`` - """ - - def __init__(self, fields=None, dropna=False): - super(JsonLoader, self).__init__() - self.dropna = dropna - self.fields = None - self.fields_list = None - if fields: - self.fields = {} - for k, v in fields.items(): - self.fields[k] = k if v is None else v - self.fields_list = list(self.fields.keys()) - - def _load(self, path): - ds = DataSet() - for idx, d in _read_json(path, fields=self.fields_list, dropna=self.dropna): - if self.fields: - ins = {self.fields[k]: v for k, v in d.items()} - else: - ins = d - ds.append(Instance(**ins)) - return ds - - -class CSVLoader(DataSetLoader): - """ - 别名::class:`fastNLP.io.CSVLoader` :class:`fastNLP.io.dataset_loader.CSVLoader` - - 读取CSV格式的数据集。返回 ``DataSet`` - - :param List[str] headers: CSV文件的文件头.定义每一列的属性名称,即返回的DataSet中`field`的名称 - 若为 ``None`` ,则将读入文件的第一行视作 ``headers`` . Default: ``None`` - :param str sep: CSV文件中列与列之间的分隔符. Default: "," - :param bool dropna: 是否忽略非法数据,若 ``True`` 则忽略,若 ``False`` ,在遇到非法数据时,抛出 ``ValueError`` . - Default: ``False`` - """ - - def __init__(self, headers=None, sep=",", dropna=False): - self.headers = headers - self.sep = sep - self.dropna = dropna - - def _load(self, path): - ds = DataSet() - for idx, data in _read_csv(path, headers=self.headers, - sep=self.sep, dropna=self.dropna): - ds.append(Instance(**data)) - return ds - - -def _cut_long_sentence(sent, max_sample_length=200): - """ - 将长于max_sample_length的sentence截成多段,只会在有空格的地方发生截断。 - 所以截取的句子可能长于或者短于max_sample_length - - :param sent: str. - :param max_sample_length: int. - :return: list of str. - """ - sent_no_space = sent.replace(' ', '') - cutted_sentence = [] - if len(sent_no_space) > max_sample_length: - parts = sent.strip().split() - new_line = '' - length = 0 - for part in parts: - length += len(part) - new_line += part + ' ' - if length > max_sample_length: - new_line = new_line[:-1] - cutted_sentence.append(new_line) - length = 0 - new_line = '' - if new_line != '': - cutted_sentence.append(new_line[:-1]) - else: - cutted_sentence.append(sent) - return cutted_sentence diff --git a/fastNLP/io/embed_loader.py b/fastNLP/io/embed_loader.py index 780d91e4..a157901f 100644 --- a/fastNLP/io/embed_loader.py +++ b/fastNLP/io/embed_loader.py @@ -13,7 +13,6 @@ import warnings import numpy as np -from .data_bundle import BaseLoader from ..core.utils import Option from ..core.vocabulary import Vocabulary @@ -32,7 +31,7 @@ class EmbeddingOption(Option): ) -class EmbedLoader(BaseLoader): +class EmbedLoader: """ 别名::class:`fastNLP.io.EmbedLoader` :class:`fastNLP.io.embed_loader.EmbedLoader` @@ -84,9 +83,9 @@ class EmbedLoader(BaseLoader): word = ''.join(parts[:-dim]) nums = parts[-dim:] # 对齐unk与pad - if word==padding and vocab.padding is not None: + if word == padding and vocab.padding is not None: word = vocab.padding - elif word==unknown and vocab.unknown is not None: + elif word == unknown and vocab.unknown is not None: word = vocab.unknown if word in vocab: index = vocab.to_index(word) @@ -171,7 +170,7 @@ class EmbedLoader(BaseLoader): index = vocab.to_index(key) matrix[index] = vec - if (unknown is not None and not found_unknown) or (padding is not None and not found_pad): + if ((unknown is not None) and (not found_unknown)) or ((padding is not None) and (not found_pad)): start_idx = 0 if padding is not None: start_idx += 1 @@ -180,9 +179,9 @@ class EmbedLoader(BaseLoader): mean = np.mean(matrix[start_idx:], axis=0, keepdims=True) std = np.std(matrix[start_idx:], axis=0, keepdims=True) - if (unknown is not None and not found_unknown): + if 
(unknown is not None) and (not found_unknown): matrix[start_idx - 1] = np.random.randn(1, dim).astype(dtype) * std + mean - if (padding is not None and not found_pad): + if (padding is not None) and (not found_pad): matrix[0] = np.random.randn(1, dim).astype(dtype) * std + mean if normalize: diff --git a/fastNLP/io/model_io.py b/fastNLP/io/model_io.py index 22ced1ce..a1899f51 100644 --- a/fastNLP/io/model_io.py +++ b/fastNLP/io/model_io.py @@ -8,10 +8,8 @@ __all__ = [ import torch -from .data_bundle import BaseLoader - -class ModelLoader(BaseLoader): +class ModelLoader: """ 别名::class:`fastNLP.io.ModelLoader` :class:`fastNLP.io.model_io.ModelLoader` diff --git a/fastNLP/models/enas_controller.py b/fastNLP/models/enas_controller.py deleted file mode 100644 index eec820e4..00000000 --- a/fastNLP/models/enas_controller.py +++ /dev/null @@ -1,228 +0,0 @@ -"""undocumented -Code Modified from https://github.com/carpedm20/ENAS-pytorch -A module with NAS controller-related code. -""" - -__all__ = [] - -import collections -import os - -import torch -import torch.nn.functional as F - -from . import enas_utils as utils -from .enas_utils import Node - - -def _construct_dags(prev_nodes, activations, func_names, num_blocks): - """Constructs a set of DAGs based on the actions, i.e., previous nodes and - activation functions, sampled from the controller/policy pi. - - Args: - prev_nodes: Previous node actions from the policy. - activations: Activations sampled from the policy. - func_names: Mapping from activation function names to functions. - num_blocks: Number of blocks in the target RNN cell. - - Returns: - A list of DAGs defined by the inputs. - - RNN cell DAGs are represented in the following way: - - 1. Each element (node) in a DAG is a list of `Node`s. - - 2. The `Node`s in the list dag[i] correspond to the subsequent nodes - that take the output from node i as their own input. - - 3. dag[-1] is the node that takes input from x^{(t)} and h^{(t - 1)}. - dag[-1] always feeds dag[0]. - dag[-1] acts as if `w_xc`, `w_hc`, `w_xh` and `w_hh` are its - weights. - - 4. dag[N - 1] is the node that produces the hidden state passed to - the next timestep. dag[N - 1] is also always a leaf node, and therefore - is always averaged with the other leaf nodes and fed to the output - decoder. - """ - dags = [] - for nodes, func_ids in zip(prev_nodes, activations): - dag = collections.defaultdict(list) - - # add first node - dag[-1] = [Node(0, func_names[func_ids[0]])] - dag[-2] = [Node(0, func_names[func_ids[0]])] - - # add following nodes - for jdx, (idx, func_id) in enumerate(zip(nodes, func_ids[1:])): - dag[utils.to_item(idx)].append(Node(jdx + 1, func_names[func_id])) - - leaf_nodes = set(range(num_blocks)) - dag.keys() - - # merge with avg - for idx in leaf_nodes: - dag[idx] = [Node(num_blocks, 'avg')] - - # This is actually y^{(t)}. h^{(t)} is node N - 1 in - # the graph, where N Is the number of nodes. I.e., h^{(t)} takes - # only one other node as its input. - # last h[t] node - last_node = Node(num_blocks + 1, 'h[t]') - dag[num_blocks] = [last_node] - dags.append(dag) - - return dags - - -class Controller(torch.nn.Module): - """Based on - https://github.com/pytorch/examples/blob/master/word_language_model/model.py - - RL controllers do not necessarily have much to do with - language models. 
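
For one concrete (made-up) controller sample, ``_construct_dags`` above builds a mapping like the following minimal re-creation; only standard-library code is used and the sampled actions are invented:

```python
import collections

Node = collections.namedtuple('Node', ['id', 'name'])
func_names = ['tanh', 'ReLU', 'identity', 'sigmoid']
num_blocks = 4
prev_nodes = [0, 1, 1]        # parent chosen for blocks 1..3 (block 0 hangs off the input)
activations = [1, 0, 3, 2]    # activation id for blocks 0..3

dag = collections.defaultdict(list)
dag[-1] = [Node(0, func_names[activations[0]])]   # x^{(t)} feeds block 0
dag[-2] = [Node(0, func_names[activations[0]])]   # h^{(t-1)} feeds block 0
for jdx, (idx, func_id) in enumerate(zip(prev_nodes, activations[1:])):
    dag[idx].append(Node(jdx + 1, func_names[func_id]))

leaf_nodes = set(range(num_blocks)) - dag.keys()  # blocks whose output nobody consumes
for idx in leaf_nodes:
    dag[idx] = [Node(num_blocks, 'avg')]          # leaves are averaged into the output
dag[num_blocks] = [Node(num_blocks + 1, 'h[t]')]  # the averaged output becomes h^{(t)}

print(dict(dag))
```
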
- - Base the controller RNN on the GRU from: - https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py - """ - def __init__(self, num_blocks=4, controller_hid=100, cuda=False): - torch.nn.Module.__init__(self) - - # `num_tokens` here is just the activation function - # for every even step, - self.shared_rnn_activations = ['tanh', 'ReLU', 'identity', 'sigmoid'] - self.num_tokens = [len(self.shared_rnn_activations)] - self.controller_hid = controller_hid - self.use_cuda = cuda - self.num_blocks = num_blocks - for idx in range(num_blocks): - self.num_tokens += [idx + 1, len(self.shared_rnn_activations)] - self.func_names = self.shared_rnn_activations - - num_total_tokens = sum(self.num_tokens) - - self.encoder = torch.nn.Embedding(num_total_tokens, - controller_hid) - self.lstm = torch.nn.LSTMCell(controller_hid, controller_hid) - - # Perhaps these weights in the decoder should be - # shared? At least for the activation functions, which all have the - # same size. - self.decoders = [] - for idx, size in enumerate(self.num_tokens): - decoder = torch.nn.Linear(controller_hid, size) - self.decoders.append(decoder) - - self._decoders = torch.nn.ModuleList(self.decoders) - - self.reset_parameters() - self.static_init_hidden = utils.keydefaultdict(self.init_hidden) - - def _get_default_hidden(key): - return utils.get_variable( - torch.zeros(key, self.controller_hid), - self.use_cuda, - requires_grad=False) - - self.static_inputs = utils.keydefaultdict(_get_default_hidden) - - def reset_parameters(self): - init_range = 0.1 - for param in self.parameters(): - param.data.uniform_(-init_range, init_range) - for decoder in self.decoders: - decoder.bias.data.fill_(0) - - def forward(self, # pylint:disable=arguments-differ - inputs, - hidden, - block_idx, - is_embed): - if not is_embed: - embed = self.encoder(inputs) - else: - embed = inputs - - hx, cx = self.lstm(embed, hidden) - logits = self.decoders[block_idx](hx) - - logits /= 5.0 - - # # exploration - # if self.args.mode == 'train': - # logits = (2.5 * F.tanh(logits)) - - return logits, (hx, cx) - - def sample(self, batch_size=1, with_details=False, save_dir=None): - """Samples a set of `args.num_blocks` many computational nodes from the - controller, where each node is made up of an activation function, and - each node except the last also includes a previous node. - """ - if batch_size < 1: - raise Exception(f'Wrong batch_size: {batch_size} < 1') - - # [B, L, H] - inputs = self.static_inputs[batch_size] - hidden = self.static_init_hidden[batch_size] - - activations = [] - entropies = [] - log_probs = [] - prev_nodes = [] - # The RNN controller alternately outputs an activation, - # followed by a previous node, for each block except the last one, - # which only gets an activation function. The last node is the output - # node, and its previous node is the average of all leaf nodes. - for block_idx in range(2*(self.num_blocks - 1) + 1): - logits, hidden = self.forward(inputs, - hidden, - block_idx, - is_embed=(block_idx == 0)) - - probs = F.softmax(logits, dim=-1) - log_prob = F.log_softmax(logits, dim=-1) - # .mean() for entropy? - entropy = -(log_prob * probs).sum(1, keepdim=False) - - action = probs.multinomial(num_samples=1).data - selected_log_prob = log_prob.gather( - 1, utils.get_variable(action, requires_grad=False)) - - # why the [:, 0] here? Should it be .squeeze(), or - # .view()? Same below with `action`. 
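
The per-step sampling arithmetic above boils down to this self-contained PyTorch sketch; the logits, seed, and shapes are illustrative only:

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(1, 4)                      # one controller step over 4 candidate choices

probs = F.softmax(logits, dim=-1)
log_prob = F.log_softmax(logits, dim=-1)
entropy = -(log_prob * probs).sum(1)            # later scaled by 1e-4 as an exploration bonus

action = probs.multinomial(num_samples=1)       # sampled choice, shape [1, 1]
selected_log_prob = log_prob.gather(1, action)  # log pi(action), kept for the REINFORCE update

print(action.item(), selected_log_prob.item(), entropy.item())
```
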
- entropies.append(entropy) - log_probs.append(selected_log_prob[:, 0]) - - # 0: function, 1: previous node - mode = block_idx % 2 - inputs = utils.get_variable( - action[:, 0] + sum(self.num_tokens[:mode]), - requires_grad=False) - - if mode == 0: - activations.append(action[:, 0]) - elif mode == 1: - prev_nodes.append(action[:, 0]) - - prev_nodes = torch.stack(prev_nodes).transpose(0, 1) - activations = torch.stack(activations).transpose(0, 1) - - dags = _construct_dags(prev_nodes, - activations, - self.func_names, - self.num_blocks) - - if save_dir is not None: - for idx, dag in enumerate(dags): - utils.draw_network(dag, - os.path.join(save_dir, f'graph{idx}.png')) - - if with_details: - return dags, torch.cat(log_probs), torch.cat(entropies) - - return dags - - def init_hidden(self, batch_size): - zeros = torch.zeros(batch_size, self.controller_hid) - return (utils.get_variable(zeros, self.use_cuda, requires_grad=False), - utils.get_variable(zeros.clone(), self.use_cuda, requires_grad=False)) diff --git a/fastNLP/models/enas_model.py b/fastNLP/models/enas_model.py deleted file mode 100644 index 2e8ca713..00000000 --- a/fastNLP/models/enas_model.py +++ /dev/null @@ -1,393 +0,0 @@ -"""undocumented -Module containing the shared RNN model. -Code Modified from https://github.com/carpedm20/ENAS-pytorch -""" - -__all__ = [] - -import collections - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Variable - -from . import enas_utils as utils -from .base_model import BaseModel - - -def _get_dropped_weights(w_raw, dropout_p, is_training): - """Drops out weights to implement DropConnect. - - Args: - w_raw: Full, pre-dropout, weights to be dropped out. - dropout_p: Proportion of weights to drop out. - is_training: True iff _shared_ model is training. - - Returns: - The dropped weights. - - Why does torch.nn.functional.dropout() return: - 1. `torch.autograd.Variable()` on the training loop - 2. `torch.nn.Parameter()` on the controller or eval loop, when - training = False... - - Even though the call to `_setweights` in the Smerity repo's - `weight_drop.py` does not have this behaviour, and `F.dropout` always - returns `torch.autograd.Variable` there, even when `training=False`? - - The above TODO is the reason for the hacky check for `torch.nn.Parameter`. - """ - dropped_w = F.dropout(w_raw, p=dropout_p, training=is_training) - - if isinstance(dropped_w, torch.nn.Parameter): - dropped_w = dropped_w.clone() - - return dropped_w - - -class EmbeddingDropout(torch.nn.Embedding): - """Class for dropping out embeddings by zero'ing out parameters in the - embedding matrix. - - This is equivalent to dropping out particular words, e.g., in the sentence - 'the quick brown fox jumps over the lazy dog', dropping out 'the' would - lead to the sentence '### quick brown fox jumps over ### lazy dog' (in the - embedding vector space). - - See 'A Theoretically Grounded Application of Dropout in Recurrent Neural - Networks', (Gal and Ghahramani, 2016). - """ - - def __init__(self, - num_embeddings, - embedding_dim, - max_norm=None, - norm_type=2, - scale_grad_by_freq=False, - sparse=False, - dropout=0.1, - scale=None): - """Embedding constructor. - - Args: - dropout: Dropout probability. - scale: Used to scale parameters of embedding weight matrix that are - not dropped out. Note that this is _in addition_ to the - `1/(1 - dropout)` scaling. - - See `torch.nn.Embedding` for remaining arguments. 
- """ - torch.nn.Embedding.__init__(self, - num_embeddings=num_embeddings, - embedding_dim=embedding_dim, - max_norm=max_norm, - norm_type=norm_type, - scale_grad_by_freq=scale_grad_by_freq, - sparse=sparse) - self.dropout = dropout - assert (dropout >= 0.0) and (dropout < 1.0), ('Dropout must be >= 0.0 ' - 'and < 1.0') - self.scale = scale - - def forward(self, inputs): # pylint:disable=arguments-differ - """Embeds `inputs` with the dropped out embedding weight matrix.""" - if self.training: - dropout = self.dropout - else: - dropout = 0 - - if dropout: - mask = self.weight.data.new(self.weight.size(0), 1) - mask.bernoulli_(1 - dropout) - mask = mask.expand_as(self.weight) - mask = mask / (1 - dropout) - masked_weight = self.weight * Variable(mask) - else: - masked_weight = self.weight - if self.scale and self.scale != 1: - masked_weight = masked_weight * self.scale - - return F.embedding(inputs, - masked_weight, - max_norm=self.max_norm, - norm_type=self.norm_type, - scale_grad_by_freq=self.scale_grad_by_freq, - sparse=self.sparse) - - -class LockedDropout(nn.Module): - # code from https://github.com/salesforce/awd-lstm-lm/blob/master/locked_dropout.py - def __init__(self): - super().__init__() - - def forward(self, x, dropout=0.5): - if not self.training or not dropout: - return x - m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout) - mask = Variable(m, requires_grad=False) / (1 - dropout) - mask = mask.expand_as(x) - return mask * x - - -class ENASModel(BaseModel): - """Shared RNN model.""" - - def __init__(self, embed_num, num_classes, num_blocks=4, cuda=False, shared_hid=1000, shared_embed=1000): - super(ENASModel, self).__init__() - - self.use_cuda = cuda - - self.shared_hid = shared_hid - self.num_blocks = num_blocks - self.decoder = nn.Linear(self.shared_hid, num_classes) - self.encoder = EmbeddingDropout(embed_num, - shared_embed, - dropout=0.1) - self.lockdrop = LockedDropout() - self.dag = None - - # Tie weights - # self.decoder.weight = self.encoder.weight - - # Since W^{x, c} and W^{h, c} are always summed, there - # is no point duplicating their bias offset parameter. Likewise for - # W^{x, h} and W^{h, h}. - self.w_xc = nn.Linear(shared_embed, self.shared_hid) - self.w_xh = nn.Linear(shared_embed, self.shared_hid) - - # The raw weights are stored here because the hidden-to-hidden weights - # are weight dropped on the forward pass. 
- self.w_hc_raw = torch.nn.Parameter( - torch.Tensor(self.shared_hid, self.shared_hid)) - self.w_hh_raw = torch.nn.Parameter( - torch.Tensor(self.shared_hid, self.shared_hid)) - self.w_hc = None - self.w_hh = None - - self.w_h = collections.defaultdict(dict) - self.w_c = collections.defaultdict(dict) - - for idx in range(self.num_blocks): - for jdx in range(idx + 1, self.num_blocks): - self.w_h[idx][jdx] = nn.Linear(self.shared_hid, - self.shared_hid, - bias=False) - self.w_c[idx][jdx] = nn.Linear(self.shared_hid, - self.shared_hid, - bias=False) - - self._w_h = nn.ModuleList([self.w_h[idx][jdx] - for idx in self.w_h - for jdx in self.w_h[idx]]) - self._w_c = nn.ModuleList([self.w_c[idx][jdx] - for idx in self.w_c - for jdx in self.w_c[idx]]) - - self.batch_norm = None - # if args.mode == 'train': - # self.batch_norm = nn.BatchNorm1d(self.shared_hid) - # else: - # self.batch_norm = None - - self.reset_parameters() - self.static_init_hidden = utils.keydefaultdict(self.init_hidden) - - def setDAG(self, dag): - if self.dag is None: - self.dag = dag - - def forward(self, word_seq, hidden=None): - inputs = torch.transpose(word_seq, 0, 1) - - time_steps = inputs.size(0) - batch_size = inputs.size(1) - - self.w_hh = _get_dropped_weights(self.w_hh_raw, - 0.5, - self.training) - self.w_hc = _get_dropped_weights(self.w_hc_raw, - 0.5, - self.training) - - # hidden = self.static_init_hidden[batch_size] if hidden is None else hidden - hidden = self.static_init_hidden[batch_size] - - embed = self.encoder(inputs) - - embed = self.lockdrop(embed, 0.65 if self.training else 0) - - # The norm of hidden states are clipped here because - # otherwise ENAS is especially prone to exploding activations on the - # forward pass. This could probably be fixed in a more elegant way, but - # it might be exposing a weakness in the ENAS algorithm as currently - # proposed. - # - # For more details, see - # https://github.com/carpedm20/ENAS-pytorch/issues/6 - clipped_num = 0 - max_clipped_norm = 0 - h1tohT = [] - logits = [] - for step in range(time_steps): - x_t = embed[step] - logit, hidden = self.cell(x_t, hidden, self.dag) - - hidden_norms = hidden.norm(dim=-1) - max_norm = 25.0 - if hidden_norms.data.max() > max_norm: - # Just directly use the torch slice operations - # in PyTorch v0.4. - # - # This workaround for PyTorch v0.3.1 does everything in numpy, - # because the PyTorch slicing and slice assignment is too - # flaky. 
- hidden_norms = hidden_norms.data.cpu().numpy() - - clipped_num += 1 - if hidden_norms.max() > max_clipped_norm: - max_clipped_norm = hidden_norms.max() - - clip_select = hidden_norms > max_norm - clip_norms = hidden_norms[clip_select] - - mask = np.ones(hidden.size()) - normalizer = max_norm / clip_norms - normalizer = normalizer[:, np.newaxis] - - mask[clip_select] = normalizer - - if self.use_cuda: - hidden *= torch.autograd.Variable( - torch.FloatTensor(mask).cuda(), requires_grad=False) - else: - hidden *= torch.autograd.Variable( - torch.FloatTensor(mask), requires_grad=False) - logits.append(logit) - h1tohT.append(hidden) - - h1tohT = torch.stack(h1tohT) - output = torch.stack(logits) - raw_output = output - - output = self.lockdrop(output, 0.4 if self.training else 0) - - # Pooling - output = torch.mean(output, 0) - - decoded = self.decoder(output) - - extra_out = {'dropped': decoded, - 'hiddens': h1tohT, - 'raw': raw_output} - return {'pred': decoded, 'hidden': hidden, 'extra_out': extra_out} - - def cell(self, x, h_prev, dag): - """Computes a single pass through the discovered RNN cell.""" - c = {} - h = {} - f = {} - - f[0] = self.get_f(dag[-1][0].name) - c[0] = torch.sigmoid(self.w_xc(x) + F.linear(h_prev, self.w_hc, None)) - h[0] = (c[0] * f[0](self.w_xh(x) + F.linear(h_prev, self.w_hh, None)) + - (1 - c[0]) * h_prev) - - leaf_node_ids = [] - q = collections.deque() - q.append(0) - - # Computes connections from the parent nodes `node_id` - # to their child nodes `next_id` recursively, skipping leaf nodes. A - # leaf node is a node whose id == `self.num_blocks`. - # - # Connections between parent i and child j should be computed as - # h_j = c_j*f_{ij}{(W^h_{ij}*h_i)} + (1 - c_j)*h_i, - # where c_j = \sigmoid{(W^c_{ij}*h_i)} - # - # See Training details from Section 3.1 of the paper. - # - # The following algorithm does a breadth-first (since `q.popleft()` is - # used) search over the nodes and computes all the hidden states. - while True: - if len(q) == 0: - break - - node_id = q.popleft() - nodes = dag[node_id] - - for next_node in nodes: - next_id = next_node.id - if next_id == self.num_blocks: - leaf_node_ids.append(node_id) - assert len(nodes) == 1, ('parent of leaf node should have ' - 'only one child') - continue - - w_h = self.w_h[node_id][next_id] - w_c = self.w_c[node_id][next_id] - - f[next_id] = self.get_f(next_node.name) - c[next_id] = torch.sigmoid(w_c(h[node_id])) - h[next_id] = (c[next_id] * f[next_id](w_h(h[node_id])) + - (1 - c[next_id]) * h[node_id]) - - q.append(next_id) - - # Instead of averaging loose ends, perhaps there should - # be a set of separate unshared weights for each "loose" connection - # between each node in a cell and the output. - # - # As it stands, all weights W^h_{ij} are doing double duty by - # connecting both from i to j, as well as from i to the output. 
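
The numpy detour above for clipping hidden-state norms was a workaround for PyTorch 0.3.x slicing; a hedged sketch of the same rescaling written against current PyTorch (not the code that shipped):

```python
import torch

torch.manual_seed(0)
hidden = torch.randn(4, 8) * 20                  # (batch, hidden) with deliberately large norms
max_norm = 25.0

norms = hidden.norm(dim=-1, keepdim=True)
scale = torch.where(norms > max_norm, max_norm / norms, torch.ones_like(norms))
hidden = hidden * scale                          # rows over the limit are rescaled to max_norm

print(hidden.norm(dim=-1))                       # every entry is now <= 25 (up to rounding)
```
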
- - # average all the loose ends - leaf_nodes = [h[node_id] for node_id in leaf_node_ids] - output = torch.mean(torch.stack(leaf_nodes, 2), -1) - - # stabilizing the Updates of omega - if self.batch_norm is not None: - output = self.batch_norm(output) - - return output, h[self.num_blocks - 1] - - def init_hidden(self, batch_size): - zeros = torch.zeros(batch_size, self.shared_hid) - return utils.get_variable(zeros, self.use_cuda, requires_grad=False) - - def get_f(self, name): - name = name.lower() - if name == 'relu': - f = torch.relu - elif name == 'tanh': - f = torch.tanh - elif name == 'identity': - f = lambda x: x - elif name == 'sigmoid': - f = torch.sigmoid - return f - - @property - def num_parameters(self): - def size(p): - return np.prod(p.size()) - - return sum([size(param) for param in self.parameters()]) - - def reset_parameters(self): - init_range = 0.025 - # init_range = 0.025 if self.args.mode == 'train' else 0.04 - for param in self.parameters(): - param.data.uniform_(-init_range, init_range) - self.decoder.bias.data.fill_(0) - - def predict(self, word_seq): - """ - - :param word_seq: torch.LongTensor, [batch_size, seq_len] - :return predict: dict of torch.LongTensor, [batch_size, seq_len] - """ - output = self(word_seq) - _, predict = output['pred'].max(dim=1) - return {'pred': predict} diff --git a/fastNLP/models/enas_trainer.py b/fastNLP/models/enas_trainer.py deleted file mode 100644 index 98d778cd..00000000 --- a/fastNLP/models/enas_trainer.py +++ /dev/null @@ -1,384 +0,0 @@ -"""undocumented -Code Modified from https://github.com/carpedm20/ENAS-pytorch -""" - -__all__ = [] - -import math -import time -from datetime import datetime, timedelta - -import numpy as np -import torch -from torch.optim import Adam - -try: - from tqdm.auto import tqdm -except: - from ..core.utils import _pseudo_tqdm as tqdm - -from ..core.trainer import Trainer -from ..core.batch import DataSetIter -from ..core.callback import CallbackException -from ..core.dataset import DataSet -from ..core.utils import _move_dict_value_to_device -from . import enas_utils as utils -from ..core.utils import _build_args - - -def _get_no_grad_ctx_mgr(): - """Returns a the `torch.no_grad` context manager for PyTorch version >= - 0.4, or a no-op context manager otherwise. - """ - return torch.no_grad() - - -class ENASTrainer(Trainer): - """A class to wrap training code.""" - - def __init__(self, train_data, model, controller, **kwargs): - """Constructor for training algorithm. 
- :param DataSet train_data: the training data - :param torch.nn.modules.module model: a PyTorch model - :param torch.nn.modules.module controller: a PyTorch model - """ - self.final_epochs = kwargs['final_epochs'] - kwargs.pop('final_epochs') - super(ENASTrainer, self).__init__(train_data, model, **kwargs) - self.controller_step = 0 - self.shared_step = 0 - self.max_length = 35 - - self.shared = model - self.controller = controller - - self.shared_optim = Adam( - self.shared.parameters(), - lr=20.0, - weight_decay=1e-7) - - self.controller_optim = Adam( - self.controller.parameters(), - lr=3.5e-4) - - def train(self, load_best_model=True): - """ - :param bool load_best_model: 该参数只有在初始化提供了dev_data的情况下有效,如果True, trainer将在返回之前重新加载dev表现 - 最好的模型参数。 - :return results: 返回一个字典类型的数据, - 内含以下内容:: - - seconds: float, 表示训练时长 - 以下三个内容只有在提供了dev_data的情况下会有。 - best_eval: Dict of Dict, 表示evaluation的结果 - best_epoch: int,在第几个epoch取得的最佳值 - best_step: int, 在第几个step(batch)更新取得的最佳值 - - """ - results = {} - if self.n_epochs <= 0: - print(f"training epoch is {self.n_epochs}, nothing was done.") - results['seconds'] = 0. - return results - try: - if torch.cuda.is_available() and "cuda" in self.device: - self.model = self.model.cuda() - self._model_device = self.model.parameters().__next__().device - self._mode(self.model, is_test=False) - - self.start_time = str(datetime.now().strftime('%Y-%m-%d-%H-%M-%S')) - start_time = time.time() - print("training epochs started " + self.start_time, flush=True) - - try: - self.callback_manager.on_train_begin() - self._train() - self.callback_manager.on_train_end() - except (CallbackException, KeyboardInterrupt) as e: - self.callback_manager.on_exception(e) - - if self.dev_data is not None: - print( - "\nIn Epoch:{}/Step:{}, got best dev performance:".format(self.best_dev_epoch, self.best_dev_step) + - self.tester._format_eval_results(self.best_dev_perf), ) - results['best_eval'] = self.best_dev_perf - results['best_epoch'] = self.best_dev_epoch - results['best_step'] = self.best_dev_step - if load_best_model: - model_name = "best_" + "_".join([self.model.__class__.__name__, self.metric_key, self.start_time]) - load_succeed = self._load_model(self.model, model_name) - if load_succeed: - print("Reloaded the best model.") - else: - print("Fail to reload best model.") - finally: - pass - results['seconds'] = round(time.time() - start_time, 2) - - return results - - def _train(self): - if not self.use_tqdm: - from fastNLP.core.utils import _pseudo_tqdm as inner_tqdm - else: - inner_tqdm = tqdm - self.step = 0 - start = time.time() - total_steps = (len(self.train_data) // self.batch_size + int( - len(self.train_data) % self.batch_size != 0)) * self.n_epochs - with inner_tqdm(total=total_steps, postfix='loss:{0:<6.5f}', leave=False, dynamic_ncols=True) as pbar: - avg_loss = 0 - data_iterator = DataSetIter(self.train_data, batch_size=self.batch_size, sampler=self.sampler, as_numpy=False, - prefetch=self.prefetch) - for epoch in range(1, self.n_epochs + 1): - pbar.set_description_str(desc="Epoch {}/{}".format(epoch, self.n_epochs)) - last_stage = (epoch > self.n_epochs + 1 - self.final_epochs) - if epoch == self.n_epochs + 1 - self.final_epochs: - print('Entering the final stage. (Only train the selected structure)') - # early stopping - self.callback_manager.on_epoch_begin() - - # 1. Training the shared parameters omega of the child models - self.train_shared(pbar) - - # 2. 
Training the controller parameters theta - if not last_stage: - self.train_controller() - - if ((self.validate_every > 0 and self.step % self.validate_every == 0) or - (self.validate_every < 0 and self.step % len(data_iterator) == 0)) \ - and self.dev_data is not None: - if not last_stage: - self.derive() - eval_res = self._do_validation(epoch=epoch, step=self.step) - eval_str = "Evaluation at Epoch {}/{}. Step:{}/{}. ".format(epoch, self.n_epochs, self.step, - total_steps) + \ - self.tester._format_eval_results(eval_res) - pbar.write(eval_str) - - # lr decay; early stopping - self.callback_manager.on_epoch_end() - # =============== epochs end =================== # - pbar.close() - # ============ tqdm end ============== # - - def get_loss(self, inputs, targets, hidden, dags): - """Computes the loss for the same batch for M models. - - This amounts to an estimate of the loss, which is turned into an - estimate for the gradients of the shared model. - """ - if not isinstance(dags, list): - dags = [dags] - - loss = 0 - for dag in dags: - self.shared.setDAG(dag) - inputs = _build_args(self.shared.forward, **inputs) - inputs['hidden'] = hidden - result = self.shared(**inputs) - output, hidden, extra_out = result['pred'], result['hidden'], result['extra_out'] - - self.callback_manager.on_loss_begin(targets, result) - sample_loss = self._compute_loss(result, targets) - loss += sample_loss - - assert len(dags) == 1, 'there are multiple `hidden` for multple `dags`' - return loss, hidden, extra_out - - def train_shared(self, pbar=None, max_step=None, dag=None): - """Train the language model for 400 steps of minibatches of 64 - examples. - - Args: - max_step: Used to run extra training steps as a warm-up. - dag: If not None, is used instead of calling sample(). - - BPTT is truncated at 35 timesteps. - - For each weight update, gradients are estimated by sampling M models - from the fixed controller policy, and averaging their gradients - computed on a batch of training data. - """ - model = self.shared - model.train() - self.controller.eval() - - hidden = self.shared.init_hidden(self.batch_size) - - abs_max_grad = 0 - abs_max_hidden_norm = 0 - step = 0 - raw_total_loss = 0 - total_loss = 0 - train_idx = 0 - avg_loss = 0 - data_iterator = DataSetIter(self.train_data, batch_size=self.batch_size, sampler=self.sampler, as_numpy=False, - prefetch=self.prefetch) - - for batch_x, batch_y in data_iterator: - _move_dict_value_to_device(batch_x, batch_y, device=self._model_device) - indices = data_iterator.get_batch_indices() - # negative sampling; replace unknown; re-weight batch_y - self.callback_manager.on_batch_begin(batch_x, batch_y, indices) - # prediction = self._data_forward(self.model, batch_x) - - dags = self.controller.sample(1) - inputs, targets = batch_x, batch_y - # self.callback_manager.on_loss_begin(batch_y, prediction) - loss, hidden, extra_out = self.get_loss(inputs, - targets, - hidden, - dags) - hidden.detach_() - - avg_loss += loss.item() - - # Is loss NaN or inf? 
requires_grad = False - self.callback_manager.on_backward_begin(loss) - self._grad_backward(loss) - self.callback_manager.on_backward_end() - - self._update() - self.callback_manager.on_step_end() - - if (self.step + 1) % self.print_every == 0: - if self.use_tqdm: - print_output = "loss:{0:<6.5f}".format(avg_loss / self.print_every) - pbar.update(self.print_every) - else: - end = time.time() - diff = timedelta(seconds=round(end - start)) - print_output = "[epoch: {:>3} step: {:>4}] train loss: {:>4.6} time: {}".format( - epoch, self.step, avg_loss, diff) - pbar.set_postfix_str(print_output) - avg_loss = 0 - self.step += 1 - step += 1 - self.shared_step += 1 - self.callback_manager.on_batch_end() - # ================= mini-batch end ==================== # - - def get_reward(self, dag, entropies, hidden, valid_idx=0): - """Computes the perplexity of a single sampled model on a minibatch of - validation data. - """ - if not isinstance(entropies, np.ndarray): - entropies = entropies.data.cpu().numpy() - - data_iterator = DataSetIter(self.dev_data, batch_size=self.batch_size, sampler=self.sampler, as_numpy=False, - prefetch=self.prefetch) - - for inputs, targets in data_iterator: - valid_loss, hidden, _ = self.get_loss(inputs, targets, hidden, dag) - valid_loss = utils.to_item(valid_loss.data) - - valid_ppl = math.exp(valid_loss) - - R = 80 / valid_ppl - - rewards = R + 1e-4 * entropies - - return rewards, hidden - - def train_controller(self): - """Fixes the shared parameters and updates the controller parameters. - - The controller is updated with a score function gradient estimator - (i.e., REINFORCE), with the reward being c/valid_ppl, where valid_ppl - is computed on a minibatch of validation data. - - A moving average baseline is used. - - The controller is trained for 2000 steps per epoch (i.e., - first (Train Shared) phase -> second (Train Controller) phase). - """ - model = self.controller - model.train() - # Why can't we call shared.eval() here? Leads to loss - # being uniformly zero for the controller. - # self.shared.eval() - - avg_reward_base = None - baseline = None - adv_history = [] - entropy_history = [] - reward_history = [] - - hidden = self.shared.init_hidden(self.batch_size) - total_loss = 0 - valid_idx = 0 - for step in range(20): - # sample models - dags, log_probs, entropies = self.controller.sample( - with_details=True) - - # calculate reward - np_entropies = entropies.data.cpu().numpy() - # No gradients should be backpropagated to the - # shared model during controller training, obviously. 
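For clarity, the controller update that the loop below implements can be reduced to a few lines. The following is a minimal, self-contained sketch of that arithmetic only; the tensor names and values are illustrative stand-ins, not the trainer's real controller samples or validation rewards.

```python
import torch

log_probs = torch.randn(5, requires_grad=True)   # log-probabilities of the sampled actions
rewards = torch.full((5,), 80.0 / 120.0)         # R = c / valid_ppl (c = 80) ...
rewards = rewards + 1e-4 * torch.rand(5)         # ... plus the small entropy bonus used above

decay = 0.95
baseline = torch.full((5,), 0.5)                 # moving-average baseline from earlier steps
baseline = decay * baseline + (1 - decay) * rewards
adv = rewards - baseline                         # advantage replaces the raw reward

loss = -(log_probs * adv).sum()                  # score-function (REINFORCE) estimator
loss.backward()                                  # gradients reach only the controller parameters
```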
- with _get_no_grad_ctx_mgr(): - rewards, hidden = self.get_reward(dags, - np_entropies, - hidden, - valid_idx) - - reward_history.extend(rewards) - entropy_history.extend(np_entropies) - - # moving average baseline - if baseline is None: - baseline = rewards - else: - decay = 0.95 - baseline = decay * baseline + (1 - decay) * rewards - - adv = rewards - baseline - adv_history.extend(adv) - - # policy loss - loss = -log_probs * utils.get_variable(adv, - 'cuda' in self.device, - requires_grad=False) - - loss = loss.sum() # or loss.mean() - - # update - self.controller_optim.zero_grad() - loss.backward() - - self.controller_optim.step() - - total_loss += utils.to_item(loss.data) - - if ((step % 50) == 0) and (step > 0): - reward_history, adv_history, entropy_history = [], [], [] - total_loss = 0 - - self.controller_step += 1 - # prev_valid_idx = valid_idx - # valid_idx = ((valid_idx + self.max_length) % - # (self.valid_data.size(0) - 1)) - # # Whenever we wrap around to the beginning of the - # # validation data, we reset the hidden states. - # if prev_valid_idx > valid_idx: - # hidden = self.shared.init_hidden(self.batch_size) - - def derive(self, sample_num=10, valid_idx=0): - """We are always deriving based on the very first batch - of validation data? This seems wrong... - """ - hidden = self.shared.init_hidden(self.batch_size) - - dags, _, entropies = self.controller.sample(sample_num, - with_details=True) - - max_R = 0 - best_dag = None - for dag in dags: - R, _ = self.get_reward(dag, entropies, hidden, valid_idx) - if R.max() > max_R: - max_R = R.max() - best_dag = dag - - self.model.setDAG(best_dag) diff --git a/fastNLP/models/enas_utils.py b/fastNLP/models/enas_utils.py deleted file mode 100644 index cd6c2503..00000000 --- a/fastNLP/models/enas_utils.py +++ /dev/null @@ -1,58 +0,0 @@ -"""undocumented -Code Modified from https://github.com/carpedm20/ENAS-pytorch -""" - -__all__ = [] - -import collections -from collections import defaultdict - -import numpy as np -import torch -from torch.autograd import Variable - - -def detach(h): - if type(h) == Variable: - return Variable(h.data) - else: - return tuple(detach(v) for v in h) - - -def get_variable(inputs, cuda=False, **kwargs): - if type(inputs) in [list, np.ndarray]: - inputs = torch.Tensor(inputs) - if cuda: - out = Variable(inputs.cuda(), **kwargs) - else: - out = Variable(inputs, **kwargs) - return out - - -def update_lr(optimizer, lr): - for param_group in optimizer.param_groups: - param_group['lr'] = lr - - -Node = collections.namedtuple('Node', ['id', 'name']) - - -class keydefaultdict(defaultdict): - def __missing__(self, key): - if self.default_factory is None: - raise KeyError(key) - else: - ret = self[key] = self.default_factory(key) - return ret - - -def to_item(x): - """Converts x, possibly scalar and possibly tensor, to a Python scalar.""" - if isinstance(x, (float, int)): - return x - - if float(torch.__version__[0:3]) < 0.4: - assert (x.dim() == 1) and (len(x) == 1) - return x[0] - - return x.item() diff --git a/legacy/api/README.md b/legacy/api/README.md deleted file mode 100644 index 73560f9f..00000000 --- a/legacy/api/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# fastNLP 高级接口 - -### 环境与配置 -1. 系统环境:linux/ubuntu(推荐) -2. 编程语言:Python>=3.6 -3. 
Python包依赖 - - **torch==1.0** - - numpy>=1.14.2 - -### 中文分词 -```python -text = ['编者按:7月12日,英国航空航天系统公司公布了该公司研制的第一款高科技隐形无人机雷电之神。', - '这款飞行从外型上来看酷似电影中的太空飞行器,据英国方面介绍,可以实现洲际远程打击。', - '那么这款无人机到底有多厉害?'] -from fastNLP.api import CWS -cws = CWS(device='cpu') -print(cws.predict(text)) -# ['编者 按 : 7月 12日 , 英国 航空 航天 系统 公司 公布 了 该 公司 研制 的 第一 款 高 科技 隐形 无人 机雷电 之 神 。', '这 款 飞行 从 外型 上 来 看 酷似 电影 中 的 太空 飞行器 , 据 英国 方面 介绍 , 可以 实现 洲际 远程 打击 。', '那么 这 款 无人 机 到底 有 多 厉害 ?'] -``` - -### 词性标注 -```python -# 输入已分词序列 -text = [['编者', '按:', '7月', '12日', ',', '英国', '航空', '航天', '系统', '公司', '公布', '了', '该', '公司', - '研制', '的', '第一款', '高科技', '隐形', '无人机', '雷电之神', '。'], - ['那么', '这', '款', '无人机', '到底', '有', '多', '厉害', '?']] -from fastNLP.api import POS -pos = POS(device='cpu') -print(pos.predict(text)) -# [['编者/NN', '按:/NN', '7月/NT', '12日/NT', ',/PU', '英国/NR', '航空/NN', '航天/NN', '系统/NN', '公司/NN', '公布/VV', '了/AS', '该/DT', '公司/NN', '研制/VV', '的/DEC', '第一款/NN', '高科技/NN', '隐形/AD', '无人机/VV', '雷电之神/NN', '。/PU'], ['那么/AD', '这/DT', '款/NN', '无人机/VV', '到底/AD', '有/VE', '多/AD', '厉害/VA', '?/PU']] -``` - -### 句法分析 -```python -text = [['编者', '按:', '7月', '12日', ',', '英国', '航空', '航天', '系统', '公司', '公布', '了', '该', '公司', - '研制', '的', '第一款', '高科技', '隐形', '无人机', '雷电之神', '。'], - ['那么', '这', '款', '无人机', '到底', '有', '多', '厉害', '?']] -from fastNLP.api import Parser -parser = Parser(device='cpu') -print(parser.predict(text)) -# [['2/nn', '4/nn', '4/nn', '20/tmod', '11/punct', '10/nn', '10/nn', '10/nn', '10/nn', '11/nsubj', '20/dep', '11/asp', '14/det', '15/nsubj', '18/rcmod', '15/cpm', '18/nn', '11/dobj', '20/advmod', '0/root', '20/dobj', '20/punct'], ['4/advmod', '3/det', '8/xsubj', '8/dep', '8/advmod', '8/dep', '8/advmod', '0/root', '8/punct']] -``` - -完整样例见`examples.py` \ No newline at end of file diff --git a/legacy/api/__init__.py b/legacy/api/__init__.py deleted file mode 100644 index 5171d8c2..00000000 --- a/legacy/api/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -__all__ = ["CWS", "POS", "Parser"] -from .api import CWS, POS, Parser diff --git a/legacy/api/api.py b/legacy/api/api.py deleted file mode 100644 index 1408731f..00000000 --- a/legacy/api/api.py +++ /dev/null @@ -1,463 +0,0 @@ -import warnings - -import torch - -warnings.filterwarnings('ignore') -import os - -from fastNLP.core.dataset import DataSet -from .utils import load_url -from .processor import ModelProcessor -from fastNLP.io.dataset_loader import _cut_long_sentence -from fastNLP.io.data_loader import ConllLoader -from fastNLP.core.instance import Instance -from ..api.pipeline import Pipeline -from fastNLP.core.metrics import SpanFPreRecMetric -from .processor import IndexerProcessor - -# TODO add pretrain urls -model_urls = { - "cws": "http://123.206.98.91:8888/download/cws_lstm_ctb9_1_20-09908656.pkl", - "pos": "http://123.206.98.91:8888/download/pos_tag_model_20190119-43f8b435.pkl", - "parser": "http://123.206.98.91:8888/download/parser_20190204-c72ca5c0.pkl" -} - - -class ConllCWSReader(object): - """Deprecated. 
Use ConllLoader for all types of conll-format files.""" - - def __init__(self): - pass - - def load(self, path, cut_long_sent=False): - """ - 返回的DataSet只包含raw_sentence这个field,内容为str。 - 假定了输入为conll的格式,以空行隔开两个句子,每行共7列,即 - :: - - 1 编者按 编者按 NN O 11 nmod:topic - 2 : : PU O 11 punct - 3 7月 7月 NT DATE 4 compound:nn - 4 12日 12日 NT DATE 11 nmod:tmod - 5 , , PU O 11 punct - - 1 这 这 DT O 3 det - 2 款 款 M O 1 mark:clf - 3 飞行 飞行 NN O 8 nsubj - 4 从 从 P O 5 case - 5 外型 外型 NN O 8 nmod:prep - - """ - datalist = [] - with open(path, 'r', encoding='utf-8') as f: - sample = [] - for line in f: - if line.startswith('\n'): - datalist.append(sample) - sample = [] - elif line.startswith('#'): - continue - else: - sample.append(line.strip().split()) - if len(sample) > 0: - datalist.append(sample) - - ds = DataSet() - for sample in datalist: - # print(sample) - res = self.get_char_lst(sample) - if res is None: - continue - line = ' '.join(res) - if cut_long_sent: - sents = _cut_long_sentence(line) - else: - sents = [line] - for raw_sentence in sents: - ds.append(Instance(raw_sentence=raw_sentence)) - return ds - - def get_char_lst(self, sample): - if len(sample) == 0: - return None - text = [] - for w in sample: - t1, t2, t3, t4 = w[1], w[3], w[6], w[7] - if t3 == '_': - return None - text.append(t1) - return text - - -class ConllxDataLoader(ConllLoader): - """返回“词级别”的标签信息,包括词、词性、(句法)头依赖、(句法)边标签。跟``ZhConllPOSReader``完全不同。 - - Deprecated. Use ConllLoader for all types of conll-format files. - """ - - def __init__(self): - headers = [ - 'words', 'pos_tags', 'heads', 'labels', - ] - indexs = [ - 1, 3, 6, 7, - ] - super(ConllxDataLoader, self).__init__(headers=headers, indexes=indexs) - - -class API: - def __init__(self): - self.pipeline = None - self._dict = None - - def predict(self, *args, **kwargs): - """Do prediction for the given input. - """ - raise NotImplementedError - - def test(self, file_path): - """Test performance over the given data set. - - :param str file_path: - :return: a dictionary of metric values - """ - raise NotImplementedError - - def load(self, path, device): - if os.path.exists(os.path.expanduser(path)): - _dict = torch.load(path, map_location='cpu') - else: - _dict = load_url(path, map_location='cpu') - self._dict = _dict - self.pipeline = _dict['pipeline'] - for processor in self.pipeline.pipeline: - if isinstance(processor, ModelProcessor): - processor.set_model_device(device) - - -class POS(API): - """FastNLP API for Part-Of-Speech tagging. - - :param str model_path: the path to the model. - :param str device: device name such as "cpu" or "cuda:0". Use the same notation as PyTorch. - - """ - - def __init__(self, model_path=None, device='cpu'): - super(POS, self).__init__() - if model_path is None: - model_path = model_urls['pos'] - - self.load(model_path, device) - - def predict(self, content): - """predict函数的介绍, - 函数介绍的第二句,这句话不会换行 - - :param content: list of list of str. Each string is a token(word). - :return answer: list of list of str. Each string is a tag. - """ - if not hasattr(self, "pipeline"): - raise ValueError("You have to load model first.") - - sentence_list = content - # 1. 检查sentence的类型 - for sentence in sentence_list: - if not all((type(obj) == str for obj in sentence)): - raise ValueError("Input must be list of list of string.") - - # 2. 组建dataset - dataset = DataSet() - dataset.add_field("words", sentence_list) - - # 3. 
使用pipeline - self.pipeline(dataset) - - def merge_tag(words_list, tags_list): - rtn = [] - for words, tags in zip(words_list, tags_list): - rtn.append([w + "/" + t for w, t in zip(words, tags)]) - return rtn - - output = dataset.field_arrays["tag"].content - if isinstance(content, str): - return output[0] - elif isinstance(content, list): - return merge_tag(content, output) - - def test(self, file_path): - test_data = ConllxDataLoader().load(file_path) - - save_dict = self._dict - tag_vocab = save_dict["tag_vocab"] - pipeline = save_dict["pipeline"] - index_tag = IndexerProcessor(vocab=tag_vocab, field_name="tag", new_added_field_name="truth", is_input=False) - pipeline.pipeline = [index_tag] + pipeline.pipeline - - test_data.rename_field("pos_tags", "tag") - pipeline(test_data) - test_data.set_target("truth") - prediction = test_data.field_arrays["predict"].content - truth = test_data.field_arrays["truth"].content - seq_len = test_data.field_arrays["word_seq_origin_len"].content - - # padding by hand - max_length = max([len(seq) for seq in prediction]) - for idx in range(len(prediction)): - prediction[idx] = list(prediction[idx]) + ([0] * (max_length - len(prediction[idx]))) - truth[idx] = list(truth[idx]) + ([0] * (max_length - len(truth[idx]))) - evaluator = SpanFPreRecMetric(tag_vocab=tag_vocab, pred="predict", target="truth", - seq_len="word_seq_origin_len") - evaluator({"predict": torch.Tensor(prediction), "word_seq_origin_len": torch.Tensor(seq_len)}, - {"truth": torch.Tensor(truth)}) - test_result = evaluator.get_metric() - f1 = round(test_result['f'] * 100, 2) - pre = round(test_result['pre'] * 100, 2) - rec = round(test_result['rec'] * 100, 2) - - return {"F1": f1, "precision": pre, "recall": rec} - - -class CWS(API): - """ - 中文分词高级接口。 - - :param model_path: 当model_path为None,使用默认位置的model。如果默认位置不存在,则自动下载模型 - :param device: str,可以为'cpu', 'cuda'或'cuda:0'等。会将模型load到相应device进行推断。 - """ - - def __init__(self, model_path=None, device='cpu'): - - super(CWS, self).__init__() - if model_path is None: - model_path = model_urls['cws'] - - self.load(model_path, device) - - def predict(self, content): - """ - 分词接口。 - - :param content: str或List[str], 例如: "中文分词很重要!", 返回的结果是"中文 分词 很 重要 !"。 如果传入的为List[str],比如 - [ "中文分词很重要!", ...], 返回的结果["中文 分词 很 重要 !", ...]。 - :return: str或List[str], 根据输入的的类型决定。 - """ - if not hasattr(self, 'pipeline'): - raise ValueError("You have to load model first.") - - sentence_list = [] - # 1. 检查sentence的类型 - if isinstance(content, str): - sentence_list.append(content) - elif isinstance(content, list): - sentence_list = content - - # 2. 组建dataset - dataset = DataSet() - dataset.add_field('raw_sentence', sentence_list) - - # 3. 使用pipeline - self.pipeline(dataset) - - output = dataset.get_field('output').content - if isinstance(content, str): - return output[0] - elif isinstance(content, list): - return output - - def test(self, filepath): - """ - 传入一个分词文件路径,返回该数据集上分词f1, precision, recall。 - 分词文件应该为:: - - 1 编者按 编者按 NN O 11 nmod:topic - 2 : : PU O 11 punct - 3 7月 7月 NT DATE 4 compound:nn - 4 12日 12日 NT DATE 11 nmod:tmod - 5 , , PU O 11 punct - - 1 这 这 DT O 3 det - 2 款 款 M O 1 mark:clf - 3 飞行 飞行 NN O 8 nsubj - 4 从 从 P O 5 case - 5 外型 外型 NN O 8 nmod:prep - - 以空行分割两个句子,有内容的每行有7列。 - - :param filepath: str, 文件路径路径。 - :return: float, float, float. 分别f1, precision, recall. 
- """ - tag_proc = self._dict['tag_proc'] - cws_model = self.pipeline.pipeline[-2].model - pipeline = self.pipeline.pipeline[:-2] - - pipeline.insert(1, tag_proc) - pp = Pipeline(pipeline) - - reader = ConllCWSReader() - - # te_filename = '/home/hyan/ctb3/test.conllx' - te_dataset = reader.load(filepath) - pp(te_dataset) - - from ..core.tester import Tester - from ..core.metrics import SpanFPreRecMetric - - tester = Tester(data=te_dataset, model=cws_model, metrics=SpanFPreRecMetric(tag_proc.get_vocab()), batch_size=64, - verbose=0) - eval_res = tester.test() - - f1 = eval_res['SpanFPreRecMetric']['f'] - pre = eval_res['SpanFPreRecMetric']['pre'] - rec = eval_res['SpanFPreRecMetric']['rec'] - # print("f1:{:.2f}, pre:{:.2f}, rec:{:.2f}".format(f1, pre, rec)) - - return {"F1": f1, "precision": pre, "recall": rec} - - -class Parser(API): - def __init__(self, model_path=None, device='cpu'): - super(Parser, self).__init__() - if model_path is None: - model_path = model_urls['parser'] - - self.pos_tagger = POS(device=device) - self.load(model_path, device) - - def predict(self, content): - if not hasattr(self, 'pipeline'): - raise ValueError("You have to load model first.") - - # 1. 利用POS得到分词和pos tagging结果 - pos_out = self.pos_tagger.predict(content) - # pos_out = ['这里/NN 是/VB 分词/NN 结果/NN'.split()] - - # 2. 组建dataset - dataset = DataSet() - dataset.add_field('wp', pos_out) - dataset.apply(lambda x: [''] + [w.split('/')[0] for w in x['wp']], new_field_name='words') - dataset.apply(lambda x: [''] + [w.split('/')[1] for w in x['wp']], new_field_name='pos') - dataset.rename_field("words", "raw_words") - - # 3. 使用pipeline - self.pipeline(dataset) - dataset.apply(lambda x: [str(arc) for arc in x['arc_pred']], new_field_name='arc_pred') - dataset.apply(lambda x: [arc + '/' + label for arc, label in - zip(x['arc_pred'], x['label_pred_seq'])][1:], new_field_name='output') - # output like: [['2/top', '0/root', '4/nn', '2/dep']] - return dataset.field_arrays['output'].content - - def load_test_file(self, path): - def get_one(sample): - sample = list(map(list, zip(*sample))) - if len(sample) == 0: - return None - for w in sample[7]: - if w == '_': - print('Error Sample {}'.format(sample)) - return None - # return word_seq, pos_seq, head_seq, head_tag_seq - return sample[1], sample[3], list(map(int, sample[6])), sample[7] - - datalist = [] - with open(path, 'r', encoding='utf-8') as f: - sample = [] - for line in f: - if line.startswith('\n'): - datalist.append(sample) - sample = [] - elif line.startswith('#'): - continue - else: - sample.append(line.split('\t')) - if len(sample) > 0: - datalist.append(sample) - - data = [get_one(sample) for sample in datalist] - data_list = list(filter(lambda x: x is not None, data)) - return data_list - - def test(self, filepath): - data = self.load_test_file(filepath) - - def convert(data): - BOS = '' - dataset = DataSet() - for sample in data: - word_seq = [BOS] + sample[0] - pos_seq = [BOS] + sample[1] - heads = [0] + sample[2] - head_tags = [BOS] + sample[3] - dataset.append(Instance(raw_words=word_seq, - pos=pos_seq, - gold_heads=heads, - arc_true=heads, - tags=head_tags)) - return dataset - - ds = convert(data) - pp = self.pipeline - for p in pp: - if p.field_name == 'word_list': - p.field_name = 'gold_words' - elif p.field_name == 'pos_list': - p.field_name = 'gold_pos' - # ds.rename_field("words", "raw_words") - # ds.rename_field("tag", "pos") - pp(ds) - head_cor, label_cor, total = 0, 0, 0 - for ins in ds: - head_gold = ins['gold_heads'] - head_pred = 
ins['arc_pred'] - length = len(head_gold) - total += length - for i in range(length): - head_cor += 1 if head_pred[i] == head_gold[i] else 0 - uas = head_cor / total - # print('uas:{:.2f}'.format(uas)) - - for p in pp: - if p.field_name == 'gold_words': - p.field_name = 'word_list' - elif p.field_name == 'gold_pos': - p.field_name = 'pos_list' - - return {"USA": round(uas, 5)} - - -class Analyzer: - def __init__(self, device='cpu'): - - self.cws = CWS(device=device) - self.pos = POS(device=device) - self.parser = Parser(device=device) - - def predict(self, content, seg=False, pos=False, parser=False): - if seg is False and pos is False and parser is False: - seg = True - output_dict = {} - if seg: - seg_output = self.cws.predict(content) - output_dict['seg'] = seg_output - if pos: - pos_output = self.pos.predict(content) - output_dict['pos'] = pos_output - if parser: - parser_output = self.parser.predict(content) - output_dict['parser'] = parser_output - - return output_dict - - def test(self, filepath): - output_dict = {} - if self.cws: - seg_output = self.cws.test(filepath) - output_dict['seg'] = seg_output - if self.pos: - pos_output = self.pos.test(filepath) - output_dict['pos'] = pos_output - if self.parser: - parser_output = self.parser.test(filepath) - output_dict['parser'] = parser_output - - return output_dict diff --git a/legacy/api/converter.py b/legacy/api/converter.py deleted file mode 100644 index 4e03e465..00000000 --- a/legacy/api/converter.py +++ /dev/null @@ -1,181 +0,0 @@ -import re - - -class SpanConverter: - def __init__(self, replace_tag, pattern): - super(SpanConverter, self).__init__() - - self.replace_tag = replace_tag - self.pattern = pattern - - def find_certain_span_and_replace(self, sentence): - replaced_sentence = '' - prev_end = 0 - for match in re.finditer(self.pattern, sentence): - start, end = match.span() - span = sentence[start:end] - replaced_sentence += sentence[prev_end:start] + self.span_to_special_tag(span) - prev_end = end - replaced_sentence += sentence[prev_end:] - - return replaced_sentence - - def span_to_special_tag(self, span): - - return self.replace_tag - - def find_certain_span(self, sentence): - spans = [] - for match in re.finditer(self.pattern, sentence): - spans.append(match.span()) - return spans - - -class AlphaSpanConverter(SpanConverter): - def __init__(self): - replace_tag = '' - # 理想状态下仅处理纯为字母的情况, 但不处理<[a-zA-Z]+>(因为这应该是特殊的tag). - pattern = '[a-zA-Z]+(?=[\u4e00-\u9fff ,%.!<\\-"])' - - super(AlphaSpanConverter, self).__init__(replace_tag, pattern) - - -class DigitSpanConverter(SpanConverter): - def __init__(self): - replace_tag = '' - pattern = '\d[\d\\.]*(?=[\u4e00-\u9fff ,%.!<-])' - - super(DigitSpanConverter, self).__init__(replace_tag, pattern) - - def span_to_special_tag(self, span): - # return self.special_tag - if span[0] == '0' and len(span) > 2: - return '' - decimal_point_count = 0 # one might have more than one decimal pointers - for idx, char in enumerate(span): - if char == '.' or char == '﹒' or char == '·': - decimal_point_count += 1 - if span[-1] == '.' 
or span[-1] == '﹒' or span[-1] == '·': - # last digit being decimal point means this is not a number - if decimal_point_count == 1: - return span - else: - return '' - if decimal_point_count == 1: - return '' - elif decimal_point_count > 1: - return '' - else: - return '' - - -class TimeConverter(SpanConverter): - def __init__(self): - replace_tag = '' - pattern = '\d+[::∶][\d::∶]+(?=[\u4e00-\u9fff ,%.!<-])' - - super().__init__(replace_tag, pattern) - - -class MixNumAlphaConverter(SpanConverter): - def __init__(self): - replace_tag = '' - pattern = None - - super().__init__(replace_tag, pattern) - - def find_certain_span_and_replace(self, sentence): - replaced_sentence = '' - start = 0 - matching_flag = False - number_flag = False - alpha_flag = False - link_flag = False - slash_flag = False - bracket_flag = False - for idx in range(len(sentence)): - if re.match('[0-9a-zA-Z/\\(\\)\'′&\\-]', sentence[idx]): - if not matching_flag: - replaced_sentence += sentence[start:idx] - start = idx - if re.match('[0-9]', sentence[idx]): - number_flag = True - elif re.match('[\'′&\\-]', sentence[idx]): - link_flag = True - elif re.match('/', sentence[idx]): - slash_flag = True - elif re.match('[\\(\\)]', sentence[idx]): - bracket_flag = True - else: - alpha_flag = True - matching_flag = True - elif re.match('[\\.]', sentence[idx]): - pass - else: - if matching_flag: - if (number_flag and alpha_flag) or (link_flag and alpha_flag) \ - or (slash_flag and alpha_flag) or (link_flag and number_flag) \ - or (number_flag and bracket_flag) or (bracket_flag and alpha_flag): - span = sentence[start:idx] - start = idx - replaced_sentence += self.span_to_special_tag(span) - matching_flag = False - number_flag = False - alpha_flag = False - link_flag = False - slash_flag = False - bracket_flag = False - - replaced_sentence += sentence[start:] - return replaced_sentence - - def find_certain_span(self, sentence): - spans = [] - start = 0 - matching_flag = False - number_flag = False - alpha_flag = False - link_flag = False - slash_flag = False - bracket_flag = False - for idx in range(len(sentence)): - if re.match('[0-9a-zA-Z/\\(\\)\'′&\\-]', sentence[idx]): - if not matching_flag: - start = idx - if re.match('[0-9]', sentence[idx]): - number_flag = True - elif re.match('[\'′&\\-]', sentence[idx]): - link_flag = True - elif re.match('/', sentence[idx]): - slash_flag = True - elif re.match('[\\(\\)]', sentence[idx]): - bracket_flag = True - else: - alpha_flag = True - matching_flag = True - elif re.match('[\\.]', sentence[idx]): - pass - else: - if matching_flag: - if (number_flag and alpha_flag) or (link_flag and alpha_flag) \ - or (slash_flag and alpha_flag) or (link_flag and number_flag) \ - or (number_flag and bracket_flag) or (bracket_flag and alpha_flag): - spans.append((start, idx)) - start = idx - - matching_flag = False - number_flag = False - alpha_flag = False - link_flag = False - slash_flag = False - bracket_flag = False - - return spans - - -class EmailConverter(SpanConverter): - def __init__(self): - replaced_tag = "" - pattern = '[0-9a-zA-Z]+[@][.﹒0-9a-zA-Z@]+(?=[\u4e00-\u9fff ,%.!<\\-"$])' - - super(EmailConverter, self).__init__(replaced_tag, pattern) diff --git a/legacy/api/examples.py b/legacy/api/examples.py deleted file mode 100644 index c1b2e155..00000000 --- a/legacy/api/examples.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -api/example.py contains all API examples provided by fastNLP. -It is used as a tutorial for API or a test script since it is difficult to test APIs in travis. - -""" -from . 
import CWS, POS, Parser - -text = ['编者按:7月12日,英国航空航天系统公司公布了该公司研制的第一款高科技隐形无人机雷电之神。', - '这款飞行从外型上来看酷似电影中的太空飞行器,据英国方面介绍,可以实现洲际远程打击。', - '那么这款无人机到底有多厉害?'] - - -def chinese_word_segmentation(): - cws = CWS(device='cpu') - print(cws.predict(text)) - - -def chinese_word_segmentation_test(): - cws = CWS(device='cpu') - print(cws.test("../../test/data_for_tests/zh_sample.conllx")) - - -def pos_tagging(): - # 输入已分词序列 - text = [['编者', '按:', '7月', '12日', ',', '英国', '航空', '航天', '系统', '公司', '公布', '了', '该', '公司', - '研制', '的', '第一款', '高科技', '隐形', '无人机', '雷电之神', '。'], - ['那么', '这', '款', '无人机', '到底', '有', '多', '厉害', '?']] - pos = POS(device='cpu') - print(pos.predict(text)) - - -def pos_tagging_test(): - pos = POS(device='cpu') - print(pos.test("../../test/data_for_tests/zh_sample.conllx")) - - -def syntactic_parsing(): - text = [['编者', '按:', '7月', '12日', ',', '英国', '航空', '航天', '系统', '公司', '公布', '了', '该', '公司', - '研制', '的', '第一款', '高科技', '隐形', '无人机', '雷电之神', '。'], - ['那么', '这', '款', '无人机', '到底', '有', '多', '厉害', '?']] - parser = Parser(device='cpu') - print(parser.predict(text)) - - -def syntactic_parsing_test(): - parser = Parser(device='cpu') - print(parser.test("../../test/data_for_tests/zh_sample.conllx")) - - -if __name__ == "__main__": - # chinese_word_segmentation() - # chinese_word_segmentation_test() - # pos_tagging() - # pos_tagging_test() - syntactic_parsing() - # syntactic_parsing_test() diff --git a/legacy/api/pipeline.py b/legacy/api/pipeline.py deleted file mode 100644 index 2cec16b3..00000000 --- a/legacy/api/pipeline.py +++ /dev/null @@ -1,33 +0,0 @@ -from ..api.processor import Processor - - -class Pipeline: - """ - Pipeline takes a DataSet object as input, runs multiple processors sequentially, and - outputs a DataSet object. - """ - - def __init__(self, processors=None): - self.pipeline = [] - if isinstance(processors, list): - for proc in processors: - assert isinstance(proc, Processor), "Must be a Processor, not {}.".format(type(proc)) - self.pipeline = processors - - def add_processor(self, processor): - assert isinstance(processor, Processor), "Must be a Processor, not {}.".format(type(processor)) - self.pipeline.append(processor) - - def process(self, dataset): - assert len(self.pipeline) != 0, "You need to add some processor first." 
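A short usage sketch of this class may help. It assumes the `Processor` base class from the accompanying `processor.py` and fastNLP's `DataSet`; `LowerProcessor` is a made-up toy processor for illustration only, not part of the legacy API.

```python
from fastNLP.core.dataset import DataSet

class LowerProcessor(Processor):              # toy processor for illustration only
    def __init__(self, field_name):
        super().__init__(field_name, None)    # None -> overwrite the original field

    def process(self, dataset):
        dataset.apply(lambda ins: ins[self.field_name].lower(),
                      new_field_name=self.new_added_field_name)
        return dataset

pp = Pipeline([LowerProcessor('raw_sentence')])
ds = DataSet({'raw_sentence': ['This IS a Demo .']})
ds = pp(ds)                                   # __call__ runs every processor in order
```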
- - for proc in self.pipeline: - dataset = proc(dataset) - - return dataset - - def __call__(self, *args, **kwargs): - return self.process(*args, **kwargs) - - def __getitem__(self, item): - return self.pipeline[item] diff --git a/legacy/api/processor.py b/legacy/api/processor.py deleted file mode 100644 index 4c442ed2..00000000 --- a/legacy/api/processor.py +++ /dev/null @@ -1,428 +0,0 @@ -import re -from collections import defaultdict - -import torch - -from fastNLP.core.batch import Batch -from fastNLP.core.dataset import DataSet -from fastNLP.core.sampler import SequentialSampler -from fastNLP.core.vocabulary import Vocabulary - - -class Processor(object): - def __init__(self, field_name, new_added_field_name): - """ - - :param field_name: 处理哪个field - :param new_added_field_name: 如果为None,则认为是field_name,即覆盖原有的field - """ - self.field_name = field_name - if new_added_field_name is None: - self.new_added_field_name = field_name - else: - self.new_added_field_name = new_added_field_name - - def process(self, *args, **kwargs): - raise NotImplementedError - - def __call__(self, *args, **kwargs): - return self.process(*args, **kwargs) - - -class FullSpaceToHalfSpaceProcessor(Processor): - """全角转半角,以字符为处理单元 - - """ - - def __init__(self, field_name, change_alpha=True, change_digit=True, change_punctuation=True, - change_space=True): - super(FullSpaceToHalfSpaceProcessor, self).__init__(field_name, None) - - self.change_alpha = change_alpha - self.change_digit = change_digit - self.change_punctuation = change_punctuation - self.change_space = change_space - - FH_SPACE = [(u" ", u" ")] - FH_NUM = [ - (u"0", u"0"), (u"1", u"1"), (u"2", u"2"), (u"3", u"3"), (u"4", u"4"), - (u"5", u"5"), (u"6", u"6"), (u"7", u"7"), (u"8", u"8"), (u"9", u"9")] - FH_ALPHA = [ - (u"a", u"a"), (u"b", u"b"), (u"c", u"c"), (u"d", u"d"), (u"e", u"e"), - (u"f", u"f"), (u"g", u"g"), (u"h", u"h"), (u"i", u"i"), (u"j", u"j"), - (u"k", u"k"), (u"l", u"l"), (u"m", u"m"), (u"n", u"n"), (u"o", u"o"), - (u"p", u"p"), (u"q", u"q"), (u"r", u"r"), (u"s", u"s"), (u"t", u"t"), - (u"u", u"u"), (u"v", u"v"), (u"w", u"w"), (u"x", u"x"), (u"y", u"y"), - (u"z", u"z"), - (u"A", u"A"), (u"B", u"B"), (u"C", u"C"), (u"D", u"D"), (u"E", u"E"), - (u"F", u"F"), (u"G", u"G"), (u"H", u"H"), (u"I", u"I"), (u"J", u"J"), - (u"K", u"K"), (u"L", u"L"), (u"M", u"M"), (u"N", u"N"), (u"O", u"O"), - (u"P", u"P"), (u"Q", u"Q"), (u"R", u"R"), (u"S", u"S"), (u"T", u"T"), - (u"U", u"U"), (u"V", u"V"), (u"W", u"W"), (u"X", u"X"), (u"Y", u"Y"), - (u"Z", u"Z")] - # 谨慎使用标点符号转换, 因为"5.12特大地震"转换后可能就成了"5.12特大地震" - FH_PUNCTUATION = [ - (u'%', u'%'), (u'!', u'!'), (u'"', u'\"'), (u''', u'\''), (u'#', u'#'), - (u'¥', u'$'), (u'&', u'&'), (u'(', u'('), (u')', u')'), (u'*', u'*'), - (u'+', u'+'), (u',', u','), (u'-', u'-'), (u'.', u'.'), (u'/', u'/'), - (u':', u':'), (u';', u';'), (u'<', u'<'), (u'=', u'='), (u'>', u'>'), - (u'?', u'?'), (u'@', u'@'), (u'[', u'['), (u']', u']'), (u'\', u'\\'), - (u'^', u'^'), (u'_', u'_'), (u'`', u'`'), (u'~', u'~'), (u'{', u'{'), - (u'}', u'}'), (u'|', u'|')] - FHs = [] - if self.change_alpha: - FHs = FH_ALPHA - if self.change_digit: - FHs += FH_NUM - if self.change_punctuation: - FHs += FH_PUNCTUATION - if self.change_space: - FHs += FH_SPACE - self.convert_map = {k: v for k, v in FHs} - - def process(self, dataset): - assert isinstance(dataset, DataSet), "Only Dataset class is allowed, not {}.".format(type(dataset)) - - def inner_proc(ins): - sentence = ins[self.field_name] - new_sentence = [""] * len(sentence) - for idx, char in 
enumerate(sentence): - if char in self.convert_map: - char = self.convert_map[char] - new_sentence[idx] = char - return "".join(new_sentence) - - dataset.apply(inner_proc, new_field_name=self.field_name) - return dataset - - -class PreAppendProcessor(Processor): - """ - 向某个field的起始增加data(应该为str类型)。该field需要为list类型。即新增的field为 - [data] + instance[field_name] - - """ - - def __init__(self, data, field_name, new_added_field_name=None): - super(PreAppendProcessor, self).__init__(field_name, new_added_field_name) - self.data = data - - def process(self, dataset): - dataset.apply(lambda ins: [self.data] + ins[self.field_name], new_field_name=self.new_added_field_name) - return dataset - - -class SliceProcessor(Processor): - """ - 从某个field中只取部分内容。等价于instance[field_name][start:end:step] - - """ - - def __init__(self, start, end, step, field_name, new_added_field_name=None): - super(SliceProcessor, self).__init__(field_name, new_added_field_name) - for o in (start, end, step): - assert isinstance(o, int) or o is None - self.slice = slice(start, end, step) - - def process(self, dataset): - dataset.apply(lambda ins: ins[self.field_name][self.slice], new_field_name=self.new_added_field_name) - return dataset - - -class Num2TagProcessor(Processor): - """ - 将一句话中的数字转换为某个tag。 - - """ - - def __init__(self, tag, field_name, new_added_field_name=None): - """ - - :param tag: str, 将数字转换为该tag - :param field_name: - :param new_added_field_name: - """ - super(Num2TagProcessor, self).__init__(field_name, new_added_field_name) - self.tag = tag - self.pattern = r'[-+]?([0-9]+[.]?[0-9]*)+[/eE]?[-+]?([0-9]+[.]?[0-9]*)' - - def process(self, dataset): - - def inner_proc(ins): - s = ins[self.field_name] - new_s = [None] * len(s) - for i, w in enumerate(s): - if re.search(self.pattern, w) is not None: - w = self.tag - new_s[i] = w - return new_s - - dataset.apply(inner_proc, new_field_name=self.new_added_field_name) - return dataset - - -class IndexerProcessor(Processor): - """ - 给定一个vocabulary , 将指定field转换为index形式。指定field应该是一维的list,比如 - ['我', '是', xxx] - """ - - def __init__(self, vocab, field_name, new_added_field_name, delete_old_field=False, is_input=True): - - assert isinstance(vocab, Vocabulary), "Only Vocabulary class is allowed, not {}.".format(type(vocab)) - - super(IndexerProcessor, self).__init__(field_name, new_added_field_name) - self.vocab = vocab - self.delete_old_field = delete_old_field - self.is_input = is_input - - def set_vocab(self, vocab): - assert isinstance(vocab, Vocabulary), "Only Vocabulary class is allowed, not {}.".format(type(vocab)) - - self.vocab = vocab - - def process(self, dataset): - assert isinstance(dataset, DataSet), "Only DataSet class is allowed, not {}.".format(type(dataset)) - dataset.apply(lambda ins: [self.vocab.to_index(token) for token in ins[self.field_name]], - new_field_name=self.new_added_field_name) - if self.is_input: - dataset.set_input(self.new_added_field_name) - - if self.delete_old_field: - dataset.delete_field(self.field_name) - - return dataset - - -class VocabProcessor(Processor): - """ - 传入若干个DataSet以建立vocabulary。 - - """ - - def __init__(self, field_name, min_freq=1, max_size=None): - super(VocabProcessor, self).__init__(field_name, None) - self.vocab = Vocabulary(min_freq=min_freq, max_size=max_size) - - def process(self, *datasets): - for dataset in datasets: - assert isinstance(dataset, DataSet), "Only Dataset class is allowed, not {}.".format(type(dataset)) - dataset.apply(lambda ins: self.vocab.update(ins[self.field_name])) - - def get_vocab(self): - 
self.vocab.build_vocab() - return self.vocab - - -class SeqLenProcessor(Processor): - """ - 根据某个field新增一个sequence length的field。取该field的第一维 - - """ - - def __init__(self, field_name, new_added_field_name='seq_lens', is_input=True): - super(SeqLenProcessor, self).__init__(field_name, new_added_field_name) - self.is_input = is_input - - def process(self, dataset): - assert isinstance(dataset, DataSet), "Only Dataset class is allowed, not {}.".format(type(dataset)) - dataset.apply(lambda ins: len(ins[self.field_name]), new_field_name=self.new_added_field_name) - if self.is_input: - dataset.set_input(self.new_added_field_name) - return dataset - - -from fastNLP.core.utils import _build_args - - -class ModelProcessor(Processor): - def __init__(self, model, seq_len_field_name='seq_lens', batch_size=32): - """ - 传入一个model,在process()时传入一个dataset,该processor会通过Batch将DataSet的内容输出给model.predict或者model.forward. - model输出的内容会被增加到dataset中,field_name由model输出决定。如果生成的内容维度不是(Batch_size, )与 - (Batch_size, 1),则使用seqence length这个field进行unpad - TODO 这个类需要删除对seq_lens的依赖。 - - :param seq_len_field_name: - :param batch_size: - """ - super(ModelProcessor, self).__init__(None, None) - self.batch_size = batch_size - self.seq_len_field_name = seq_len_field_name - self.model = model - - def process(self, dataset): - self.model.eval() - assert isinstance(dataset, DataSet), "Only Dataset class is allowed, not {}.".format(type(dataset)) - data_iterator = Batch(dataset, batch_size=self.batch_size, sampler=SequentialSampler()) - - batch_output = defaultdict(list) - predict_func = self.model.forward - with torch.no_grad(): - for batch_x, _ in data_iterator: - refined_batch_x = _build_args(predict_func, **batch_x) - prediction = predict_func(**refined_batch_x) - seq_lens = batch_x[self.seq_len_field_name].tolist() - - for key, value in prediction.items(): - tmp_batch = [] - value = value.cpu().numpy() - if len(value.shape) == 1 or (len(value.shape) == 2 and value.shape[1] == 1): - batch_output[key].extend(value.tolist()) - else: - for idx, seq_len in enumerate(seq_lens): - tmp_batch.append(value[idx, :seq_len]) - batch_output[key].extend(tmp_batch) - if not self.seq_len_field_name in prediction: - batch_output[self.seq_len_field_name].extend(seq_lens) - - # TODO 当前的实现会导致之后的processor需要知道model输出的output的key是什么 - for field_name, fields in batch_output.items(): - dataset.add_field(field_name, fields, is_input=True, is_target=False) - - return dataset - - def set_model(self, model): - self.model = model - - def set_model_device(self, device): - device = torch.device(device) - self.model.to(device) - - -class Index2WordProcessor(Processor): - """ - 将DataSet中某个为index的field根据vocab转换为str - - """ - - def __init__(self, vocab, field_name, new_added_field_name): - super(Index2WordProcessor, self).__init__(field_name, new_added_field_name) - self.vocab = vocab - - def process(self, dataset): - dataset.apply(lambda ins: [self.vocab.to_word(w) for w in ins[self.field_name]], - new_field_name=self.new_added_field_name) - return dataset - - -class SetTargetProcessor(Processor): - def __init__(self, *fields, flag=True): - super(SetTargetProcessor, self).__init__(None, None) - self.fields = fields - self.flag = flag - - def process(self, dataset): - dataset.set_target(*self.fields, flag=self.flag) - return dataset - - -class SetInputProcessor(Processor): - def __init__(self, *fields, flag=True): - super(SetInputProcessor, self).__init__(None, None) - self.fields = fields - self.flag = flag - - def process(self, dataset): - 
dataset.set_input(*self.fields, flag=self.flag) - return dataset - - -class VocabIndexerProcessor(Processor): - """ - 根据DataSet创建Vocabulary,并将其用数字index。新生成的index的field会被放在new_added_filed_name, 如果没有提供 - new_added_field_name, 则覆盖原有的field_name. - - """ - - def __init__(self, field_name, new_added_filed_name=None, min_freq=1, max_size=None, - verbose=0, is_input=True): - """ - - :param field_name: 从哪个field_name创建词表,以及对哪个field_name进行index操作 - :param new_added_filed_name: index时,生成的index field的名称,如果不传入,则覆盖field_name. - :param min_freq: 创建的Vocabulary允许的单词最少出现次数. - :param max_size: 创建的Vocabulary允许的最大的单词数量 - :param verbose: 0, 不输出任何信息;1,输出信息 - :param bool is_input: - """ - super(VocabIndexerProcessor, self).__init__(field_name, new_added_filed_name) - self.min_freq = min_freq - self.max_size = max_size - - self.verbose = verbose - self.is_input = is_input - - def construct_vocab(self, *datasets): - """ - 使用传入的DataSet创建vocabulary - - :param datasets: DataSet类型的数据,用于构建vocabulary - :return: - """ - self.vocab = Vocabulary(min_freq=self.min_freq, max_size=self.max_size) - for dataset in datasets: - assert isinstance(dataset, DataSet), "Only Dataset class is allowed, not {}.".format(type(dataset)) - dataset.apply(lambda ins: self.vocab.update(ins[self.field_name])) - self.vocab.build_vocab() - if self.verbose: - print("Vocabulary Constructed, has {} items.".format(len(self.vocab))) - - def process(self, *datasets, only_index_dataset=None): - """ - 若还未建立Vocabulary,则使用dataset中的DataSet建立vocabulary;若已经有了vocabulary则使用已有的vocabulary。得到vocabulary - 后,则会index datasets与only_index_dataset。 - - :param datasets: DataSet类型的数据 - :param only_index_dataset: DataSet, or list of DataSet. 该参数中的内容只会被用于index,不会被用于生成vocabulary。 - :return: - """ - if len(datasets) == 0 and not hasattr(self, 'vocab'): - raise RuntimeError("You have to construct vocabulary first. 
Or you have to pass datasets to construct it.") - if not hasattr(self, 'vocab'): - self.construct_vocab(*datasets) - else: - if self.verbose: - print("Using constructed vocabulary with {} items.".format(len(self.vocab))) - to_index_datasets = [] - if len(datasets) != 0: - for dataset in datasets: - assert isinstance(dataset, DataSet), "Only DataSet class is allowed, not {}.".format(type(dataset)) - to_index_datasets.append(dataset) - - if not (only_index_dataset is None): - if isinstance(only_index_dataset, list): - for dataset in only_index_dataset: - assert isinstance(dataset, DataSet), "Only DataSet class is allowed, not {}.".format(type(dataset)) - to_index_datasets.append(dataset) - elif isinstance(only_index_dataset, DataSet): - to_index_datasets.append(only_index_dataset) - else: - raise TypeError('Only DataSet or list of DataSet is allowed, not {}.'.format(type(only_index_dataset))) - - for dataset in to_index_datasets: - assert isinstance(dataset, DataSet), "Only DataSet class is allowed, not {}.".format(type(dataset)) - dataset.apply(lambda ins: [self.vocab.to_index(token) for token in ins[self.field_name]], - new_field_name=self.new_added_field_name, is_input=self.is_input) - # 只返回一个,infer时为了跟其他processor保持一致 - if len(to_index_datasets) == 1: - return to_index_datasets[0] - - def set_vocab(self, vocab): - assert isinstance(vocab, Vocabulary), "Only fastNLP.core.Vocabulary is allowed, not {}.".format(type(vocab)) - self.vocab = vocab - - def delete_vocab(self): - del self.vocab - - def get_vocab_size(self): - return len(self.vocab) - - def set_verbose(self, verbose): - """ - 设置processor verbose状态。 - - :param verbose: int, 0,不输出任何信息;1,输出vocab 信息。 - :return: - """ - self.verbose = verbose diff --git a/legacy/api/utils.py b/legacy/api/utils.py deleted file mode 100644 index 184e5fe6..00000000 --- a/legacy/api/utils.py +++ /dev/null @@ -1,134 +0,0 @@ -import hashlib -import os -import re -import shutil -import sys -import tempfile - -import torch - -try: - from requests.utils import urlparse - from requests import get as urlopen - requests_available = True -except ImportError: - requests_available = False - if sys.version_info[0] == 2: - from urlparse import urlparse # noqa f811 - from urllib2 import urlopen # noqa f811 - else: - from urllib.request import urlopen - from urllib.parse import urlparse -try: - from tqdm.auto import tqdm -except: - from fastNLP.core.utils import _pseudo_tqdm as tqdm - -# matches bfd8deac from resnet18-bfd8deac.pth -HASH_REGEX = re.compile(r'-([a-f0-9]*)\.') - - -def load_url(url, model_dir=None, map_location=None, progress=True): - r"""Loads the Torch serialized object at the given URL. - - If the object is already present in `model_dir`, it's deserialized and - returned. The filename part of the URL should follow the naming convention - ``filename-.ext`` where ```` is the first eight or more - digits of the SHA256 hash of the contents of the file. The hash is used to - ensure unique names and to verify the contents of the file. - - The default value of `model_dir` is ``$TORCH_HOME/models`` where - ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be - overridden with the ``$TORCH_MODEL_ZOO`` environment variable. 
- - Args: - url (string): URL of the object to download - model_dir (string, optional): directory in which to save the object - map_location (optional): a function or a dict specifying how to remap storage locations (see torch.load) - progress (bool, optional): whether or not to display a progress bar to stderr - - Example: - # >>> state_dict = model_zoo.load_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth') - - """ - if model_dir is None: - torch_home = os.path.expanduser(os.getenv('fastNLP_HOME', '~/.fastNLP')) - model_dir = os.getenv('fastNLP_MODEL_ZOO', os.path.join(torch_home, 'models')) - if not os.path.exists(model_dir): - os.makedirs(model_dir) - parts = urlparse(url) - filename = os.path.basename(parts.path) - cached_file = os.path.join(model_dir, filename) - if not os.path.exists(cached_file): - sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) - # hash_prefix = HASH_REGEX.search(filename).group(1) - _download_url_to_file(url, cached_file, hash_prefix=None, progress=progress) - return torch.load(cached_file, map_location=map_location) - - -def _download_url_to_file(url, dst, hash_prefix, progress): - if requests_available: - u = urlopen(url, stream=True) - file_size = int(u.headers["Content-Length"]) - u = u.raw - else: - u = urlopen(url) - meta = u.info() - if hasattr(meta, 'getheaders'): - file_size = int(meta.getheaders("Content-Length")[0]) - else: - file_size = int(meta.get_all("Content-Length")[0]) - - f = tempfile.NamedTemporaryFile(delete=False) - try: - if hash_prefix is not None: - sha256 = hashlib.sha256() - with tqdm(total=file_size, disable=not progress) as pbar: - while True: - buffer = u.read(8192) - if len(buffer) == 0: - break - f.write(buffer) - if hash_prefix is not None: - sha256.update(buffer) - pbar.update(len(buffer)) - - f.close() - if hash_prefix is not None: - digest = sha256.hexdigest() - if digest[:len(hash_prefix)] != hash_prefix: - raise RuntimeError('invalid hash value (expected "{}", got "{}")' - .format(hash_prefix, digest)) - shutil.move(f.name, dst) - finally: - f.close() - if os.path.exists(f.name): - os.remove(f.name) - - -if tqdm is None: - # fake tqdm if it's not installed - class tqdm(object): - - def __init__(self, total, disable=False): - self.total = total - self.disable = disable - self.n = 0 - - def update(self, n): - if self.disable: - return - - self.n += n - sys.stderr.write("\r{0:.1f}%".format(100 * self.n / float(self.total))) - sys.stderr.flush() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if self.disable: - return - - sys.stderr.write('\n') - diff --git a/legacy/automl/__init__.py b/legacy/automl/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/legacy/automl/enas_controller.py b/legacy/automl/enas_controller.py deleted file mode 100644 index 6ddbb211..00000000 --- a/legacy/automl/enas_controller.py +++ /dev/null @@ -1,223 +0,0 @@ -# Code Modified from https://github.com/carpedm20/ENAS-pytorch -"""A module with NAS controller-related code.""" -import collections -import os - -import torch -import torch.nn.functional as F - -import fastNLP.automl.enas_utils as utils -from fastNLP.automl.enas_utils import Node - - -def _construct_dags(prev_nodes, activations, func_names, num_blocks): - """Constructs a set of DAGs based on the actions, i.e., previous nodes and - activation functions, sampled from the controller/policy pi. - - Args: - prev_nodes: Previous node actions from the policy. 
- activations: Activations sampled from the policy. - func_names: Mapping from activation function names to functions. - num_blocks: Number of blocks in the target RNN cell. - - Returns: - A list of DAGs defined by the inputs. - - RNN cell DAGs are represented in the following way: - - 1. Each element (node) in a DAG is a list of `Node`s. - - 2. The `Node`s in the list dag[i] correspond to the subsequent nodes - that take the output from node i as their own input. - - 3. dag[-1] is the node that takes input from x^{(t)} and h^{(t - 1)}. - dag[-1] always feeds dag[0]. - dag[-1] acts as if `w_xc`, `w_hc`, `w_xh` and `w_hh` are its - weights. - - 4. dag[N - 1] is the node that produces the hidden state passed to - the next timestep. dag[N - 1] is also always a leaf node, and therefore - is always averaged with the other leaf nodes and fed to the output - decoder. - """ - dags = [] - for nodes, func_ids in zip(prev_nodes, activations): - dag = collections.defaultdict(list) - - # add first node - dag[-1] = [Node(0, func_names[func_ids[0]])] - dag[-2] = [Node(0, func_names[func_ids[0]])] - - # add following nodes - for jdx, (idx, func_id) in enumerate(zip(nodes, func_ids[1:])): - dag[utils.to_item(idx)].append(Node(jdx + 1, func_names[func_id])) - - leaf_nodes = set(range(num_blocks)) - dag.keys() - - # merge with avg - for idx in leaf_nodes: - dag[idx] = [Node(num_blocks, 'avg')] - - # This is actually y^{(t)}. h^{(t)} is node N - 1 in - # the graph, where N Is the number of nodes. I.e., h^{(t)} takes - # only one other node as its input. - # last h[t] node - last_node = Node(num_blocks + 1, 'h[t]') - dag[num_blocks] = [last_node] - dags.append(dag) - - return dags - - -class Controller(torch.nn.Module): - """Based on - https://github.com/pytorch/examples/blob/master/word_language_model/model.py - - RL controllers do not necessarily have much to do with - language models. - - Base the controller RNN on the GRU from: - https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py - """ - def __init__(self, num_blocks=4, controller_hid=100, cuda=False): - torch.nn.Module.__init__(self) - - # `num_tokens` here is just the activation function - # for every even step, - self.shared_rnn_activations = ['tanh', 'ReLU', 'identity', 'sigmoid'] - self.num_tokens = [len(self.shared_rnn_activations)] - self.controller_hid = controller_hid - self.use_cuda = cuda - self.num_blocks = num_blocks - for idx in range(num_blocks): - self.num_tokens += [idx + 1, len(self.shared_rnn_activations)] - self.func_names = self.shared_rnn_activations - - num_total_tokens = sum(self.num_tokens) - - self.encoder = torch.nn.Embedding(num_total_tokens, - controller_hid) - self.lstm = torch.nn.LSTMCell(controller_hid, controller_hid) - - # Perhaps these weights in the decoder should be - # shared? At least for the activation functions, which all have the - # same size. 
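For reference, here is a concrete instance of the dictionary that `_construct_dags` above builds. The chosen parents and activation names are illustrative; only the overall shape (node id mapped to a list of `Node(id, name)` children, with `-1`/`-2` as the inputs and the final entry producing `h[t]`) follows the code.

```python
import collections

Node = collections.namedtuple('Node', ['id', 'name'])   # as defined in the removed enas_utils

example_dag = {                       # one possible cell for num_blocks = 4
    -1: [Node(0, 'tanh')],            # x^{(t)} and h^{(t-1)} both feed node 0
    -2: [Node(0, 'tanh')],
    0: [Node(1, 'ReLU'), Node(2, 'sigmoid')],
    1: [Node(3, 'identity')],
    2: [Node(4, 'avg')],              # leaf nodes are averaged ...
    3: [Node(4, 'avg')],
    4: [Node(5, 'h[t]')],             # ... and the average becomes the new hidden state
}
```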
- self.decoders = [] - for idx, size in enumerate(self.num_tokens): - decoder = torch.nn.Linear(controller_hid, size) - self.decoders.append(decoder) - - self._decoders = torch.nn.ModuleList(self.decoders) - - self.reset_parameters() - self.static_init_hidden = utils.keydefaultdict(self.init_hidden) - - def _get_default_hidden(key): - return utils.get_variable( - torch.zeros(key, self.controller_hid), - self.use_cuda, - requires_grad=False) - - self.static_inputs = utils.keydefaultdict(_get_default_hidden) - - def reset_parameters(self): - init_range = 0.1 - for param in self.parameters(): - param.data.uniform_(-init_range, init_range) - for decoder in self.decoders: - decoder.bias.data.fill_(0) - - def forward(self, # pylint:disable=arguments-differ - inputs, - hidden, - block_idx, - is_embed): - if not is_embed: - embed = self.encoder(inputs) - else: - embed = inputs - - hx, cx = self.lstm(embed, hidden) - logits = self.decoders[block_idx](hx) - - logits /= 5.0 - - # # exploration - # if self.args.mode == 'train': - # logits = (2.5 * F.tanh(logits)) - - return logits, (hx, cx) - - def sample(self, batch_size=1, with_details=False, save_dir=None): - """Samples a set of `args.num_blocks` many computational nodes from the - controller, where each node is made up of an activation function, and - each node except the last also includes a previous node. - """ - if batch_size < 1: - raise Exception(f'Wrong batch_size: {batch_size} < 1') - - # [B, L, H] - inputs = self.static_inputs[batch_size] - hidden = self.static_init_hidden[batch_size] - - activations = [] - entropies = [] - log_probs = [] - prev_nodes = [] - # The RNN controller alternately outputs an activation, - # followed by a previous node, for each block except the last one, - # which only gets an activation function. The last node is the output - # node, and its previous node is the average of all leaf nodes. - for block_idx in range(2*(self.num_blocks - 1) + 1): - logits, hidden = self.forward(inputs, - hidden, - block_idx, - is_embed=(block_idx == 0)) - - probs = F.softmax(logits, dim=-1) - log_prob = F.log_softmax(logits, dim=-1) - # .mean() for entropy? - entropy = -(log_prob * probs).sum(1, keepdim=False) - - action = probs.multinomial(num_samples=1).data - selected_log_prob = log_prob.gather( - 1, utils.get_variable(action, requires_grad=False)) - - # why the [:, 0] here? Should it be .squeeze(), or - # .view()? Same below with `action`. 
- entropies.append(entropy) - log_probs.append(selected_log_prob[:, 0]) - - # 0: function, 1: previous node - mode = block_idx % 2 - inputs = utils.get_variable( - action[:, 0] + sum(self.num_tokens[:mode]), - requires_grad=False) - - if mode == 0: - activations.append(action[:, 0]) - elif mode == 1: - prev_nodes.append(action[:, 0]) - - prev_nodes = torch.stack(prev_nodes).transpose(0, 1) - activations = torch.stack(activations).transpose(0, 1) - - dags = _construct_dags(prev_nodes, - activations, - self.func_names, - self.num_blocks) - - if save_dir is not None: - for idx, dag in enumerate(dags): - utils.draw_network(dag, - os.path.join(save_dir, f'graph{idx}.png')) - - if with_details: - return dags, torch.cat(log_probs), torch.cat(entropies) - - return dags - - def init_hidden(self, batch_size): - zeros = torch.zeros(batch_size, self.controller_hid) - return (utils.get_variable(zeros, self.use_cuda, requires_grad=False), - utils.get_variable(zeros.clone(), self.use_cuda, requires_grad=False)) diff --git a/legacy/automl/enas_model.py b/legacy/automl/enas_model.py deleted file mode 100644 index 4f9fb449..00000000 --- a/legacy/automl/enas_model.py +++ /dev/null @@ -1,388 +0,0 @@ -# Code Modified from https://github.com/carpedm20/ENAS-pytorch - -"""Module containing the shared RNN model.""" -import collections - -import numpy as np -import torch -import torch.nn.functional as F -from torch import nn -from torch.autograd import Variable - -import fastNLP.automl.enas_utils as utils -from fastNLP.models.base_model import BaseModel - - -def _get_dropped_weights(w_raw, dropout_p, is_training): - """Drops out weights to implement DropConnect. - - Args: - w_raw: Full, pre-dropout, weights to be dropped out. - dropout_p: Proportion of weights to drop out. - is_training: True iff _shared_ model is training. - - Returns: - The dropped weights. - - Why does torch.nn.functional.dropout() return: - 1. `torch.autograd.Variable()` on the training loop - 2. `torch.nn.Parameter()` on the controller or eval loop, when - training = False... - - Even though the call to `_setweights` in the Smerity repo's - `weight_drop.py` does not have this behaviour, and `F.dropout` always - returns `torch.autograd.Variable` there, even when `training=False`? - - The above TODO is the reason for the hacky check for `torch.nn.Parameter`. - """ - dropped_w = F.dropout(w_raw, p=dropout_p, training=is_training) - - if isinstance(dropped_w, torch.nn.Parameter): - dropped_w = dropped_w.clone() - - return dropped_w - -class EmbeddingDropout(torch.nn.Embedding): - """Class for dropping out embeddings by zero'ing out parameters in the - embedding matrix. - - This is equivalent to dropping out particular words, e.g., in the sentence - 'the quick brown fox jumps over the lazy dog', dropping out 'the' would - lead to the sentence '### quick brown fox jumps over ### lazy dog' (in the - embedding vector space). - - See 'A Theoretically Grounded Application of Dropout in Recurrent Neural - Networks', (Gal and Ghahramani, 2016). - """ - def __init__(self, - num_embeddings, - embedding_dim, - max_norm=None, - norm_type=2, - scale_grad_by_freq=False, - sparse=False, - dropout=0.1, - scale=None): - """Embedding constructor. - - Args: - dropout: Dropout probability. - scale: Used to scale parameters of embedding weight matrix that are - not dropped out. Note that this is _in addition_ to the - `1/(1 - dropout)` scaling. - - See `torch.nn.Embedding` for remaining arguments. 
- """ - torch.nn.Embedding.__init__(self, - num_embeddings=num_embeddings, - embedding_dim=embedding_dim, - max_norm=max_norm, - norm_type=norm_type, - scale_grad_by_freq=scale_grad_by_freq, - sparse=sparse) - self.dropout = dropout - assert (dropout >= 0.0) and (dropout < 1.0), ('Dropout must be >= 0.0 ' - 'and < 1.0') - self.scale = scale - - def forward(self, inputs): # pylint:disable=arguments-differ - """Embeds `inputs` with the dropped out embedding weight matrix.""" - if self.training: - dropout = self.dropout - else: - dropout = 0 - - if dropout: - mask = self.weight.data.new(self.weight.size(0), 1) - mask.bernoulli_(1 - dropout) - mask = mask.expand_as(self.weight) - mask = mask / (1 - dropout) - masked_weight = self.weight * Variable(mask) - else: - masked_weight = self.weight - if self.scale and self.scale != 1: - masked_weight = masked_weight * self.scale - - return F.embedding(inputs, - masked_weight, - max_norm=self.max_norm, - norm_type=self.norm_type, - scale_grad_by_freq=self.scale_grad_by_freq, - sparse=self.sparse) - - -class LockedDropout(nn.Module): - # code from https://github.com/salesforce/awd-lstm-lm/blob/master/locked_dropout.py - def __init__(self): - super().__init__() - - def forward(self, x, dropout=0.5): - if not self.training or not dropout: - return x - m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout) - mask = Variable(m, requires_grad=False) / (1 - dropout) - mask = mask.expand_as(x) - return mask * x - - -class ENASModel(BaseModel): - """Shared RNN model.""" - def __init__(self, embed_num, num_classes, num_blocks=4, cuda=False, shared_hid=1000, shared_embed=1000): - super(ENASModel, self).__init__() - - self.use_cuda = cuda - - self.shared_hid = shared_hid - self.num_blocks = num_blocks - self.decoder = nn.Linear(self.shared_hid, num_classes) - self.encoder = EmbeddingDropout(embed_num, - shared_embed, - dropout=0.1) - self.lockdrop = LockedDropout() - self.dag = None - - # Tie weights - # self.decoder.weight = self.encoder.weight - - # Since W^{x, c} and W^{h, c} are always summed, there - # is no point duplicating their bias offset parameter. Likewise for - # W^{x, h} and W^{h, h}. - self.w_xc = nn.Linear(shared_embed, self.shared_hid) - self.w_xh = nn.Linear(shared_embed, self.shared_hid) - - # The raw weights are stored here because the hidden-to-hidden weights - # are weight dropped on the forward pass. 
- self.w_hc_raw = torch.nn.Parameter( - torch.Tensor(self.shared_hid, self.shared_hid)) - self.w_hh_raw = torch.nn.Parameter( - torch.Tensor(self.shared_hid, self.shared_hid)) - self.w_hc = None - self.w_hh = None - - self.w_h = collections.defaultdict(dict) - self.w_c = collections.defaultdict(dict) - - for idx in range(self.num_blocks): - for jdx in range(idx + 1, self.num_blocks): - self.w_h[idx][jdx] = nn.Linear(self.shared_hid, - self.shared_hid, - bias=False) - self.w_c[idx][jdx] = nn.Linear(self.shared_hid, - self.shared_hid, - bias=False) - - self._w_h = nn.ModuleList([self.w_h[idx][jdx] - for idx in self.w_h - for jdx in self.w_h[idx]]) - self._w_c = nn.ModuleList([self.w_c[idx][jdx] - for idx in self.w_c - for jdx in self.w_c[idx]]) - - self.batch_norm = None - # if args.mode == 'train': - # self.batch_norm = nn.BatchNorm1d(self.shared_hid) - # else: - # self.batch_norm = None - - self.reset_parameters() - self.static_init_hidden = utils.keydefaultdict(self.init_hidden) - - def setDAG(self, dag): - if self.dag is None: - self.dag = dag - - def forward(self, word_seq, hidden=None): - inputs = torch.transpose(word_seq, 0, 1) - - time_steps = inputs.size(0) - batch_size = inputs.size(1) - - - self.w_hh = _get_dropped_weights(self.w_hh_raw, - 0.5, - self.training) - self.w_hc = _get_dropped_weights(self.w_hc_raw, - 0.5, - self.training) - - # hidden = self.static_init_hidden[batch_size] if hidden is None else hidden - hidden = self.static_init_hidden[batch_size] - - embed = self.encoder(inputs) - - embed = self.lockdrop(embed, 0.65 if self.training else 0) - - # The norm of hidden states are clipped here because - # otherwise ENAS is especially prone to exploding activations on the - # forward pass. This could probably be fixed in a more elegant way, but - # it might be exposing a weakness in the ENAS algorithm as currently - # proposed. - # - # For more details, see - # https://github.com/carpedm20/ENAS-pytorch/issues/6 - clipped_num = 0 - max_clipped_norm = 0 - h1tohT = [] - logits = [] - for step in range(time_steps): - x_t = embed[step] - logit, hidden = self.cell(x_t, hidden, self.dag) - - hidden_norms = hidden.norm(dim=-1) - max_norm = 25.0 - if hidden_norms.data.max() > max_norm: - # Just directly use the torch slice operations - # in PyTorch v0.4. - # - # This workaround for PyTorch v0.3.1 does everything in numpy, - # because the PyTorch slicing and slice assignment is too - # flaky. 
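This hidden-norm clipping (continued just below) only rescales rows whose L2 norm exceeds max_norm = 25.0; the detour through numpy was a PyTorch 0.3.1 workaround. A compact sketch of the same rescaling with current tensor ops, shown for reference rather than as a proposed change, with made-up shapes:

import torch

max_norm = 25.0
hidden = torch.randn(4, 8) * 20                        # some rows will exceed the threshold

norms = hidden.norm(dim=-1, keepdim=True)              # per-row L2 norm, shape (4, 1)
# Rows over the limit are scaled back onto the max_norm sphere; the rest are untouched.
hidden = torch.where(norms > max_norm, hidden * (max_norm / norms), hidden)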
- hidden_norms = hidden_norms.data.cpu().numpy() - - clipped_num += 1 - if hidden_norms.max() > max_clipped_norm: - max_clipped_norm = hidden_norms.max() - - clip_select = hidden_norms > max_norm - clip_norms = hidden_norms[clip_select] - - mask = np.ones(hidden.size()) - normalizer = max_norm/clip_norms - normalizer = normalizer[:, np.newaxis] - - mask[clip_select] = normalizer - - if self.use_cuda: - hidden *= torch.autograd.Variable( - torch.FloatTensor(mask).cuda(), requires_grad=False) - else: - hidden *= torch.autograd.Variable( - torch.FloatTensor(mask), requires_grad=False) - logits.append(logit) - h1tohT.append(hidden) - - h1tohT = torch.stack(h1tohT) - output = torch.stack(logits) - raw_output = output - - output = self.lockdrop(output, 0.4 if self.training else 0) - - #Pooling - output = torch.mean(output, 0) - - decoded = self.decoder(output) - - extra_out = {'dropped': decoded, - 'hiddens': h1tohT, - 'raw': raw_output} - return {'pred': decoded, 'hidden': hidden, 'extra_out': extra_out} - - def cell(self, x, h_prev, dag): - """Computes a single pass through the discovered RNN cell.""" - c = {} - h = {} - f = {} - - f[0] = self.get_f(dag[-1][0].name) - c[0] = torch.sigmoid(self.w_xc(x) + F.linear(h_prev, self.w_hc, None)) - h[0] = (c[0]*f[0](self.w_xh(x) + F.linear(h_prev, self.w_hh, None)) + - (1 - c[0])*h_prev) - - leaf_node_ids = [] - q = collections.deque() - q.append(0) - - # Computes connections from the parent nodes `node_id` - # to their child nodes `next_id` recursively, skipping leaf nodes. A - # leaf node is a node whose id == `self.num_blocks`. - # - # Connections between parent i and child j should be computed as - # h_j = c_j*f_{ij}{(W^h_{ij}*h_i)} + (1 - c_j)*h_i, - # where c_j = \sigmoid{(W^c_{ij}*h_i)} - # - # See Training details from Section 3.1 of the paper. - # - # The following algorithm does a breadth-first (since `q.popleft()` is - # used) search over the nodes and computes all the hidden states. - while True: - if len(q) == 0: - break - - node_id = q.popleft() - nodes = dag[node_id] - - for next_node in nodes: - next_id = next_node.id - if next_id == self.num_blocks: - leaf_node_ids.append(node_id) - assert len(nodes) == 1, ('parent of leaf node should have ' - 'only one child') - continue - - w_h = self.w_h[node_id][next_id] - w_c = self.w_c[node_id][next_id] - - f[next_id] = self.get_f(next_node.name) - c[next_id] = torch.sigmoid(w_c(h[node_id])) - h[next_id] = (c[next_id]*f[next_id](w_h(h[node_id])) + - (1 - c[next_id])*h[node_id]) - - q.append(next_id) - - # Instead of averaging loose ends, perhaps there should - # be a set of separate unshared weights for each "loose" connection - # between each node in a cell and the output. - # - # As it stands, all weights W^h_{ij} are doing double duty by - # connecting both from i to j, as well as from i to the output. 
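The per-edge update in `cell()` above follows Section 3.1 of the ENAS paper: h_j = c_j * f_ij(W^h_ij · h_i) + (1 - c_j) * h_i, with c_j = sigmoid(W^c_ij · h_i). A self-contained sketch of a single edge, using tanh as a stand-in for whichever activation the controller sampled (illustrative shapes, not code from this patch):

import torch
import torch.nn as nn

hidden_size = 8
w_h = nn.Linear(hidden_size, hidden_size, bias=False)   # W^h_{ij} for this edge
w_c = nn.Linear(hidden_size, hidden_size, bias=False)   # W^c_{ij} for this edge
h_i = torch.randn(2, hidden_size)                       # parent node state, (batch, hidden)

c_j = torch.sigmoid(w_c(h_i))                           # per-dimension highway gate
h_j = c_j * torch.tanh(w_h(h_i)) + (1 - c_j) * h_i      # gated mix of transform and skip connection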
- - # average all the loose ends - leaf_nodes = [h[node_id] for node_id in leaf_node_ids] - output = torch.mean(torch.stack(leaf_nodes, 2), -1) - - # stabilizing the Updates of omega - if self.batch_norm is not None: - output = self.batch_norm(output) - - return output, h[self.num_blocks - 1] - - def init_hidden(self, batch_size): - zeros = torch.zeros(batch_size, self.shared_hid) - return utils.get_variable(zeros, self.use_cuda, requires_grad=False) - - def get_f(self, name): - name = name.lower() - if name == 'relu': - f = torch.relu - elif name == 'tanh': - f = torch.tanh - elif name == 'identity': - f = lambda x: x - elif name == 'sigmoid': - f = torch.sigmoid - return f - - - @property - def num_parameters(self): - def size(p): - return np.prod(p.size()) - return sum([size(param) for param in self.parameters()]) - - - def reset_parameters(self): - init_range = 0.025 - # init_range = 0.025 if self.args.mode == 'train' else 0.04 - for param in self.parameters(): - param.data.uniform_(-init_range, init_range) - self.decoder.bias.data.fill_(0) - - def predict(self, word_seq): - """ - - :param word_seq: torch.LongTensor, [batch_size, seq_len] - :return predict: dict of torch.LongTensor, [batch_size, seq_len] - """ - output = self(word_seq) - _, predict = output['pred'].max(dim=1) - return {'pred': predict} diff --git a/legacy/automl/enas_trainer.py b/legacy/automl/enas_trainer.py deleted file mode 100644 index e3524aa9..00000000 --- a/legacy/automl/enas_trainer.py +++ /dev/null @@ -1,383 +0,0 @@ -# Code Modified from https://github.com/carpedm20/ENAS-pytorch - -import math -import time -from datetime import datetime -from datetime import timedelta - -import numpy as np -import torch - -try: - from tqdm.auto import tqdm -except: - from fastNLP.core.utils import _pseudo_tqdm as tqdm - -from fastNLP.core.batch import Batch -from fastNLP.core.callback import CallbackException -from fastNLP.core.dataset import DataSet -from fastNLP.core.utils import _move_dict_value_to_device -import fastNLP -from . import enas_utils as utils -from fastNLP.core.utils import _build_args - -from torch.optim import Adam - - -def _get_no_grad_ctx_mgr(): - """Returns a the `torch.no_grad` context manager for PyTorch version >= - 0.4, or a no-op context manager otherwise. - """ - return torch.no_grad() - - -class ENASTrainer(fastNLP.Trainer): - """A class to wrap training code.""" - def __init__(self, train_data, model, controller, **kwargs): - """Constructor for training algorithm. 
- :param DataSet train_data: the training data - :param torch.nn.modules.module model: a PyTorch model - :param torch.nn.modules.module controller: a PyTorch model - """ - self.final_epochs = kwargs['final_epochs'] - kwargs.pop('final_epochs') - super(ENASTrainer, self).__init__(train_data, model, **kwargs) - self.controller_step = 0 - self.shared_step = 0 - self.max_length = 35 - - self.shared = model - self.controller = controller - - self.shared_optim = Adam( - self.shared.parameters(), - lr=20.0, - weight_decay=1e-7) - - self.controller_optim = Adam( - self.controller.parameters(), - lr=3.5e-4) - - def train(self, load_best_model=True): - """ - :param bool load_best_model: 该参数只有在初始化提供了dev_data的情况下有效,如果True, trainer将在返回之前重新加载dev表现 - 最好的模型参数。 - :return results: 返回一个字典类型的数据, - 内含以下内容:: - - seconds: float, 表示训练时长 - 以下三个内容只有在提供了dev_data的情况下会有。 - best_eval: Dict of Dict, 表示evaluation的结果 - best_epoch: int,在第几个epoch取得的最佳值 - best_step: int, 在第几个step(batch)更新取得的最佳值 - - """ - results = {} - if self.n_epochs <= 0: - print(f"training epoch is {self.n_epochs}, nothing was done.") - results['seconds'] = 0. - return results - try: - if torch.cuda.is_available() and self.use_cuda: - self.model = self.model.cuda() - self._model_device = self.model.parameters().__next__().device - self._mode(self.model, is_test=False) - - self.start_time = str(datetime.now().strftime('%Y-%m-%d-%H-%M-%S')) - start_time = time.time() - print("training epochs started " + self.start_time, flush=True) - - try: - self.callback_manager.on_train_begin() - self._train() - self.callback_manager.on_train_end(self.model) - except (CallbackException, KeyboardInterrupt) as e: - self.callback_manager.on_exception(e, self.model) - - if self.dev_data is not None: - print("\nIn Epoch:{}/Step:{}, got best dev performance:".format(self.best_dev_epoch, self.best_dev_step) + - self.tester._format_eval_results(self.best_dev_perf),) - results['best_eval'] = self.best_dev_perf - results['best_epoch'] = self.best_dev_epoch - results['best_step'] = self.best_dev_step - if load_best_model: - model_name = "best_" + "_".join([self.model.__class__.__name__, self.metric_key, self.start_time]) - load_succeed = self._load_model(self.model, model_name) - if load_succeed: - print("Reloaded the best model.") - else: - print("Fail to reload best model.") - finally: - pass - results['seconds'] = round(time.time() - start_time, 2) - - return results - - def _train(self): - if not self.use_tqdm: - from fastNLP.core.utils import _pseudo_tqdm as inner_tqdm - else: - inner_tqdm = tqdm - self.step = 0 - start = time.time() - total_steps = (len(self.train_data) // self.batch_size + int( - len(self.train_data) % self.batch_size != 0)) * self.n_epochs - with inner_tqdm(total=total_steps, postfix='loss:{0:<6.5f}', leave=False, dynamic_ncols=True) as pbar: - avg_loss = 0 - data_iterator = Batch(self.train_data, batch_size=self.batch_size, sampler=self.sampler, as_numpy=False, - prefetch=self.prefetch) - for epoch in range(1, self.n_epochs+1): - pbar.set_description_str(desc="Epoch {}/{}".format(epoch, self.n_epochs)) - last_stage = (epoch > self.n_epochs + 1 - self.final_epochs) - if epoch == self.n_epochs + 1 - self.final_epochs: - print('Entering the final stage. (Only train the selected structure)') - # early stopping - self.callback_manager.on_epoch_begin(epoch, self.n_epochs) - - # 1. Training the shared parameters omega of the child models - self.train_shared(pbar) - - # 2. 
Training the controller parameters theta - if not last_stage: - self.train_controller() - - if ((self.validate_every > 0 and self.step % self.validate_every == 0) or - (self.validate_every < 0 and self.step % len(data_iterator) == 0)) \ - and self.dev_data is not None: - if not last_stage: - self.derive() - eval_res = self._do_validation(epoch=epoch, step=self.step) - eval_str = "Evaluation at Epoch {}/{}. Step:{}/{}. ".format(epoch, self.n_epochs, self.step, - total_steps) + \ - self.tester._format_eval_results(eval_res) - pbar.write(eval_str) - - # lr decay; early stopping - self.callback_manager.on_epoch_end(epoch, self.n_epochs, self.optimizer) - # =============== epochs end =================== # - pbar.close() - # ============ tqdm end ============== # - - - def get_loss(self, inputs, targets, hidden, dags): - """Computes the loss for the same batch for M models. - - This amounts to an estimate of the loss, which is turned into an - estimate for the gradients of the shared model. - """ - if not isinstance(dags, list): - dags = [dags] - - loss = 0 - for dag in dags: - self.shared.setDAG(dag) - inputs = _build_args(self.shared.forward, **inputs) - inputs['hidden'] = hidden - result = self.shared(**inputs) - output, hidden, extra_out = result['pred'], result['hidden'], result['extra_out'] - - self.callback_manager.on_loss_begin(targets, result) - sample_loss = self._compute_loss(result, targets) - loss += sample_loss - - assert len(dags) == 1, 'there are multiple `hidden` for multple `dags`' - return loss, hidden, extra_out - - def train_shared(self, pbar=None, max_step=None, dag=None): - """Train the language model for 400 steps of minibatches of 64 - examples. - - Args: - max_step: Used to run extra training steps as a warm-up. - dag: If not None, is used instead of calling sample(). - - BPTT is truncated at 35 timesteps. - - For each weight update, gradients are estimated by sampling M models - from the fixed controller policy, and averaging their gradients - computed on a batch of training data. - """ - model = self.shared - model.train() - self.controller.eval() - - hidden = self.shared.init_hidden(self.batch_size) - - abs_max_grad = 0 - abs_max_hidden_norm = 0 - step = 0 - raw_total_loss = 0 - total_loss = 0 - train_idx = 0 - avg_loss = 0 - data_iterator = Batch(self.train_data, batch_size=self.batch_size, sampler=self.sampler, as_numpy=False, - prefetch=self.prefetch) - - for batch_x, batch_y in data_iterator: - _move_dict_value_to_device(batch_x, batch_y, device=self._model_device) - indices = data_iterator.get_batch_indices() - # negative sampling; replace unknown; re-weight batch_y - self.callback_manager.on_batch_begin(batch_x, batch_y, indices) - # prediction = self._data_forward(self.model, batch_x) - - dags = self.controller.sample(1) - inputs, targets = batch_x, batch_y - # self.callback_manager.on_loss_begin(batch_y, prediction) - loss, hidden, extra_out = self.get_loss(inputs, - targets, - hidden, - dags) - hidden.detach_() - - avg_loss += loss.item() - - # Is loss NaN or inf? 
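The `hidden.detach_()` call above is what the docstring's "BPTT is truncated at 35 timesteps" amounts to: the hidden state is carried between batches as a value, but the autograd graph is cut so gradients never propagate past the current chunk. A standalone sketch of the pattern with a toy nn.RNN (not the patch's model; the shapes and the dummy loss are made up):

import torch
import torch.nn as nn

rnn = nn.RNN(input_size=4, hidden_size=8)
head = nn.Linear(8, 1)
hidden = torch.zeros(1, 2, 8)                      # (num_layers, batch, hidden)

for chunk in torch.randn(3, 35, 2, 4):             # three chunks of 35 timesteps each
    output, hidden = rnn(chunk, hidden)            # hidden carried over from the previous chunk
    loss = head(output).pow(2).mean()              # dummy loss for the sketch
    loss.backward()
    hidden = hidden.detach()                       # truncate BPTT: no gradient beyond this chunk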
requires_grad = False - self.callback_manager.on_backward_begin(loss, self.model) - self._grad_backward(loss) - self.callback_manager.on_backward_end(self.model) - - self._update() - self.callback_manager.on_step_end(self.optimizer) - - if (self.step+1) % self.print_every == 0: - if self.use_tqdm: - print_output = "loss:{0:<6.5f}".format(avg_loss / self.print_every) - pbar.update(self.print_every) - else: - end = time.time() - diff = timedelta(seconds=round(end - start)) - print_output = "[epoch: {:>3} step: {:>4}] train loss: {:>4.6} time: {}".format( - epoch, self.step, avg_loss, diff) - pbar.set_postfix_str(print_output) - avg_loss = 0 - self.step += 1 - step += 1 - self.shared_step += 1 - self.callback_manager.on_batch_end() - # ================= mini-batch end ==================== # - - - def get_reward(self, dag, entropies, hidden, valid_idx=0): - """Computes the perplexity of a single sampled model on a minibatch of - validation data. - """ - if not isinstance(entropies, np.ndarray): - entropies = entropies.data.cpu().numpy() - - data_iterator = Batch(self.dev_data, batch_size=self.batch_size, sampler=self.sampler, as_numpy=False, - prefetch=self.prefetch) - - for inputs, targets in data_iterator: - valid_loss, hidden, _ = self.get_loss(inputs, targets, hidden, dag) - valid_loss = utils.to_item(valid_loss.data) - - valid_ppl = math.exp(valid_loss) - - R = 80 / valid_ppl - - rewards = R + 1e-4 * entropies - - return rewards, hidden - - def train_controller(self): - """Fixes the shared parameters and updates the controller parameters. - - The controller is updated with a score function gradient estimator - (i.e., REINFORCE), with the reward being c/valid_ppl, where valid_ppl - is computed on a minibatch of validation data. - - A moving average baseline is used. - - The controller is trained for 2000 steps per epoch (i.e., - first (Train Shared) phase -> second (Train Controller) phase). - """ - model = self.controller - model.train() - # Why can't we call shared.eval() here? Leads to loss - # being uniformly zero for the controller. - # self.shared.eval() - - avg_reward_base = None - baseline = None - adv_history = [] - entropy_history = [] - reward_history = [] - - hidden = self.shared.init_hidden(self.batch_size) - total_loss = 0 - valid_idx = 0 - for step in range(20): - # sample models - dags, log_probs, entropies = self.controller.sample( - with_details=True) - - # calculate reward - np_entropies = entropies.data.cpu().numpy() - # No gradients should be backpropagated to the - # shared model during controller training, obviously. 
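The update performed in the loop below is plain REINFORCE with an exponential-moving-average baseline, as the docstring above says: the reward is c / valid_ppl (c = 80 here, plus a small entropy bonus), the advantage is reward minus baseline, and the surrogate loss is -log_prob · advantage. A minimal illustration with made-up numbers (on the very first step the code below simply initializes the baseline to the rewards):

import torch

log_probs = torch.tensor([-1.2, -0.8, -2.0], requires_grad=True)   # from controller.sample(...)
rewards = torch.tensor([0.9, 1.4, 0.7])                            # 80 / valid_ppl (+ small entropy bonus)

decay = 0.95
baseline = torch.tensor([1.0, 1.0, 1.0])                           # running baseline from earlier steps
baseline = decay * baseline + (1 - decay) * rewards                # exponential moving average

adv = rewards - baseline                                           # advantage
loss = (-log_probs * adv).sum()                                    # score-function (REINFORCE) surrogate loss
loss.backward()                                                    # gradients reach only the controller's log-probs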
- with _get_no_grad_ctx_mgr(): - rewards, hidden = self.get_reward(dags, - np_entropies, - hidden, - valid_idx) - - - reward_history.extend(rewards) - entropy_history.extend(np_entropies) - - # moving average baseline - if baseline is None: - baseline = rewards - else: - decay = 0.95 - baseline = decay * baseline + (1 - decay) * rewards - - adv = rewards - baseline - adv_history.extend(adv) - - # policy loss - loss = -log_probs*utils.get_variable(adv, - self.use_cuda, - requires_grad=False) - - loss = loss.sum() # or loss.mean() - - # update - self.controller_optim.zero_grad() - loss.backward() - - self.controller_optim.step() - - total_loss += utils.to_item(loss.data) - - if ((step % 50) == 0) and (step > 0): - reward_history, adv_history, entropy_history = [], [], [] - total_loss = 0 - - self.controller_step += 1 - # prev_valid_idx = valid_idx - # valid_idx = ((valid_idx + self.max_length) % - # (self.valid_data.size(0) - 1)) - # # Whenever we wrap around to the beginning of the - # # validation data, we reset the hidden states. - # if prev_valid_idx > valid_idx: - # hidden = self.shared.init_hidden(self.batch_size) - - def derive(self, sample_num=10, valid_idx=0): - """We are always deriving based on the very first batch - of validation data? This seems wrong... - """ - hidden = self.shared.init_hidden(self.batch_size) - - dags, _, entropies = self.controller.sample(sample_num, - with_details=True) - - max_R = 0 - best_dag = None - for dag in dags: - R, _ = self.get_reward(dag, entropies, hidden, valid_idx) - if R.max() > max_R: - max_R = R.max() - best_dag = dag - - self.model.setDAG(best_dag) diff --git a/legacy/automl/enas_utils.py b/legacy/automl/enas_utils.py deleted file mode 100644 index 7a53dd12..00000000 --- a/legacy/automl/enas_utils.py +++ /dev/null @@ -1,53 +0,0 @@ -# Code Modified from https://github.com/carpedm20/ENAS-pytorch - -from __future__ import print_function - -import collections -from collections import defaultdict - -import numpy as np -import torch -from torch.autograd import Variable - - -def detach(h): - if type(h) == Variable: - return Variable(h.data) - else: - return tuple(detach(v) for v in h) - -def get_variable(inputs, cuda=False, **kwargs): - if type(inputs) in [list, np.ndarray]: - inputs = torch.Tensor(inputs) - if cuda: - out = Variable(inputs.cuda(), **kwargs) - else: - out = Variable(inputs, **kwargs) - return out - -def update_lr(optimizer, lr): - for param_group in optimizer.param_groups: - param_group['lr'] = lr - -Node = collections.namedtuple('Node', ['id', 'name']) - - -class keydefaultdict(defaultdict): - def __missing__(self, key): - if self.default_factory is None: - raise KeyError(key) - else: - ret = self[key] = self.default_factory(key) - return ret - - -def to_item(x): - """Converts x, possibly scalar and possibly tensor, to a Python scalar.""" - if isinstance(x, (float, int)): - return x - - if float(torch.__version__[0:3]) < 0.4: - assert (x.dim() == 1) and (len(x) == 1) - return x[0] - - return x.item() diff --git a/legacy/component/__init__.py b/legacy/component/__init__.py deleted file mode 100644 index c6784aef..00000000 --- a/legacy/component/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .bert_tokenizer import BertTokenizer diff --git a/legacy/component/bert_tokenizer.py b/legacy/component/bert_tokenizer.py deleted file mode 100644 index 6354076d..00000000 --- a/legacy/component/bert_tokenizer.py +++ /dev/null @@ -1,378 +0,0 @@ -""" -bert_tokenizer.py is modified from huggingface/pytorch-pretrained-BERT, which is licensed under 
the Apache License 2.0. -""" -import collections -import os -import unicodedata -from io import open - - -PRETRAINED_VOCAB_ARCHIVE_MAP = { - 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", - 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", - 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", - 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", - 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", - 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", - 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", -} -PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { - 'bert-base-uncased': 512, - 'bert-large-uncased': 512, - 'bert-base-cased': 512, - 'bert-large-cased': 512, - 'bert-base-multilingual-uncased': 512, - 'bert-base-multilingual-cased': 512, - 'bert-base-chinese': 512, -} -VOCAB_NAME = 'vocab.txt' - - -def load_vocab(vocab_file): - """Loads a vocabulary file into a dictionary.""" - vocab = collections.OrderedDict() - index = 0 - with open(vocab_file, "r", encoding="utf-8") as reader: - while True: - token = reader.readline() - if not token: - break - token = token.strip() - vocab[token] = index - index += 1 - return vocab - - -def whitespace_tokenize(text): - """Runs basic whitespace cleaning and splitting on a piece of text.""" - text = text.strip() - if not text: - return [] - tokens = text.split() - return tokens - - -class BertTokenizer(object): - """Runs end-to-end tokenization: punctuation splitting + wordpiece""" - - def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True, - never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): - """Constructs a BertTokenizer. - Args: - vocab_file: Path to a one-wordpiece-per-line vocabulary file - do_lower_case: Whether to lower case the input - Only has an effect when do_wordpiece_only=False - do_basic_tokenize: Whether to do basic tokenization before wordpiece. - max_len: An artificial maximum length to truncate tokenized sequences to; - Effective maximum length is always the minimum of this - value (if specified) and the underlying BERT model's - sequence length. - never_split: List of tokens which will never be split during tokenization. - Only has an effect when do_wordpiece_only=False - """ - if not os.path.isfile(vocab_file): - raise ValueError( - "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " - "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) - self.vocab = load_vocab(vocab_file) - self.ids_to_tokens = collections.OrderedDict( - [(ids, tok) for tok, ids in self.vocab.items()]) - self.do_basic_tokenize = do_basic_tokenize - if do_basic_tokenize: - self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, - never_split=never_split) - self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) - self.max_len = max_len if max_len is not None else int(1e12) - - def tokenize(self, text): - split_tokens = [] - if self.do_basic_tokenize: - for token in self.basic_tokenizer.tokenize(text): - for sub_token in self.wordpiece_tokenizer.tokenize(token): - split_tokens.append(sub_token) - else: - split_tokens = self.wordpiece_tokenizer.tokenize(text) - return split_tokens - - def convert_tokens_to_ids(self, tokens): - """Converts a sequence of tokens into ids using the vocab.""" - ids = [] - for token in tokens: - ids.append(self.vocab[token]) - if len(ids) > self.max_len: - print( - "WARNING!\n\"" - "Token indices sequence length is longer than the specified maximum " - "sequence length for this BERT model ({} > {}). Running this" - " sequence through BERT will result in indexing errors".format(len(ids), self.max_len) - ) - return ids - - def convert_ids_to_tokens(self, ids): - """Converts a sequence of ids in wordpiece tokens using the vocab.""" - tokens = [] - for i in ids: - tokens.append(self.ids_to_tokens[i]) - return tokens - - def save_vocabulary(self, vocab_path): - """Save the tokenizer vocabulary to a directory or file.""" - index = 0 - if os.path.isdir(vocab_path): - vocab_file = os.path.join(vocab_path, VOCAB_NAME) - with open(vocab_file, "w", encoding="utf-8") as writer: - for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): - if index != token_index: - print("Saving vocabulary to {}: vocabulary indices are not consecutive." - " Please check that the vocabulary is not corrupted!".format(vocab_file)) - index = token_index - writer.write(token + u'\n') - index += 1 - return vocab_file - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): - """ - Instantiate a PreTrainedBertModel from a pre-trained model file. - Download and cache the pre-trained model file if needed. - """ - if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: - vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] - if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True): - print("The pre-trained model you are loading is a cased model but you have not set " - "`do_lower_case` to False. We are setting `do_lower_case=False` for you but " - "you may want to check this behavior.") - kwargs['do_lower_case'] = False - elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True): - print("The pre-trained model you are loading is an uncased model but you have set " - "`do_lower_case` to False. 
We are setting `do_lower_case=True` for you " - "but you may want to check this behavior.") - kwargs['do_lower_case'] = True - else: - vocab_file = pretrained_model_name_or_path - if os.path.isdir(vocab_file): - vocab_file = os.path.join(vocab_file, VOCAB_NAME) - # redirect to the cache, if necessary - resolved_vocab_file = vocab_file - print("loading vocabulary file {}".format(vocab_file)) - if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: - # if we're using a pretrained model, ensure the tokenizer wont index sequences longer - # than the number of positional embeddings - max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] - kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) - # Instantiate tokenizer. - tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) - return tokenizer - - -class BasicTokenizer(object): - """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" - - def __init__(self, - do_lower_case=True, - never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): - """Constructs a BasicTokenizer. - Args: - do_lower_case: Whether to lower case the input. - """ - self.do_lower_case = do_lower_case - self.never_split = never_split - - def tokenize(self, text): - """Tokenizes a piece of text.""" - text = self._clean_text(text) - # This was added on November 1st, 2018 for the multilingual and Chinese - # models. This is also applied to the English models now, but it doesn't - # matter since the English models were not trained on any Chinese data - # and generally don't have any Chinese data in them (there are Chinese - # characters in the vocabulary because Wikipedia does have some Chinese - # words in the English Wikipedia.). - text = self._tokenize_chinese_chars(text) - orig_tokens = whitespace_tokenize(text) - split_tokens = [] - for token in orig_tokens: - if self.do_lower_case and token not in self.never_split: - token = token.lower() - token = self._run_strip_accents(token) - split_tokens.extend(self._run_split_on_punc(token)) - - output_tokens = whitespace_tokenize(" ".join(split_tokens)) - return output_tokens - - def _run_strip_accents(self, text): - """Strips accents from a piece of text.""" - text = unicodedata.normalize("NFD", text) - output = [] - for char in text: - cat = unicodedata.category(char) - if cat == "Mn": - continue - output.append(char) - return "".join(output) - - def _run_split_on_punc(self, text): - """Splits punctuation on a piece of text.""" - if text in self.never_split: - return [text] - chars = list(text) - i = 0 - start_new_word = True - output = [] - while i < len(chars): - char = chars[i] - if _is_punctuation(char): - output.append([char]) - start_new_word = True - else: - if start_new_word: - output.append([]) - start_new_word = False - output[-1].append(char) - i += 1 - - return ["".join(x) for x in output] - - def _tokenize_chinese_chars(self, text): - """Adds whitespace around any CJK character.""" - output = [] - for char in text: - cp = ord(char) - if self._is_chinese_char(cp): - output.append(" ") - output.append(char) - output.append(" ") - else: - output.append(char) - return "".join(output) - - def _is_chinese_char(self, cp): - """Checks whether CP is the codepoint of a CJK character.""" - # This defines a "chinese character" as anything in the CJK Unicode block: - # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) - # - # Note that the CJK Unicode block is NOT all Japanese and Korean characters, - # despite 
its name. The modern Korean Hangul alphabet is a different block, - # as is Japanese Hiragana and Katakana. Those alphabets are used to write - # space-separated words, so they are not treated specially and handled - # like the all of the other languages. - if ((cp >= 0x4E00 and cp <= 0x9FFF) or # - (cp >= 0x3400 and cp <= 0x4DBF) or # - (cp >= 0x20000 and cp <= 0x2A6DF) or # - (cp >= 0x2A700 and cp <= 0x2B73F) or # - (cp >= 0x2B740 and cp <= 0x2B81F) or # - (cp >= 0x2B820 and cp <= 0x2CEAF) or - (cp >= 0xF900 and cp <= 0xFAFF) or # - (cp >= 0x2F800 and cp <= 0x2FA1F)): # - return True - - return False - - def _clean_text(self, text): - """Performs invalid character removal and whitespace cleanup on text.""" - output = [] - for char in text: - cp = ord(char) - if cp == 0 or cp == 0xfffd or _is_control(char): - continue - if _is_whitespace(char): - output.append(" ") - else: - output.append(char) - return "".join(output) - - -class WordpieceTokenizer(object): - """Runs WordPiece tokenization.""" - - def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100): - self.vocab = vocab - self.unk_token = unk_token - self.max_input_chars_per_word = max_input_chars_per_word - - def tokenize(self, text): - """Tokenizes a piece of text into its word pieces. - This uses a greedy longest-match-first algorithm to perform tokenization - using the given vocabulary. - For example: - input = "unaffable" - output = ["un", "##aff", "##able"] - Args: - text: A single token or whitespace separated tokens. This should have - already been passed through `BasicTokenizer`. - Returns: - A list of wordpiece tokens. - """ - - output_tokens = [] - for token in whitespace_tokenize(text): - chars = list(token) - if len(chars) > self.max_input_chars_per_word: - output_tokens.append(self.unk_token) - continue - - is_bad = False - start = 0 - sub_tokens = [] - while start < len(chars): - end = len(chars) - cur_substr = None - while start < end: - substr = "".join(chars[start:end]) - if start > 0: - substr = "##" + substr - if substr in self.vocab: - cur_substr = substr - break - end -= 1 - if cur_substr is None: - is_bad = True - break - sub_tokens.append(cur_substr) - start = end - - if is_bad: - output_tokens.append(self.unk_token) - else: - output_tokens.extend(sub_tokens) - return output_tokens - - -def _is_whitespace(char): - """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically contorl characters but we treat them - # as whitespace since they are generally considered as such. - if char == " " or char == "\t" or char == "\n" or char == "\r": - return True - cat = unicodedata.category(char) - if cat == "Zs": - return True - return False - - -def _is_control(char): - """Checks whether `chars` is a control character.""" - # These are technically control characters but we count them as whitespace - # characters. - if char == "\t" or char == "\n" or char == "\r": - return False - cat = unicodedata.category(char) - if cat.startswith("C"): - return True - return False - - -def _is_punctuation(char): - """Checks whether `chars` is a punctuation character.""" - cp = ord(char) - # We treat all non-letter/number ASCII as punctuation. - # Characters such as "^", "$", and "`" are not in the Unicode - # Punctuation class but we treat them as punctuation anyways, for - # consistency. 
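`WordpieceTokenizer.tokenize` above is a greedy longest-match-first loop: it repeatedly takes the longest substring (prefixed with "##" after the first piece) that is in the vocabulary, and emits [UNK] if any position cannot be covered. A self-contained toy version with a made-up three-entry vocabulary, for reference only:

def wordpiece(token, vocab, unk="[UNK]"):
    """Greedy longest-match-first split of one token into word pieces."""
    chars = list(token)
    pieces, start = [], 0
    while start < len(chars):
        end, cur = len(chars), None
        while start < end:                      # shrink the candidate from the right
            sub = "".join(chars[start:end])
            if start > 0:
                sub = "##" + sub                # non-initial pieces carry the ## prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:                         # nothing matched at this position
            return [unk]
        pieces.append(cur)
        start = end
    return pieces

print(wordpiece("unaffable", {"un", "##aff", "##able"}))   # ['un', '##aff', '##able']
print(wordpiece("xyz", {"un", "##aff", "##able"}))         # ['[UNK]']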
- if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or - (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): - return True - cat = unicodedata.category(char) - if cat.startswith("P"): - return True - return False - diff --git a/test/core/test_dataset.py b/test/core/test_dataset.py index 9c05c334..059d52d2 100644 --- a/test/core/test_dataset.py +++ b/test/core/test_dataset.py @@ -182,8 +182,9 @@ class TestDataSetMethods(unittest.TestCase): def test_apply2(self): def split_sent(ins): return ins['raw_sentence'].split() - csv_loader = CSVLoader(headers=['raw_sentence', 'label'],sep='\t') - dataset = csv_loader.load('test/data_for_tests/tutorial_sample_dataset.csv') + csv_loader = CSVLoader(headers=['raw_sentence', 'label'], sep='\t') + data_bundle = csv_loader.load('test/data_for_tests/tutorial_sample_dataset.csv') + dataset = data_bundle.datasets['train'] dataset.drop(lambda x: len(x['raw_sentence'].split()) == 0, inplace=True) dataset.apply(split_sent, new_field_name='words', is_input=True) # print(dataset) diff --git a/test/io/test_data_loader.py b/test/io/test_data_loader.py deleted file mode 100644 index 5b1bb749..00000000 --- a/test/io/test_data_loader.py +++ /dev/null @@ -1,15 +0,0 @@ -import unittest - -from fastNLP.core.const import Const -from fastNLP.io.data_loader import MNLILoader - - -class TestDataLoader(unittest.TestCase): - - def test_mnli_loader(self): - ds = MNLILoader().process('test/data_for_tests/sample_mnli.tsv', - to_lower=True, get_index=True, seq_len_type='mask') - self.assertTrue('train' in ds.datasets) - self.assertTrue(len(ds.datasets) == 1) - self.assertTrue(len(ds.datasets['train']) == 11) - self.assertTrue(isinstance(ds.datasets['train'][0][Const.INPUT_LENS(0)], list)) diff --git a/test/io/test_dataset_loader.py b/test/io/test_dataset_loader.py deleted file mode 100644 index 6fb8e4f7..00000000 --- a/test/io/test_dataset_loader.py +++ /dev/null @@ -1,77 +0,0 @@ -import unittest -import os -from fastNLP.io import CSVLoader, JsonLoader -from fastNLP.io.data_loader import SSTLoader, SNLILoader, Conll2003Loader, PeopleDailyCorpusLoader - - -class TestDatasetLoader(unittest.TestCase): - - def test_Conll2003Loader(self): - """ - Test the the loader of Conll2003 dataset - """ - dataset_path = "test/data_for_tests/conll_2003_example.txt" - loader = Conll2003Loader() - dataset_2003 = loader.load(dataset_path) - - def test_PeopleDailyCorpusLoader(self): - data_set = PeopleDailyCorpusLoader().load("test/data_for_tests/people_daily_raw.txt") - - def test_CSVLoader(self): - ds = CSVLoader(sep='\t', headers=['words', 'label']) \ - .load('test/data_for_tests/tutorial_sample_dataset.csv') - assert len(ds) > 0 - - def test_SNLILoader(self): - ds = SNLILoader().load('test/data_for_tests/sample_snli.jsonl') - assert len(ds) == 3 - - def test_JsonLoader(self): - ds = JsonLoader().load('test/data_for_tests/sample_snli.jsonl') - assert len(ds) == 3 - - def no_test_SST(self): - train_data = """(3 (2 (2 The) (2 Rock)) (4 (3 (2 is) (4 (2 destined) (2 (2 (2 (2 (2 to) (2 (2 be) (2 (2 the) (2 (2 21st) (2 (2 (2 Century) (2 's)) (2 (3 new) (2 (2 ``) (2 Conan)))))))) (2 '')) (2 and)) (3 (2 that) (3 (2 he) (3 (2 's) (3 (2 going) (3 (2 to) (4 (3 (2 make) (3 (3 (2 a) (3 splash)) (2 (2 even) (3 greater)))) (2 (2 than) (2 (2 (2 (2 (1 (2 Arnold) (2 Schwarzenegger)) (2 ,)) (2 (2 Jean-Claud) (2 (2 Van) (2 Damme)))) (2 or)) (2 (2 Steven) (2 Segal))))))))))))) (2 .))) -(4 (4 (4 (2 The) (4 (3 gorgeously) (3 (2 elaborate) (2 continuation)))) (2 (2 (2 of) (2 ``)) (2 (2 The) (2 (2 (2 Lord) (2 (2 of) 
(2 (2 the) (2 Rings)))) (2 (2 '') (2 trilogy)))))) (2 (3 (2 (2 is) (2 (2 so) (2 huge))) (2 (2 that) (3 (2 (2 (2 a) (2 column)) (2 (2 of) (2 words))) (2 (2 (2 (2 can) (1 not)) (3 adequately)) (2 (2 describe) (2 (3 (2 (2 co-writer\/director) (2 (2 Peter) (3 (2 Jackson) (2 's)))) (3 (2 expanded) (2 vision))) (2 (2 of) (2 (2 (2 J.R.R.) (2 (2 Tolkien) (2 's))) (2 Middle-earth))))))))) (2 .))) -(3 (3 (2 (2 (2 (2 (2 Singer\/composer) (2 (2 Bryan) (2 Adams))) (2 (2 contributes) (2 (2 (2 a) (2 slew)) (2 (2 of) (2 songs))))) (2 (2 --) (2 (2 (2 (2 a) (2 (2 few) (3 potential))) (2 (2 (2 hits) (2 ,)) (2 (2 (2 a) (2 few)) (1 (1 (2 more) (1 (2 simply) (2 intrusive))) (2 (2 to) (2 (2 the) (2 story))))))) (2 --)))) (2 but)) (3 (4 (2 the) (3 (2 whole) (2 package))) (2 (3 certainly) (3 (2 captures) (2 (1 (2 the) (2 (2 (2 intended) (2 (2 ,) (2 (2 er) (2 ,)))) (3 spirit))) (2 (2 of) (2 (2 the) (2 piece)))))))) (2 .)) -(2 (2 (2 You) (2 (2 'd) (2 (2 think) (2 (2 by) (2 now))))) (2 (2 America) (2 (2 (2 would) (1 (2 have) (2 (2 (2 had) (1 (2 enough) (2 (2 of) (2 (2 plucky) (2 (2 British) (1 eccentrics)))))) (4 (2 with) (4 (3 hearts) (3 (2 of) (3 gold))))))) (2 .)))) -""" - test_data = """(3 (2 Yet) (3 (2 (2 the) (2 act)) (3 (4 (3 (2 is) (3 (2 still) (4 charming))) (2 here)) (2 .)))) -(4 (2 (2 Whether) (2 (2 (2 (2 or) (1 not)) (3 (2 you) (2 (2 're) (3 (3 enlightened) (2 (2 by) (2 (2 any) (2 (2 of) (2 (2 Derrida) (2 's))))))))) (2 (2 lectures) (2 (2 on) (2 (2 ``) (2 (2 (2 (2 (2 (2 the) (2 other)) (2 '')) (2 and)) (2 ``)) (2 (2 the) (2 self)))))))) (3 (2 ,) (3 (2 '') (3 (2 Derrida) (3 (3 (2 is) (4 (2 an) (4 (4 (2 undeniably) (3 (4 (3 fascinating) (2 and)) (4 playful))) (2 fellow)))) (2 .)))))) -(4 (3 (2 (2 Just) (2 (2 the) (2 labour))) (3 (2 involved) (3 (2 in) (4 (2 creating) (3 (3 (2 the) (3 (3 layered) (2 richness))) (3 (2 of) (3 (2 (2 the) (2 imagery)) (2 (2 in) (3 (2 (2 this) (2 chiaroscuro)) (2 (2 of) (2 (2 (2 madness) (2 and)) (2 light)))))))))))) (3 (3 (2 is) (4 astonishing)) (2 .))) -(3 (3 (2 Part) (3 (2 of) (4 (2 (2 the) (3 charm)) (2 (2 of) (2 (2 Satin) (2 Rouge)))))) (3 (3 (2 is) (3 (2 that) (3 (2 it) (2 (1 (2 avoids) (2 (2 the) (1 obvious))) (3 (2 with) (3 (3 (3 humour) (2 and)) (2 lightness))))))) (2 .))) -(4 (2 (2 a) (2 (2 screenplay) (2 more))) (3 (4 ingeniously) (2 (2 constructed) (2 (2 (2 (2 than) (2 ``)) (2 Memento)) (2 ''))))) -(3 (2 ``) (3 (2 (2 Extreme) (2 Ops)) (3 (2 '') (4 (4 (3 exceeds) (2 expectations)) (2 .))))) -""" - train, test = 'train--', 'test--' - with open(train, 'w', encoding='utf-8') as f: - f.write(train_data) - with open(test, 'w', encoding='utf-8') as f: - f.write(test_data) - - loader = SSTLoader() - info = loader.process( - {train: train, test: test}, - train_ds=[train], - src_vocab_op=dict(min_freq=2) - ) - assert len(list(info.vocabs.items())) == 2 - assert len(list(info.datasets.items())) == 2 - print(info.vocabs) - print(info.datasets) - os.remove(train), os.remove(test) - - # def test_import(self): - # import fastNLP - # from fastNLP.io import SNLILoader - # ds = SNLILoader().process('test/data_for_tests/sample_snli.jsonl', to_lower=True, - # get_index=True, seq_len_type='seq_len', extra_split=['-']) - # assert 'train' in ds.datasets - # assert len(ds.datasets) == 1 - # assert len(ds.datasets['train']) == 3 - # - # ds = SNLILoader().process('test/data_for_tests/sample_snli.jsonl', to_lower=True, - # get_index=True, seq_len_type='seq_len') - # assert 'train' in ds.datasets - # assert len(ds.datasets) == 1 - # assert len(ds.datasets['train']) == 3 From 
39de27f472fab631b97b47d4934b05f10019b081 Mon Sep 17 00:00:00 2001 From: Yige Xu Date: Thu, 29 Aug 2019 08:19:36 +0800 Subject: [PATCH 11/50] Update BertModel.from_pretrained function. Now can pass a model_dir_or_name instead of model_dir. --- fastNLP/embeddings/bert_embedding.py | 29 ++++------ fastNLP/modules/encoder/bert.py | 81 ++++++++++++++-------------- 2 files changed, 52 insertions(+), 58 deletions(-) diff --git a/fastNLP/embeddings/bert_embedding.py b/fastNLP/embeddings/bert_embedding.py index b1b1a200..e15c15f5 100644 --- a/fastNLP/embeddings/bert_embedding.py +++ b/fastNLP/embeddings/bert_embedding.py @@ -18,7 +18,7 @@ from itertools import chain from ..core.vocabulary import Vocabulary from ..io.file_utils import _get_embedding_url, cached_path, PRETRAINED_BERT_MODEL_DIR -from ..modules.encoder.bert import _WordPieceBertModel, BertModel, BertTokenizer +from ..modules.encoder.bert import _WordPieceBertModel, BertModel, BertTokenizer, _get_bert_dir from .contextual_embedding import ContextualEmbedding import warnings from ..core import logger @@ -70,19 +70,16 @@ class BertEmbedding(ContextualEmbedding): pool_method: str = 'first', word_dropout=0, dropout=0, include_cls_sep: bool = False, pooled_cls=True, requires_grad: bool = False, auto_truncate: bool = False): super(BertEmbedding, self).__init__(vocab, word_dropout=word_dropout, dropout=dropout) - - # 根据model_dir_or_name检查是否存在并下载 + if model_dir_or_name.lower() in PRETRAINED_BERT_MODEL_DIR: if 'cn' in model_dir_or_name.lower() and pool_method not in ('first', 'last'): + logger.warn("For Chinese bert, pooled_method should choose from 'first', 'last' in order to achieve" + " faster speed.") warnings.warn("For Chinese bert, pooled_method should choose from 'first', 'last' in order to achieve" " faster speed.") - model_url = _get_embedding_url('bert', model_dir_or_name.lower()) - model_dir = cached_path(model_url, name='embedding') - # 检查是否存在 - elif os.path.isdir(os.path.abspath(os.path.expanduser(model_dir_or_name))): - model_dir = os.path.abspath(os.path.expanduser(model_dir_or_name)) - else: - raise ValueError(f"Cannot recognize {model_dir_or_name}.") + + # 根据model_dir_or_name检查是否存在并下载 + model_dir = _get_bert_dir(model_dir_or_name) self._word_sep_index = None if '[SEP]' in vocab: @@ -173,15 +170,9 @@ class BertWordPieceEncoder(nn.Module): def __init__(self, model_dir_or_name: str = 'en-base-uncased', layers: str = '-1', pooled_cls: bool = False, word_dropout=0, dropout=0, requires_grad: bool = False): super().__init__() - - if model_dir_or_name.lower() in PRETRAINED_BERT_MODEL_DIR: - model_url = _get_embedding_url('bert', model_dir_or_name.lower()) - model_dir = cached_path(model_url, name='embedding') - # 检查是否存在 - elif os.path.isdir(os.path.expanduser(os.path.abspath(model_dir_or_name))): - model_dir = model_dir_or_name - else: - raise ValueError(f"Cannot recognize {model_dir_or_name}.") + + # 根据model_dir_or_name检查是否存在并下载 + model_dir = _get_bert_dir(model_dir_or_name) self.model = _WordPieceBertModel(model_dir=model_dir, layers=layers, pooled_cls=pooled_cls) self._sep_index = self.model._sep_index diff --git a/fastNLP/modules/encoder/bert.py b/fastNLP/modules/encoder/bert.py index 5026f48a..89a1b09d 100644 --- a/fastNLP/modules/encoder/bert.py +++ b/fastNLP/modules/encoder/bert.py @@ -18,13 +18,13 @@ import torch from torch import nn from ..utils import _get_file_name_base_on_postfix +from ...io.file_utils import _get_embedding_url, cached_path, PRETRAINED_BERT_MODEL_DIR from ...core import logger CONFIG_FILE = 
'bert_config.json' VOCAB_NAME = 'vocab.txt' - class BertConfig(object): """Configuration class to store the configuration of a `BertModel`. """ @@ -133,6 +133,19 @@ def swish(x): ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} +def _get_bert_dir(model_dir_or_name: str = 'en-base-uncased'): + if model_dir_or_name.lower() in PRETRAINED_BERT_MODEL_DIR: + model_url = _get_embedding_url('bert', model_dir_or_name.lower()) + model_dir = cached_path(model_url, name='embedding') + # 检查是否存在 + elif os.path.isdir(os.path.abspath(os.path.expanduser(model_dir_or_name))): + model_dir = os.path.abspath(os.path.expanduser(model_dir_or_name)) + else: + logger.error(f"Cannot recognize BERT dir or name ``{model_dir_or_name}``.") + raise ValueError(f"Cannot recognize BERT dir or name ``{model_dir_or_name}``.") + return model_dir + + class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). @@ -339,27 +352,9 @@ class BertModel(nn.Module): BERT(Bidirectional Embedding Representations from Transformers). - 如果你想使用预训练好的权重矩阵,请在以下网址下载. - sources:: - - 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin", - 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin", - 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin", - 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin", - 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin", - 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin", - 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin", - 'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-pytorch_model.bin", - 'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin", - 'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin", - 'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin", - 'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin", - 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin" - - 用预训练权重矩阵来建立BERT模型:: - model = BertModel.from_pretrained("path/to/weights/directory") + model = BertModel.from_pretrained(model_dir_or_name) 用随机初始化权重矩阵来建立BERT模型:: @@ -440,11 +435,15 @@ class BertModel(nn.Module): return encoded_layers, pooled_output @classmethod - def from_pretrained(cls, pretrained_model_dir, *inputs, **kwargs): + def from_pretrained(cls, pretrained_model_dir_or_name, *inputs, **kwargs): state_dict = kwargs.get('state_dict', None) kwargs.pop('state_dict', None) kwargs.pop('cache_dir', None) kwargs.pop('from_tf', None) + + # get model dir from name or dir + 
pretrained_model_dir = _get_bert_dir(pretrained_model_dir_or_name) + # Load config config_file = _get_file_name_base_on_postfix(pretrained_model_dir, '.json') config = BertConfig.from_json_file(config_file) @@ -493,6 +492,8 @@ class BertModel(nn.Module): if len(unexpected_keys) > 0: logger.warn("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) + + logger.info(f"Load pre-trained BERT parameters from dir {pretrained_model_dir}.") return model @@ -562,7 +563,7 @@ class WordpieceTokenizer(object): output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) - if len(output_tokens)==0: #防止里面全是空格或者回车符号 + if len(output_tokens) == 0: # 防止里面全是空格或者回车符号 return [self.unk_token] return output_tokens @@ -673,14 +674,14 @@ class BasicTokenizer(object): # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. - if ((cp >= 0x4E00 and cp <= 0x9FFF) or # - (cp >= 0x3400 and cp <= 0x4DBF) or # - (cp >= 0x20000 and cp <= 0x2A6DF) or # - (cp >= 0x2A700 and cp <= 0x2B73F) or # - (cp >= 0x2B740 and cp <= 0x2B81F) or # - (cp >= 0x2B820 and cp <= 0x2CEAF) or - (cp >= 0xF900 and cp <= 0xFAFF) or # - (cp >= 0x2F800 and cp <= 0x2FA1F)): # + if (((cp >= 0x4E00) and (cp <= 0x9FFF)) or # + ((cp >= 0x3400) and (cp <= 0x4DBF)) or # + ((cp >= 0x20000) and (cp <= 0x2A6DF)) or # + ((cp >= 0x2A700) and (cp <= 0x2B73F)) or # + ((cp >= 0x2B740) and (cp <= 0x2B81F)) or # + ((cp >= 0x2B820) and (cp <= 0x2CEAF)) or + ((cp >= 0xF900) and (cp <= 0xFAFF)) or # + ((cp >= 0x2F800) and (cp <= 0x2FA1F))): # return True return False @@ -730,8 +731,8 @@ def _is_punctuation(char): # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. - if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or - (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): + if (((cp >= 33) and (cp <= 47)) or ((cp >= 58) and (cp <= 64)) or + ((cp >= 91) and (cp <= 96)) or ((cp >= 123) and (cp <= 126))): return True cat = unicodedata.category(char) if cat.startswith("P"): @@ -830,11 +831,11 @@ class BertTokenizer(object): return vocab_file @classmethod - def from_pretrained(cls, model_dir, *inputs, **kwargs): + def from_pretrained(cls, model_dir_or_name, *inputs, **kwargs): """ - 给定path,直接读取vocab. - + 给定模型的名字或者路径,直接读取vocab. """ + model_dir = _get_bert_dir(model_dir_or_name) pretrained_model_name_or_path = _get_file_name_base_on_postfix(model_dir, '.txt') logger.info("loading vocabulary file {}".format(pretrained_model_name_or_path)) max_len = 512 @@ -843,17 +844,19 @@ class BertTokenizer(object): tokenizer = cls(pretrained_model_name_or_path, *inputs, **kwargs) return tokenizer + class _WordPieceBertModel(nn.Module): """ 这个模块用于直接计算word_piece的结果. 
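With `_get_bert_dir` in place, both `BertModel.from_pretrained` and `BertTokenizer.from_pretrained` accept either a known model name (resolved through `_get_embedding_url` and `cached_path`) or a local directory. A usage sketch, assuming the package layout in this patch, the default name used elsewhere in this file, and network access for the download-by-name case:

from fastNLP.modules.encoder.bert import BertModel, BertTokenizer

# By name: resolved through _get_bert_dir, downloaded and cached on first use.
model = BertModel.from_pretrained('en-base-uncased')
tokenizer = BertTokenizer.from_pretrained('en-base-uncased')

# By path: an existing directory containing the config, vocab and weight files still works.
# model = BertModel.from_pretrained('/path/to/local/bert/dir')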
""" - def __init__(self, model_dir: str, layers: str = '-1', pooled_cls:bool=False): + def __init__(self, model_dir_or_name: str, layers: str = '-1', pooled_cls: bool=False): super().__init__() - self.tokenzier = BertTokenizer.from_pretrained(model_dir) - self.encoder = BertModel.from_pretrained(model_dir) + self.model_dir = _get_bert_dir(model_dir_or_name) + self.tokenzier = BertTokenizer.from_pretrained(self.model_dir) + self.encoder = BertModel.from_pretrained(self.model_dir) # 检查encoder_layer_number是否合理 encoder_layer_number = len(self.encoder.encoder.layer) self.layers = list(map(int, layers.split(','))) @@ -914,7 +917,7 @@ class _WordPieceBertModel(nn.Module): attn_masks = word_pieces.ne(self._wordpiece_pad_index) bert_outputs, pooled_cls = self.encoder(word_pieces, token_type_ids=token_type_ids, attention_mask=attn_masks, - output_all_encoded_layers=True) + output_all_encoded_layers=True) # output_layers = [self.layers] # len(self.layers) x batch_size x max_word_piece_length x hidden_size outputs = bert_outputs[0].new_zeros((len(self.layers), batch_size, max_len, bert_outputs[0].size(-1))) for l_index, l in enumerate(self.layers): From 09d0b74595c8273b8bcb3af48a84cdcd5e6c982e Mon Sep 17 00:00:00 2001 From: yhcc Date: Thu, 29 Aug 2019 09:56:36 +0800 Subject: [PATCH 12/50] Update .travis.yml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TRAVIS默认已经加入了 --- .travis.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0d63417a..210d158a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,9 +1,6 @@ language: python python: - "3.6" - -env: - - TRAVIS=1 # command to install dependencies install: - pip install --quiet -r requirements.txt From 146a004deee58f139ba7317e7b66740a709947ba Mon Sep 17 00:00:00 2001 From: yh_cc Date: Thu, 29 Aug 2019 10:12:30 +0800 Subject: [PATCH 13/50] =?UTF-8?q?=E4=BF=AE=E6=94=B9travis=20converage?= =?UTF-8?q?=E8=AE=BE=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .coverage | 1 + .travis.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 .coverage diff --git a/.coverage b/.coverage new file mode 100644 index 00000000..a6d89bc8 --- /dev/null +++ b/.coverage @@ -0,0 +1 @@ +!coverage.py: This is a private format, don't read it 
directly!{"lines":{"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/__init__.py":[12,14,15,18,19,20,22,23,24,26,27,29,30,31,32,33,34,35,37,38,39,41,42,43,45,46,47,48,50,51,52,53,55,56,57,58,59,60,62,64,66,68,69,70,71,72],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/embeddings/__init__.py":[6,9,10,11,12,13,14,15,16,17,18,21,22,23,24,25,26,27],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/embeddings/embedding.py":[128,129,130,131,4,133,7,8,11,12,13,140,15,141,142,18,146,148,143,144,145,155,157,39,41,169,43,45,174,47,48,177,178,49,51,181,182,52,55,185,186,179,60,61,63,193,68,199,72,201,73,75,76,205,82,85,86,87,89,90,91,93,104,111,119,120,121,122,123,124,125,126,127],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/embeddings/utils.py":[4,5,6,7,9,42,43,12,44,45,46,16,24,57,26,27,28,25,31],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/__init__.py":[13,15,17,19,20,21,22,24,26,27,28,30,32,33,35,36,37,38,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,56,57,58,59,60,61,63,64,65,67,68,69,70,72,73,74,75,78,79,80,83,84,85,86,87,88,89,90,91,92,93,94],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/_logger.py":[1,130,131,4,132,134,7,8,9,10,11,137,13,140,15,16,143,19,20,24,25,26,155,27,29,30,31,32,33,45,46,47,49,50,51,52,53,56,78,79,80,83,84,88,92,94,95,99,100,101,102,103,106,107,108,110,114,119,125,127],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/batch.py":[4,6,7,8,11,13,14,15,16,18,19,20,21,24,29,32,33,34,35,36,37,38,40,42,43,44,45,47,50,57,58,59,60,61,62,63,64,65,67,68,69,70,73,74,75,76,80,81,83,84,85,87,92,99,100,101,102,103,105,106,108,109,112,113,114,115,116,117,119,120,122,124,125,126,127,129,130,131,132,133,135,136,138,139,141,146,171,174,175,176,177,178,181,182,183,184,185,186,187,189,190,193,194,202,204,207,211,215,223,224,225,226,227,228,229,230,233],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/sampler.py":[3,5,6,7,8,134,135,11,140,13,137,16,149,150,151,24,153,26,155,156,158,160,34,162,163,164,166,165,40,167,42,170,43,46,52,54,55,58,186,187,188,190,191,192,193,68,70,71,72,73,75,83,84,86,87,89,90,91,92,93,94,96,97,98,100,102,103,104,105,106,107,108,109,110,112,113,114,115,117,120],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/dataset.py":[515,516,518,532,543,550,552,554,560,562,570,578,585,586,587,589,590,592,606,607,608,609,610,611,617,619,631,632,633,634,635,640,641,643,660,676,688,694,696,702,704,722,723,725,726,727,728,729,734,737,738,859,740,742,751,752,753,754,755,756,757,758,862,760,761,762,763,764,765,766,767,768,770,771,772,774,791,792,793,794,795,796,285,287,290,291,803,293,294,806,296,297,298,299,300,301,302,303,811,305,809,824,314,316,317,318,319,320,321,834,835,836,837,838,322,323,324,325,326,327,328,334,329,332,337,849,338,339,340,342,335,344,857,858,347,348,861,345,350,346,865,864,863,866,860,351,868,354,353,356,871,867,875,869,870,360,363,364,877,365,367,369,883,884,886,376,377,378,379,380,381,382,383,384,385,386,387,388,894,895,896,897,402,409,410,412,413,415,420,421,422,423,425,426,427,431,432,434,872,441,443,445,447,451,452,453,454,459,873,474,807,486,487,488,490,491,493,499,500,502,503,505,506,507,509],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/field.py":[4,7,8,9,12,13,14,15,16,18,19,21,22,535,25,26,27,28,29,30,33,34,35,36,37,38,41,43,44,557,46,559,560,47,562,48,52,53,54,563,56,57,58,59,60,564,62,63,64,568,570,67,68,572,70,71,72,65,74,578,76,580,78,590,80,591,585,83,592,85,593,87,594,89,595,596,597,598,599,95,96,97,98,99,100,613,101,614,102,615,609,616,617,618,104,106,108,622,624,113,114,115,116,629,117,118,120,119,122,130,131,132,133,134,135,136,137,138,651,139,653,140,141,142,146,147,148,149,150,663,152,659,661,157,
158,159,160,162,165,677,167,169,681,682,683,685,686,175,687,177,178,688,180,181,182,183,184,690,691,187,692,693,190,694,192,697,200,201,202,205,206,207,209,211,212,214,220,221,222,226,45,236,242,244,252,254,255,256,257,259,261,278,565,566,567,298,569,571,318,573,574,575,339,576,577,579,359,581,582,379,584,586,626,398,419,695,428,429,430,431,432,433,434,435,436,437,438,439,441,443,444,445,446,447,448,450,451,452,453,454,455,456,458,459,460,465,482,484,485,487,490,491,610],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/utils.py":[3,5,517,6,7,518,10,11,12,13,14,521,16,17,18,19,20,524,22,23,527,528,26,27,535,29,536,540,541,542,543,35,544,545,547,546,40,548,551,552,553,554,46,550,49,563,564,53,565,567,568,569,59,60,574,62,63,64,67,522,592,599,609,615,530,118,119,120,121,122,124,125,126,127,641,129,130,132,131,134,647,648,649,650,651,652,135,139,140,656,142,144,147,659,145,146,662,663,664,151,666,667,152,669,670,153,672,673,674,163,676,165,678,679,168,681,682,643,685,644,645,192,709,217,218,219,220,222,736,738,227,739,740,226,229,232,233,230,148,231,745,234,235,149,236,237,238,239,240,244,245,241,242,243,246,247,248,249,250,251,252,253,254,255,256,259,260,263,154,271,273,274,156,277,157,280,158,642,288,289,159,291,292,293,294,295,296,297,298,161,301,316,333,334,335,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,364,388,389,390,391,392,393,396,397,405,411,413,416,417,421,430,433,436,437,438,439,440,290,445,449,451,452,454,456,457,458,460,463,465,466,469,470,471,475,476,477,478,479,480,485,496,497,498,499,500,501,502,503,505,506,507,508,509,510,511],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/instance.py":[58,5,37,39,7,11,46,47,48,52,53,55,56,24,26,59,28,30],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/const.py":[4,7,11,29,30,31,32,33,34,35,36,37,39,42,43,45,51,56,61,64,65,67,70,71,73,76,77,79],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/callback.py":[1024,513,1026,1030,1031,1036,529,531,1043,1044,1060,1071,562,51,53,1077,55,56,57,58,59,60,61,62,63,64,65,66,576,68,69,1092,583,72,73,74,76,78,80,81,84,85,87,88,89,91,92,603,606,97,612,106,108,621,109,623,110,113,111,118,120,123,125,128,130,133,135,648,138,140,143,145,660,148,150,153,155,159,161,674,164,166,681,169,171,683,685,686,175,687,177,688,179,692,693,183,696,701,189,191,703,705,706,708,197,710,199,721,722,210,212,723,726,724,728,729,730,220,733,222,741,229,231,743,745,746,748,237,749,239,750,751,752,753,756,245,754,247,758,761,759,252,765,254,766,767,768,770,771,260,773,262,774,775,776,778,779,780,781,782,783,777,785,786,275,787,788,789,791,790,273,794,283,795,796,797,287,799,289,800,801,802,805,293,295,303,818,820,821,310,311,312,313,822,315,316,823,318,830,824,321,322,826,836,828,827,831,832,833,841,329,331,332,333,334,839,336,337,842,851,339,340,341,852,855,348,349,350,863,351,353,864,357,870,871,361,875,365,369,881,373,377,889,890,381,385,389,902,393,907,397,912,401,405,410,411,922,929,420,428,945,946,437,961,964,455,968,457,459,461,462,463,468,469,471,472,473,987,479,482,504,489,491,1003,492,493,494,496,1009,497,1014,1016,506,1020],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/tester.py":[34,35,37,38,40,41,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,62,66,92,94,95,97,100,102,103,104,105,106,107,109,110,111,118,119,121,127,131,132,134,138,139,141,148,149,150,151,152,153,154,155,156,158,159,160,162,164,165,166,167,170,171,173,174,176,177,178,181,182,183,184,185,187,188,189,190,191,192,194,195,196,197,199,206,207,209,211,213,214,215,217,223,224,225,226,227,228],"/hdd/fudanNLP/fastNLP/fastNLP/fas
tNLP/core/metrics.py":[4,6,7,8,9,12,13,15,16,18,19,20,21,22,23,24,25,26,29,117,119,120,121,122,124,133,137,138,141,151,156,158,165,166,179,180,181,182,183,185,186,187,188,192,193,194,195,200,208,209,213,215,230,231,235,236,239,240,241,242,246,247,249,250,253,254,255,256,257,258,259,262,263,264,265,267,270,271,272,274,277,278,279,280,281,282,284,285,286,287,288,290,292,295,305,307,309,311,313,314,316,329,330,332,336,340,341,343,345,346,347,348,350,354,355,356,357,359,360,362,369,370,371,372,373,376,386,388,389,390,391,392,393,394,395,396,398,399,400,401,402,406,437,468,477,479,480,481,482,483,484,485,486,488,489,491,492,493,496,504,505,506,507,508,509,510,511,512,514,515,516,520,561,564,566,568,570,573,574,575,576,577,578,579,580,581,582,586,587,588,589,590,592,593,595,597,598,599,601,609,612,616,620,622,623,624,625,633,634,635,636,637,638,640,641,643,644,646,647,648,649,651,652,653,655,657,658,659,660,661,662,663,664,665,666,667,668,669,670,671,672,673,674,675,676,677,678,679,681,686,687,688,689,690,691,692,694,695,696,697,699,700,702,704,712,713,714,716,719,726,727,728,729,730,732,733,734,736,738,742,743,747,750,759,760,761,762,763,766,776,777,778,779,780,781,784,799,802,804,806,808,810,811,813,814,816,818,819,820,821,823,825,827,836,837,838,839,841,842,845,846,850,851,852,853,855,856,857,858,859,862,863,864,865,867,868,870,873,875,876,878,879,880,881,883,884,885,887,888,891,893,895,897,900,901,903,905,907,909,910,911,912,914,916,918,919,920,921,923,929,932,933,935,936,937,939,940,942,944,945,946,947,949],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/vocabulary.py":[4,7,8,11,12,13,15,16,17,18,21,26,35,40,42,43,44,46,49,54,56,57,58,59,61,62,64,67,90,92,93,94,95,96,97,98,99,100,102,104,105,116,117,118,120,121,133,134,135,137,145,146,147,148,149,150,151,153,154,166,168,169,181,182,184,190,191,192,193,194,195,197,198,199,200,201,202,203,204,205,206,207,209,214,215,217,219,221,229,231,242,244,251,252,253,254,258,259,273,279,280,282,283,285,287,289,291,292,295,296,297,301,302,303,304,305,311,313,317,337,338,342,343,344,345,346,348,349,350,352,354,355,356,358,359,360,361,368,369,370,371,377,379,385,387,398,400,401,406,407,408,410,411,416,417,418,420,428,430,443,447,448,450,451,453,457,458,460,463,465,466],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/_parallel_utils.py":[1,97,3,5,7,8,9,10,11,76,104,14,105,107],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/losses.py":[4,6,8,9,11,12,13,14,17,18,20,21,23,24,25,26,27,28,29,30,31,34,37,39,40,41,43,52,55,62,63,76,77,78,79,80,82,83,84,85,89,90,91,92,102,110,112,113,114,115,119,120,122,123,125,126,127,128,129,130,131,134,135,136,137,139,141,142,143,145,148,149,150,151,152,153,155,156,157,158,160,162,163,165,168,188,190,192,193,194,195,198,201,222,224,225,226,227,228,229,230,232,233,234,235,236,239,240,241,242,243,245,246,249,259,261,262,263,264,265,267,268,271,280,282,283,284,285,286,288,289,292,303,305,306,307,308,309,310,312,313,316,323,325,326,327,329,331,332,333,334,335,336,337,338,339,340,341,343,345,347,353,356,357,358,359,360,361,366,374,377,386,387,395,410,432],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/optimizer.py":[4,6,7,8,9,135,138,12,13,14,15,18,151,24,26,27,156,29,30,32,35,41,43,47,48,51,54,61,68,70,71,72,73,75,76,78,80,83,90,92,93,95,96,98,99,101,103,106],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/trainer.py":[517,518,519,521,522,523,524,525,526,527,528,529,530,531,532,533,534,536,537,538,539,540,541,545,547,548,551,552,553,554,555,556,557,558,559,560,561,562,564,565,567,570,571,573,593,594,598,599,600,601,602,603,604,606,607,608,609,619,620,6
21,622,623,624,625,626,627,628,629,630,634,635,637,639,640,641,643,644,645,646,647,648,649,650,651,652,653,654,656,657,658,659,660,662,663,666,667,668,669,672,673,674,676,677,679,680,681,682,683,685,686,687,688,689,690,691,693,694,695,696,697,698,700,701,705,707,708,711,712,713,715,716,717,720,721,722,723,724,725,727,728,857,730,737,740,742,746,747,352,749,750,751,752,755,757,764,765,766,768,775,777,800,802,812,813,816,818,823,824,825,826,827,829,319,831,321,832,835,324,325,326,833,328,329,330,843,332,333,841,847,336,848,338,851,340,341,342,343,344,339,853,854,855,349,350,351,856,345,346,858,347,348,864,865,868,869,353,354,355,356,358,872,873,875,876,877,878,879,880,881,882,883,884,885,886,887,888,889,890,891,892,893,895,896,898,899,900,901,902,903,904,905,907,908,909,910,911,913,914,915,916,917,918,919,920,924,925,927,928,418,932,936,425,426,427,937,941,942,939,940,431,943,433,944,945,947,437,438,948,949,441,954,950,444,951,958,449,450,961,962,964,454,965,456,968,458,970,971,974,466,482,484,485,489,490,491,498,499,502,503,506,507,510,511],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/embeddings/static_embedding.py":[4,7,9,11,12,13,14,16,17,18,19,20,21,22,25,66,69,70,71,72,75,76,77,78,79,83,84,91,92,119,121,122,123,124,127,128,130,133,134,135,136,140,141,142,143,144,146,147,148,150,151,153,154,155,156,158,164,165,166,167,168,169,171,179,181,182,186,188,202,204,205,207,209,210,226,227,229,230,231,232,233,237,238,239,240,241,242,243,244,245,246,247,248,249,250,252,254,257,258,259,260,261,262,269,270,271,272,275,277,279,283,284,285,286,287,288,290,292,299,300,301,302,303,304],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/__init__.py":[13,15,17,19,21,22,23,24,25,26,28,29,30,31,32,33,34,35,37,38,40,42,43,44,45,46,48,50,51,52,53,54,55,57,58,59,60,61,63,65,66,67,68,69,70,71,72,73,74,75,76,78,79,83,84,85,87,88],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/embed_loader.py":[4,6,7,10,11,12,14,16,17,20,22,23,24,25,34,39,41,44,45,46,63,64,66,67,68,69,70,71,72,73,75,76,77,78,80,81,82,83,84,86,88,90,91,92,93,100,101,102,103,104,105,106,107,108,109,111,112,114,116,117,118,133,134,135,136,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,157,166,168,169,170,171,173,174,175,176,177,178,180,181,182,183,184,185,187,188,190],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/data_bundle.py":[4,6,9,10,13,142,27,29,30,31,159,33,45,55,184,64,74,203,83,92,117],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/model_io.py":[32,3,5,6,9,42,12,17,19,53,22,55,62],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/loader/__init__.py":[44,47,49,50,51,52,53,54,56,57,58,59,60,61,62,63,65,66,68,70,71,72,73,74,76,77,78,79,80,81,82,83],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/loader/classification.py":[1,259,4,5,6,7,8,9,261,264,12,13,14,15,16,17,19,20,21,279,24,291,164,45,47,304,50,178,180,306,309,183,72,73,201,339,244,119,120],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/loader/loader.py":[65,66,1,4,33,70,7,67,9,10,11,12,68,78,15,19,21,22,24,63],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/file_utils.py":[4,7,8,9,10,11,14,15,16,17,18,19,21,22,23,25,28,29,30,32,33,35,36,38,40,41,43,44,45,46,50,51,52,53,54,58,60,61,62,63,64,65,66,67,68,69,71,73,74,76,77,78,79,83,84,85,86,87,88,89,90,91,92,93,94,96,97,98,99,102,103,104,107,108,109,110,114,159,186,202,228,252,273,293,306,418,427,434,443],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/utils.py":[33,34,35,4,36,7,10,11,12,14,17,81],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/loader/conll.py":[1,4,5,6,7,8,9,10,11,12,15,16,17,18,19,146,21,22,23,24,25,150,278,28,279,282,286,287,408,421,175,177,183,62,446,64,448,45
1,325,204,78,208,92,349,222,273,224,351,354,227,404,117,405,119,125],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/file_reader.py":[33,34,3,35,5,7,9,41,42,12,43,78,47,44,24,25,26,30],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/loader/csv.py":[32,1,34,33,4,35,36,7,8,9,10,37,13,24,26,27,28,29,30],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/loader/cws.py":[1,4,38,7,8,9,10,11,39,13,14,15,47,18,56],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/loader/json.py":[1,4,38,7,8,9,10,13,25,27],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/loader/matching.py":[1,129,4,5,6,7,8,11,12,13,15,16,17,18,19,20,273,277,23,159,35,37,40,170,298,300,303,184,186,189,318,66,216,98,228,109,241,243,246,120,122],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/pipe/__init__.py":[9,11,13,15,16,17,18,19,21,22,23,24,25,26,28,29,30,31,32,33,34,35,36,37,38,39,42,43,44,46,47,48],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/pipe/classification.py":[1,4,5,6,7,8,134,264,11,392,13,15,16,17,18,19,20,21,22,408,24,410,28,414,32,34,37,172,52,182,315,320,449,195,197,70,201,333,335,339,89,218,247,228,104,106,119,249,382],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/pipe/pipe.py":[1,4,7,10,13,14,23],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/pipe/utils.py":[1,66,153,4,5,6,39,9,137,11,12,15,87,121,91],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/pipe/conll.py":[1,4,5,6,7,8,9,12,13,14,15,16,17,18,19,20,141,272,23,286,288,34,36,293,43,306,308,182,313,192,328,330,79,208,210,215,225,98,227,100,233,113,114],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/pipe/matching.py":[128,1,129,259,4,5,6,7,8,9,10,11,12,13,14,15,135,140,18,19,20,21,22,146,147,25,152,260,134,169,42,171,44,177,50,265,266,191,64,141,271,272,247,248,122,123,253,254],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/io/pipe/cws.py":[1,4,7,8,136,10,11,12,13,14,17,155,157,34,168,50,65,202,84,110,254],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/__init__.py":[18,22,23,25,27,29,31,33,34,35,37,38,39,40,42,44,45,46,47,49,52,53,54,55,56],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/decoder/__init__.py":[4,6,7,8,9,12,13,14,15],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/decoder/crf.py":[1,4,5,8,9,11,12,15,29,31,32,33,34,35,36,37,38,40,41,42,43,44,46,47,48,50,51,52,53,54,55,56,57,58,59,60,63,73,74,75,76,93,94,95,96,97,98,102,121,122,123,124,125,126,127,128,157,170,173,175,177,178,181,182,183,184,186,187,192,194,196,204,205,206,207,209,211,212,213,214,215,216,218,219,221,223,231,232,233,236,237,238,240,242,243,244,245,246,247,248,250,252,261,262,263,264,265,267,269,282,283,284,287,288,289,290,291,295,296,297,298,299,300,301,302,303,304,306,310,311,312,314,316,317,318,319,320,321,322,323,328,329],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/utils.py":[4,134,7,8,11,12,14,15,16,19,35,37,39,41,43,45,47,49,52,54,56,57,60,61,62,63,64,65,67,68,69,70,72,73,74,75,77,80,83,120],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/decoder/mlp.py":[1,4,7,8,10,13,44,46,47,48,49,50,51,52,53,55,57,60,61,62,64,65,71,72,73,75,76,79,86,88,93,94,95,96,98,99],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/decoder/utils.py":[1,4,6,9],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/encoder/__init__.py":[4,9,10,12,14,16,18,20,21,22,24,25,26,27,29,32,33,34,35,36,37,38,39,40],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/encoder/attention.py":[128,1,4,132,7,9,10,11,13,16,20,22,23,24,25,26,27,28,30,38,39,40,41,42,43,46,175,55,184,57,186,58,59,60,61,62,64,65,66,67,69,198,70,71,73,74,75,76,77,78,80,212,88,89,90,92,93,94,97,98,99,100,101,102,105,106,107,110,126],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/encoder/bert.py":[512,4,517,7,10,11,12,13,14,15,17,18,
20,21,22,24,25,28,30,44,571,70,586,587,75,76,77,591,78,79,80,81,82,83,84,85,600,86,87,92,100,107,621,110,115,119,632,125,126,129,133,136,654,149,150,153,154,667,155,156,158,159,160,161,162,165,167,169,170,171,172,173,689,177,178,180,181,182,183,184,187,188,189,703,191,192,193,194,197,198,199,200,715,204,205,206,208,209,210,212,214,727,215,216,217,219,220,221,222,224,225,226,229,230,743,744,232,747,235,239,241,242,243,244,245,248,249,250,251,252,253,255,256,257,258,259,262,263,776,264,265,266,268,269,270,271,274,275,786,276,277,278,279,283,796,284,285,286,289,290,291,292,293,294,296,809,297,298,299,300,303,304,816,305,306,307,308,310,311,312,313,314,317,318,319,320,833,321,323,324,325,326,327,328,329,330,331,334,335,848,336,337,338,340,852,854,343,344,345,346,349,877,374,376,377,378,385,386,387,388,389,390,391,393,396,909,399,400,401,402,403,404,406,407,409,410,417,424,425,427,428,429,430,431,432,433,434,435,437,500,509,510],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/encoder/char_encoder.py":[1,4,5,7,8,10,14,25,27,28,29,30,32,34,36,41,43,45,47,48,49,50,52,54,55,57,58,61,68,70,77,78,80,81,82,83,84,85,87,92,93,94,95,96,98,99],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/encoder/conv_maxpool.py":[1,4,6,7,8,11,23,25,26,28,29,32,33,36,37,38,43,52,59,60,69,77,79,80,81,82,84,85,86],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/encoder/lstm.py":[4,7,10,11,12,15,30,33,34,35,36,37,38,40,41,42,44,45,46,47,49,51,61,62,65,66,67,68,69,72,73,74,75,76,77,82],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/encoder/pooling.py":[1,129,4,5,6,7,135,9,10,137,13,141,25,27,38,62,67,69,73,85,86,88,92,102,107,109,114],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/encoder/star_transformer.py":[3,6,9,10,11,12,15,32,34,35,36,38,40,41,42,43,44,45,46,48,49,53,63,65,67,68,69,71,72,76,77,78,79,80,81,82,83,85,87,89,91,94,95,96,99,100,101,102,104,107,109,111,112,114,116,117,118,119,120,121,122,123,124,125,126,127,129,130,132,134,137,138,140,141,142,143,144,146,149,151,153,154,156,158,159,160,161,162,163,164,165,166],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/encoder/transformer.py":[1,4,6,8,9,12,26,28,29,30,31,32,33,34,35,36,37,39,46,47,48,49,50,51,52,54,55,56,58,65,66,69,70,71,72,73],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/dropout.py":[1,4,7,10,14,16,17,18,19,20,24],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/encoder/variational_rnn.py":[3,6,7,8,11,12,13,15,16,25,28,31,33,34,35,36,37,38,40,52,53,54,55,56,58,59,60,61,62,63,64,66,67,69,70,73,74,75,76,77,79,80,81,82,83,84,85,86,87,88,89,96,97,98,99,102,120,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,146,147,148,149,150,151,152,153,155,163,164,165,166,167,168,169,170,172,173,175,176,177,178,179,181,182,183,184,185,186,187,188,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,210,212,213,215,216,218,219,221,224,239,241,242,243,245,246,249,264,266,270,274,289,291,295],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/embeddings/elmo_embedding.py":[4,7,136,10,11,12,13,14,15,141,17,18,19,20,21,23,155,163,171,173,305,58,61,92,99,111,119],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/modules/encoder/_elmo.py":[514,3,515,5,7,263,9,10,11,12,264,14,528,17,409,410,309,56,65,453,327,328,85,98,493,239,240,251,510],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/embeddings/contextual_embedding.py":[99,4,7,104,10,12,76,14,15,16,17,18,19,20,23,24,27],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/embeddings/bert_embedding.py":[4,7,8,135,11,12,14,15,16,17,271,19,20,21,22,23,24,149,273,27,157,168,171,186,67,198,71,203,207,211,215,95,98,227,361,115,
250],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/embeddings/char_embedding.py":[4,7,8,11,12,13,14,16,17,18,19,20,21,22,25,57,61,62,64,65,67,68,70,71,72,85,87,88,89,91,92,93,94,95,98,99,101,104,106,108,109,110,111,113,120,121,122,123,124,125,127,128,129,130,131,132,133,134,135,136,137,138,142,143,145,161,168,169,170,172,173,174,175,177,180,211,216,217,219,221,222,224,225,226,239,241,242,243,245,246,247,248,249,252,253,255,258,260,261,263,264,265,267,274,275,276,277,278,279,281,282,283,284,285,286,289,290,291,292,297,299,301,318],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/embeddings/stack_embedding.py":[4,7,10,12,13,15,18,37,39,40,41,42,43,44,45,46,48,49,50,51,52,53,55,64,71,75,87,92,99,100,101,102,103,104],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/models/__init__.py":[32,33,34,9,11,13,14,16,18,19,20,21,23,24,27,28,30,31],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/models/base_model.py":[32,1,33,3,5,7,10,12,14,15,17,20,24,25,26,27,29,30],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/models/bert.py":[4,6,8,10,11,13,14,15,16,17,20,57,58,59,60,61,65,67,68,69,71,77,78,80,81,82,83,84,86,91,93,98,135,136,137,138,139,142,144,145,146,148,154,155,156,157,158,159,160,161,162,164,169,171,176,215,216,217,218,219,222,224,225,226,228,234,235,236,237,239,251,253,258,300,301,302,303,306,308,311,313,319,320,321,322,323,324,326,343,345],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/models/biaffine_parser.py":[3,5,517,6,520,9,10,11,12,522,14,523,16,17,18,19,20,21,22,23,24,25,530,536,28,539,542,534,544,33,34,35,36,37,38,39,40,41,545,546,547,548,46,47,48,49,50,51,52,53,45,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,73,74,75,76,524,77,78,79,80,525,81,84,82,87,526,527,92,93,94,95,528,96,97,99,101,102,103,104,105,107,108,109,110,111,112,531,114,115,116,117,118,119,120,121,122,532,124,125,126,533,128,131,136,138,139,141,142,151,152,153,154,155,156,157,158,160,161,170,171,172,173,174,175,176,177,178,179,182,188,190,191,192,193,194,195,198,200,549,207,208,209,210,211,42,214,43,222,44,224,225,226,227,229,236,237,238,241,262,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,305,306,307,308,310,311,312,313,314,315,316,317,318,322,323,324,325,326,327,328,329,330,331,333,334,335,336,337,338,339,341,342,344,362,366,368,369,371,372,373,376,377,378,379,380,381,382,383,385,386,387,391,392,393,394,397,400,402,403,405,406,416,417,418,419,420,421,422,424,437,438,439,440,441,442,443,444,445,446,447,449,450,451,452,453,454,456,469,470,471,472,473,474,477,489,493,494,495,496,497,498,499,502],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/models/cnn_text_classification.py":[4,7,10,11,13,14,15,16,19,32,38,39,42,43,44,45,46,47,48,50,57,58,59,60,62,63,64,65,67,74,75,76],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/models/sequence_labeling.py":[3,5,6,10,11,12,14,15,16,17,18,19,20,21,22,25,39,41,61,75,78,82,93,95,96,98,99,100,101,102,104,112,113,114,116,118,120,122,124,132,134,136,138,140,141,143,151,152,153,154,155,156,158,159,160,161,162,163,165,170,171,174,189,191,193,195,196,197,198,199,200,201,202,203,204,206,207,213,218,219,221,229,230,231,232,233,234,236,237,238,239,240,241,243,252,253,254,257,259,263,264,267,269,270,271,272,273,274,275,277,279,287,289,296],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/models/snli.py":[4,6,9,10,11,12,14,15,16,17,20,32,35,36,38,41,42,43,44,45,48,49,50,51,52,54,57,58,59,60,61,63,65,66,68,77,78,79,80,81,82,83,87,89,90,91,92,94,95,99,100,101,102,104,105,107,113,115,116,117,121,122,123,124,126,127,128,129,130,131,134,136,137,138,139,142,143,144,145,146,147,148,149,151,153,154,155,156,158,160,162,165,167,168
,169,174,177,178,179,182,183,184,185,186,187,189,190,193,194,195,196,197,198,199,202,204,205,208,209,211,213,214,215,216,217,218,220],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/models/star_transformer.py":[3,5,6,7,8,11,12,14,15,16,17,20,36,38,46,47,48,49,51,52,53,54,55,56,58,67,68,69,70,73,74,75,76,77,78,79,80,83,84,85,88,89,90,91,92,93,94,95,96,99,100,101,102,105,123,133,134,135,136,137,138,139,140,141,142,143,145,152,153,154,155,156,158,165,166,167,170,188,198,199,200,201,202,203,204,205,206,207,208,210,217,218,219,220,221,223,230,231,232,235,253,263,264,265,266,267,268,269,270,271,272,273,275,284,285,287,288,289,291,292,293,294,296,305,306,307],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/dist_trainer.py":[3,4,5,6,7,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,152,29,30,157,34,169,47,304,50,179,183,312,58,320,332,343,355,229],"/hdd/fudanNLP/fastNLP/fastNLP/fastNLP/core/predictor.py":[1,4,7,9,11,12,13,14,17,25,27,28,31,32,33,35,42,44,47,48,49,50,51,53,56,58,59,60,61,62,64,67,68,69,70,80,81]}} \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 210d158a..bd7a34f5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,7 @@ install: - pip install pytest-cov # command to run tests script: - - pytest --cov=./ test/ + - pytest --cov=fastNLP test/ after_success: - bash <(curl -s https://codecov.io/bash) From 1756e3ffdf1ffa7ac4d296883fc5ebf4e3ad38c9 Mon Sep 17 00:00:00 2001 From: yh_cc Date: Thu, 29 Aug 2019 11:16:59 +0800 Subject: [PATCH 14/50] =?UTF-8?q?1.=E4=BF=AE=E5=A4=8DMNLILoader=E4=B8=AD?= =?UTF-8?q?=E7=9A=84bug;=202.=E4=BF=AE=E5=A4=8Dfield=E4=B8=AD=E7=9A=84tens?= =?UTF-8?q?or=20warning?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/field.py | 6 +++--- fastNLP/core/vocabulary.py | 4 ++-- fastNLP/io/loader/matching.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/fastNLP/core/field.py b/fastNLP/core/field.py index 05f987c2..859dfb1f 100644 --- a/fastNLP/core/field.py +++ b/fastNLP/core/field.py @@ -595,7 +595,7 @@ class AutoPadder(Padder): max_len = max(map(len, contents)) tensor = torch.full((len(contents), max_len), fill_value=self.pad_val, dtype=field_ele_dtype) for i, content_i in enumerate(contents): - tensor[i, :len(content_i)] = torch.tensor(content_i) + tensor[i, :len(content_i)] = content_i.clone().detach() elif dim == 2: max_len = max(map(len, contents)) max_word_len = max([max([len(content_ii) for content_ii in content_i]) for @@ -604,7 +604,7 @@ class AutoPadder(Padder): dtype=field_ele_dtype) for i, content_i in enumerate(contents): for j, content_ii in enumerate(content_i): - tensor[i, j, :len(content_ii)] = torch.tensor(content_ii) + tensor[i, j, :len(content_ii)] = content_ii.clone().detach() else: shapes = set([np.shape(content_i) for content_i in contents]) if len(shapes) > 1: @@ -615,7 +615,7 @@ class AutoPadder(Padder): tensor = torch.full([len(contents)] + list(shape), fill_value=self.pad_val, dtype=field_ele_dtype) for i, content_i in enumerate(contents): - tensor[i] = torch.tensor(content_i, dtype=field_ele_dtype) + tensor[i] = content_i.clone().detach().to(field_ele_dtype) else: raise RuntimeError( f"Field:{field_name} has 3 dimensions, every sample should have the same shape.") diff --git a/fastNLP/core/vocabulary.py b/fastNLP/core/vocabulary.py index 52d33a5a..cd4f2c0f 100644 --- a/fastNLP/core/vocabulary.py +++ b/fastNLP/core/vocabulary.py @@ -253,7 +253,7 @@ class Vocabulary(object): if self.unknown is not None: return self.word2idx[self.unknown] else: - 
raise ValueError("word {} not in vocabulary".format(w)) + raise ValueError("word `{}` not in vocabulary".format(w)) @_check_build_vocab def index_dataset(self, *datasets, field_name, new_field_name=None): @@ -360,7 +360,7 @@ class Vocabulary(object): try: dataset.apply(construct_vocab) except BaseException as e: - log("When processing the `{}` dataset, the following error occurred:".format(idx)) + logger.error("When processing the `{}` dataset, the following error occurred:".format(idx)) raise e else: raise TypeError("Only DataSet type is allowed.") diff --git a/fastNLP/io/loader/matching.py b/fastNLP/io/loader/matching.py index 7f03ca3e..a21d0845 100644 --- a/fastNLP/io/loader/matching.py +++ b/fastNLP/io/loader/matching.py @@ -41,7 +41,7 @@ class MNLILoader(Loader): ds = DataSet() with open(path, 'r', encoding='utf-8') as f: f.readline() # 跳过header - if path.endswith("test.tsv"): + if path.endswith("test_matched.tsv") or path.endswith('test_mismatched.tsv'): warnings.warn("RTE's test file has no target.") for line in f: line = line.strip() From 0908c736ebc1a2afb9c36c908391943b08a45e95 Mon Sep 17 00:00:00 2001 From: Yige Xu Date: Thu, 29 Aug 2019 16:38:17 +0800 Subject: [PATCH 15/50] fix code in BertModel.from_pretrained and BertEmbedding --- fastNLP/embeddings/bert_embedding.py | 20 +++++++------------- fastNLP/modules/encoder/bert.py | 12 +++++++----- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/fastNLP/embeddings/bert_embedding.py b/fastNLP/embeddings/bert_embedding.py index e15c15f5..d1a5514a 100644 --- a/fastNLP/embeddings/bert_embedding.py +++ b/fastNLP/embeddings/bert_embedding.py @@ -17,8 +17,8 @@ import numpy as np from itertools import chain from ..core.vocabulary import Vocabulary -from ..io.file_utils import _get_embedding_url, cached_path, PRETRAINED_BERT_MODEL_DIR -from ..modules.encoder.bert import _WordPieceBertModel, BertModel, BertTokenizer, _get_bert_dir +from ..io.file_utils import PRETRAINED_BERT_MODEL_DIR +from ..modules.encoder.bert import _WordPieceBertModel, BertModel, BertTokenizer from .contextual_embedding import ContextualEmbedding import warnings from ..core import logger @@ -77,15 +77,12 @@ class BertEmbedding(ContextualEmbedding): " faster speed.") warnings.warn("For Chinese bert, pooled_method should choose from 'first', 'last' in order to achieve" " faster speed.") - - # 根据model_dir_or_name检查是否存在并下载 - model_dir = _get_bert_dir(model_dir_or_name) self._word_sep_index = None if '[SEP]' in vocab: self._word_sep_index = vocab['[SEP]'] - self.model = _WordBertModel(model_dir=model_dir, vocab=vocab, layers=layers, + self.model = _WordBertModel(model_dir_or_name=model_dir_or_name, vocab=vocab, layers=layers, pool_method=pool_method, include_cls_sep=include_cls_sep, pooled_cls=pooled_cls, auto_truncate=auto_truncate, min_freq=2) @@ -170,11 +167,8 @@ class BertWordPieceEncoder(nn.Module): def __init__(self, model_dir_or_name: str = 'en-base-uncased', layers: str = '-1', pooled_cls: bool = False, word_dropout=0, dropout=0, requires_grad: bool = False): super().__init__() - - # 根据model_dir_or_name检查是否存在并下载 - model_dir = _get_bert_dir(model_dir_or_name) - self.model = _WordPieceBertModel(model_dir=model_dir, layers=layers, pooled_cls=pooled_cls) + self.model = _WordPieceBertModel(model_dir_or_name=model_dir_or_name, layers=layers, pooled_cls=pooled_cls) self._sep_index = self.model._sep_index self._wordpiece_pad_index = self.model._wordpiece_pad_index self._wordpiece_unk_index = self.model._wordpiece_unknown_index @@ -269,12 +263,12 @@ class 
BertWordPieceEncoder(nn.Module): class _WordBertModel(nn.Module): - def __init__(self, model_dir: str, vocab: Vocabulary, layers: str = '-1', pool_method: str = 'first', + def __init__(self, model_dir_or_name: str, vocab: Vocabulary, layers: str = '-1', pool_method: str = 'first', include_cls_sep: bool = False, pooled_cls: bool = False, auto_truncate: bool = False, min_freq=2): super().__init__() - self.tokenzier = BertTokenizer.from_pretrained(model_dir) - self.encoder = BertModel.from_pretrained(model_dir) + self.tokenzier = BertTokenizer.from_pretrained(model_dir_or_name) + self.encoder = BertModel.from_pretrained(model_dir_or_name) self._max_position_embeddings = self.encoder.config.max_position_embeddings # 检查encoder_layer_number是否合理 encoder_layer_number = len(self.encoder.encoder.layer) diff --git a/fastNLP/modules/encoder/bert.py b/fastNLP/modules/encoder/bert.py index 89a1b09d..e73a8172 100644 --- a/fastNLP/modules/encoder/bert.py +++ b/fastNLP/modules/encoder/bert.py @@ -143,7 +143,7 @@ def _get_bert_dir(model_dir_or_name: str = 'en-base-uncased'): else: logger.error(f"Cannot recognize BERT dir or name ``{model_dir_or_name}``.") raise ValueError(f"Cannot recognize BERT dir or name ``{model_dir_or_name}``.") - return model_dir + return str(model_dir) class BertLayerNorm(nn.Module): @@ -453,6 +453,9 @@ class BertModel(nn.Module): if state_dict is None: weights_path = _get_file_name_base_on_postfix(pretrained_model_dir, '.bin') state_dict = torch.load(weights_path, map_location='cpu') + else: + logger.error(f'Cannot load parameters through `state_dict` variable.') + raise RuntimeError(f'Cannot load parameters through `state_dict` variable.') old_keys = [] new_keys = [] @@ -493,7 +496,7 @@ class BertModel(nn.Module): logger.warn("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) - logger.info(f"Load pre-trained BERT parameters from dir {pretrained_model_dir}.") + logger.info(f"Load pre-trained BERT parameters from file {weights_path}.") return model @@ -854,9 +857,8 @@ class _WordPieceBertModel(nn.Module): def __init__(self, model_dir_or_name: str, layers: str = '-1', pooled_cls: bool=False): super().__init__() - self.model_dir = _get_bert_dir(model_dir_or_name) - self.tokenzier = BertTokenizer.from_pretrained(self.model_dir) - self.encoder = BertModel.from_pretrained(self.model_dir) + self.tokenzier = BertTokenizer.from_pretrained(model_dir_or_name) + self.encoder = BertModel.from_pretrained(model_dir_or_name) # 检查encoder_layer_number是否合理 encoder_layer_number = len(self.encoder.encoder.layer) self.layers = list(map(int, layers.split(','))) From 9e6f4ffb8bf29020e7871f06eef4f8e0d32e3774 Mon Sep 17 00:00:00 2001 From: lyhuang18 <42239874+lyhuang18@users.noreply.github.com> Date: Fri, 30 Aug 2019 01:21:59 +0800 Subject: [PATCH 16/50] =?UTF-8?q?datasetloader=E6=94=B9=E6=88=90pipe?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../text_classification/train_awdlstm.py | 23 ++++++++--------- .../text_classification/train_lstm.py | 25 ++++++++----------- .../text_classification/train_lstm_att.py | 25 ++++++++----------- 3 files changed, 32 insertions(+), 41 deletions(-) diff --git a/reproduction/text_classification/train_awdlstm.py b/reproduction/text_classification/train_awdlstm.py index b2a67fdb..7537e6f7 100644 --- a/reproduction/text_classification/train_awdlstm.py +++ b/reproduction/text_classification/train_awdlstm.py @@ -1,11 +1,9 @@ # 这个模型需要在pytorch=0.4下运行,weight_drop不支持1.0 -# 
首先需要加入以下的路径到环境变量,因为当前只对内部测试开放,所以需要手动申明一下路径 -import os -os.environ['FASTNLP_BASE_URL'] = 'http://10.141.222.118:8888/file/download/' -os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches' +import sys +sys.path.append('../..') -from fastNLP.io.data_loader import IMDBLoader +from fastNLP.io.pipe.classification import IMDBPipe from fastNLP.embeddings import StaticEmbedding from model.awd_lstm import AWDLSTMSentiment @@ -32,15 +30,14 @@ opt=Config() # load data -dataloader=IMDBLoader() -datainfo=dataloader.process(opt.datapath) +data_bundle=IMDBPipe.process_from_file(opt.datapath) -# print(datainfo.datasets["train"]) -# print(datainfo) +# print(data_bundle.datasets["train"]) +# print(data_bundle) # define model -vocab=datainfo.vocabs['words'] +vocab=data_bundle.vocabs['words'] embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-840b-300', requires_grad=True) model=AWDLSTMSentiment(init_embed=embed, num_classes=opt.num_classes, hidden_dim=opt.hidden_dim, num_layers=opt.num_layers, nfc=opt.nfc, wdrop=opt.wdrop) @@ -52,11 +49,11 @@ optimizer= Adam([param for param in model.parameters() if param.requires_grad==T def train(datainfo, model, optimizer, loss, metrics, opt): - trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss, - metrics=metrics, dev_data=datainfo.datasets['test'], device=0, check_code_level=-1, + trainer = Trainer(data_bundle.datasets['train'], model, optimizer=optimizer, loss=loss, + metrics=metrics, dev_data=data_bundle.datasets['test'], device=0, check_code_level=-1, n_epochs=opt.train_epoch, save_path=opt.save_model_path) trainer.train() if __name__ == "__main__": - train(datainfo, model, optimizer, loss, metrics, opt) + train(data_bundle, model, optimizer, loss, metrics, opt) diff --git a/reproduction/text_classification/train_lstm.py b/reproduction/text_classification/train_lstm.py index 40f77061..a23be0cb 100644 --- a/reproduction/text_classification/train_lstm.py +++ b/reproduction/text_classification/train_lstm.py @@ -1,9 +1,7 @@ -# 首先需要加入以下的路径到环境变量,因为当前只对内部测试开放,所以需要手动申明一下路径 -import os -os.environ['FASTNLP_BASE_URL'] = 'http://10.141.222.118:8888/file/download/' -os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches' +import sys +sys.path.append('../..') -from fastNLP.io.data_loader import IMDBLoader +from fastNLP.io.pipe.classification import IMDBPipe from fastNLP.embeddings import StaticEmbedding from model.lstm import BiLSTMSentiment @@ -29,15 +27,14 @@ opt=Config() # load data -dataloader=IMDBLoader() -datainfo=dataloader.process(opt.datapath) +data_bundle=IMDBPipe.process_from_file(opt.datapath) -# print(datainfo.datasets["train"]) -# print(datainfo) +# print(data_bundle.datasets["train"]) +# print(data_bundle) # define model -vocab=datainfo.vocabs['words'] +vocab=data_bundle.vocabs['words'] embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-840b-300', requires_grad=True) model=BiLSTMSentiment(init_embed=embed, num_classes=opt.num_classes, hidden_dim=opt.hidden_dim, num_layers=opt.num_layers, nfc=opt.nfc) @@ -48,12 +45,12 @@ metrics=AccuracyMetric() optimizer= Adam([param for param in model.parameters() if param.requires_grad==True], lr=opt.lr) -def train(datainfo, model, optimizer, loss, metrics, opt): - trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss, - metrics=metrics, dev_data=datainfo.datasets['test'], device=0, check_code_level=-1, +def train(data_bundle, model, optimizer, loss, metrics, opt): + trainer = Trainer(data_bundle.datasets['train'], model, 
optimizer=optimizer, loss=loss, + metrics=metrics, dev_data=data_bundle.datasets['test'], device=0, check_code_level=-1, n_epochs=opt.train_epoch, save_path=opt.save_model_path) trainer.train() if __name__ == "__main__": - train(datainfo, model, optimizer, loss, metrics, opt) \ No newline at end of file + train(data_bundle, model, optimizer, loss, metrics, opt) \ No newline at end of file diff --git a/reproduction/text_classification/train_lstm_att.py b/reproduction/text_classification/train_lstm_att.py index 1052f606..a2b8612d 100644 --- a/reproduction/text_classification/train_lstm_att.py +++ b/reproduction/text_classification/train_lstm_att.py @@ -1,9 +1,7 @@ -# 首先需要加入以下的路径到环境变量,因为当前只对内部测试开放,所以需要手动申明一下路径 -import os -os.environ['FASTNLP_BASE_URL'] = 'http://10.141.222.118:8888/file/download/' -os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches' +import sys +sys.path.append('../..') -from fastNLP.io.data_loader import IMDBLoader +from fastNLP.io.pipe.classification import IMDBPipe from fastNLP.embeddings import StaticEmbedding from model.lstm_self_attention import BiLSTM_SELF_ATTENTION @@ -31,15 +29,14 @@ opt=Config() # load data -dataloader=IMDBLoader() -datainfo=dataloader.process(opt.datapath) +data_bundle=IMDBPipe.process_from_file(opt.datapath) -# print(datainfo.datasets["train"]) -# print(datainfo) +# print(data_bundle.datasets["train"]) +# print(data_bundle) # define model -vocab=datainfo.vocabs['words'] +vocab=data_bundle.vocabs['words'] embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-840b-300', requires_grad=True) model=BiLSTM_SELF_ATTENTION(init_embed=embed, num_classes=opt.num_classes, hidden_dim=opt.hidden_dim, num_layers=opt.num_layers, attention_unit=opt.attention_unit, attention_hops=opt.attention_hops, nfc=opt.nfc) @@ -50,12 +47,12 @@ metrics=AccuracyMetric() optimizer= Adam([param for param in model.parameters() if param.requires_grad==True], lr=opt.lr) -def train(datainfo, model, optimizer, loss, metrics, opt): - trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss, - metrics=metrics, dev_data=datainfo.datasets['test'], device=0, check_code_level=-1, +def train(data_bundle, model, optimizer, loss, metrics, opt): + trainer = Trainer(data_bundle.datasets['train'], model, optimizer=optimizer, loss=loss, + metrics=metrics, dev_data=data_bundle.datasets['test'], device=0, check_code_level=-1, n_epochs=opt.train_epoch, save_path=opt.save_model_path) trainer.train() if __name__ == "__main__": - train(datainfo, model, optimizer, loss, metrics, opt) + train(data_bundle, model, optimizer, loss, metrics, opt) From 9529f89abd41ee7ef0d9e2e32596ef9ee1aedb1e Mon Sep 17 00:00:00 2001 From: yh Date: Fri, 30 Aug 2019 19:54:28 +0800 Subject: [PATCH 17/50] =?UTF-8?q?=E5=A2=9E=E5=8A=A0DataBundle=E7=9A=84?= =?UTF-8?q?=E6=96=B9=E6=B3=95=EF=BC=9B=E5=A2=9E=E5=8A=A0BilSTMCRF=E7=9A=84?= =?UTF-8?q?=E6=B3=A8=E9=87=8A?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/dataset.py | 14 ++-- fastNLP/io/data_bundle.py | 72 +++++++++++++++- fastNLP/models/sequence_labeling.py | 83 ++++++++----------- .../seqence_labelling/ner/train_ontonote.py | 4 +- 4 files changed, 112 insertions(+), 61 deletions(-) diff --git a/fastNLP/core/dataset.py b/fastNLP/core/dataset.py index 51bcef43..551cf1f8 100644 --- a/fastNLP/core/dataset.py +++ b/fastNLP/core/dataset.py @@ -575,18 +575,18 @@ class DataSet(object): """ return len(self) - def rename_field(self, old_name, new_name): + def rename_field(self, 
field_name, new_field_name): """ 将某个field重新命名. - :param str old_name: 原来的field名称。 - :param str new_name: 修改为new_name。 + :param str field_name: 原来的field名称。 + :param str new_field_name: 修改为new_name。 """ - if old_name in self.field_arrays: - self.field_arrays[new_name] = self.field_arrays.pop(old_name) - self.field_arrays[new_name].name = new_name + if field_name in self.field_arrays: + self.field_arrays[new_field_name] = self.field_arrays.pop(field_name) + self.field_arrays[new_field_name].name = new_field_name else: - raise KeyError("DataSet has no field named {}.".format(old_name)) + raise KeyError("DataSet has no field named {}.".format(field_name)) return self def set_target(self, *field_names, flag=True, use_1st_ins_infer_dim_type=True): diff --git a/fastNLP/io/data_bundle.py b/fastNLP/io/data_bundle.py index 969730a3..f30add34 100644 --- a/fastNLP/io/data_bundle.py +++ b/fastNLP/io/data_bundle.py @@ -139,9 +139,44 @@ class DataBundle: dataset.set_target(field_name, flag=flag, use_1st_ins_infer_dim_type=use_1st_ins_infer_dim_type) return self + def set_pad_val(self, field_name, pad_val, ignore_miss_dataset=True): + """ + 将DataBundle中所有的DataSet中名为field_name的Field的padding值设置为pad_val. + + :param str field_name: + :param int pad_val: + :param bool ignore_miss_dataset: 当某个field名称在某个dataset不存在时,如果为True,则直接忽略该DataSet; + 如果为False,则报错 + :return: self + """ + for name, dataset in self.datasets.items(): + if dataset.has_field(field_name=field_name): + dataset.set_pad_val(field_name=field_name, pad_val=pad_val) + elif not ignore_miss_dataset: + raise KeyError(f"{field_name} not found DataSet:{name}.") + return self + + def set_ignore_type(self, *field_names, flag=True, ignore_miss_dataset=True): + """ + 将DataBundle中所有的DataSet中名为*field_names的Field的ignore_type设置为flag状态 + + :param str field_names: + :param bool flag: + :param bool ignore_miss_dataset: 当某个field名称在某个dataset不存在时,如果为True,则直接忽略该DataSet; + 如果为False,则报错 + :return: self + """ + for name, dataset in self.datasets.items(): + for field_name in field_names: + if dataset.has_field(field_name=field_name): + dataset.set_ignore_type(field_name, flag=flag) + elif not ignore_miss_dataset: + raise KeyError(f"{field_name} not found DataSet:{name}.") + return self + def copy_field(self, field_name, new_field_name, ignore_miss_dataset=True): """ - 将DataBundle中所有的field_name复制一份叫new_field_name. + 将DataBundle中所有的DataSet中名为field_name的Field复制一份并命名为叫new_field_name. :param str field_name: :param str new_field_name: @@ -156,9 +191,42 @@ class DataBundle: raise KeyError(f"{field_name} not found DataSet:{name}.") return self + def rename_field(self, field_name, new_field_name, ignore_miss_dataset=True): + """ + 将DataBundle中所有DataSet中名为field_name的field重命名为new_field_name. + + :param str field_name: + :param str new_field_name: + :param bool ignore_miss_dataset: 当某个field名称在某个dataset不存在时,如果为True,则直接忽略该DataSet; + 如果为False,则报错 + :return: self + """ + for name, dataset in self.datasets.items(): + if dataset.has_field(field_name=field_name): + dataset.rename_field(field_name=field_name, new_field_name=new_field_name) + elif not ignore_miss_dataset: + raise KeyError(f"{field_name} not found DataSet:{name}.") + return self + + def delete_field(self, field_name, ignore_miss_dataset=True): + """ + 将DataBundle中所有DataSet中名为field_name的field删除掉. 
+ + :param str field_name: + :param bool ignore_miss_dataset: 当某个field名称在某个dataset不存在时,如果为True,则直接忽略该DataSet; + 如果为False,则报错 + :return: self + """ + for name, dataset in self.datasets.items(): + if dataset.has_field(field_name=field_name): + dataset.delete_field(field_name=field_name) + elif not ignore_miss_dataset: + raise KeyError(f"{field_name} not found DataSet:{name}.") + return self + def apply_field(self, func, field_name:str, new_field_name:str, ignore_miss_dataset=True, **kwargs): """ - 对DataBundle中所有的dataset使用apply方法 + 对DataBundle中所有的dataset使用apply_field方法 :param callable func: input是instance中名为 `field_name` 的field的内容。 :param str field_name: 传入func的是哪个field。 diff --git a/fastNLP/models/sequence_labeling.py b/fastNLP/models/sequence_labeling.py index 0dff21f0..0c573a90 100644 --- a/fastNLP/models/sequence_labeling.py +++ b/fastNLP/models/sequence_labeling.py @@ -4,7 +4,7 @@ __all__ = [ "SeqLabeling", "AdvSeqLabel", - # "BiLSTMCRF" + "BiLSTMCRF" ] import torch @@ -14,7 +14,6 @@ import torch.nn.functional as F from .base_model import BaseModel from ..core.const import Const as C from ..core.utils import seq_len_to_mask -from ..embeddings import embedding from ..embeddings import get_embeddings from ..modules import ConditionalRandomField from ..modules import LSTM @@ -24,18 +23,15 @@ from ..modules.decoder.crf import allowed_transitions class BiLSTMCRF(BaseModel): """ - 结构为BiLSTM + FC + Dropout + CRF. + 结构为embedding + BiLSTM + FC + Dropout + CRF. - .. todo:: - 继续补充文档 - - :param embed: tuple: - :param num_classes: - :param num_layers: - :param hidden_size: - :param dropout: - :param target_vocab: - :param encoding_type: + :param embed: 支持(1)fastNLP的各种Embedding, (2) tuple, 指明num_embedding, dimension, 如(1000, 100) + :param num_classes: 一共多少个类 + :param num_layers: BiLSTM的层数 + :param hidden_size: BiLSTM的hidden_size,实际hidden size为该值的两倍(前向、后向) + :param dropout: dropout的概率,0为不dropout + :param target_vocab: Vocabulary对象,target与index的对应关系 + :param encoding_type: encoding的类型,支持'bioes', 'bmes', 'bio', 'bmeso'等 """ def __init__(self, embed, num_classes, num_layers=1, hidden_size=100, dropout=0.5, target_vocab=None, encoding_type=None): @@ -86,21 +82,20 @@ class SeqLabeling(BaseModel): 一个基础的Sequence labeling的模型。 用于做sequence labeling的基础类。结构包含一层Embedding,一层LSTM(单向,一层),一层FC,以及一层CRF。 - :param tuple(int,int),torch.FloatTensor,nn.Embedding,numpy.ndarray init_embed: Embedding的大小(传入tuple(int, int), - 第一个int为vocab_zie, 第二个int为embed_dim); 如果为Tensor, Embedding, ndarray等则直接使用该值初始化Embedding + :param tuple(int,int),torch.FloatTensor,nn.Embedding,numpy.ndarray embed: Embedding的大小(传入tuple(int, int), + 第一个int为vocab_zie, 第二个int为embed_dim); 如果为Tensor, embedding, ndarray等则直接使用该值初始化Embedding :param int hidden_size: LSTM隐藏层的大小 :param int num_classes: 一共有多少类 """ - def __init__(self, init_embed, hidden_size, num_classes): + def __init__(self, embed, hidden_size, num_classes): super(SeqLabeling, self).__init__() - self.Embedding = embedding.Embedding(init_embed) - self.Rnn = encoder.LSTM(self.Embedding.embedding_dim, hidden_size) - self.Linear = nn.Linear(hidden_size, num_classes) - self.Crf = decoder.ConditionalRandomField(num_classes) - self.mask = None - + self.embedding = get_embeddings(embed) + self.rnn = encoder.LSTM(self.embedding.embedding_dim, hidden_size) + self.fc = nn.Linear(hidden_size, num_classes) + self.crf = decoder.ConditionalRandomField(num_classes) + def forward(self, words, seq_len, target): """ :param torch.LongTensor words: [batch_size, max_len],序列的index @@ -109,17 +104,14 @@ class 
SeqLabeling(BaseModel): :return y: If truth is None, return list of [decode path(list)]. Used in testing and predicting. If truth is not None, return loss, a scalar. Used in training. """ - assert words.shape[0] == seq_len.shape[0] - assert target.shape == words.shape - self.mask = self._make_mask(words, seq_len) - - x = self.Embedding(words) + mask = seq_len_to_mask(seq_len, max_len=words.size(1)) + x = self.embedding(words) # [batch_size, max_len, word_emb_dim] - x, _ = self.Rnn(x, seq_len) + x, _ = self.rnn(x, seq_len) # [batch_size, max_len, hidden_size * direction] - x = self.Linear(x) + x = self.fc(x) # [batch_size, max_len, num_classes] - return {C.LOSS: self._internal_loss(x, target)} + return {C.LOSS: self._internal_loss(x, target, mask)} def predict(self, words, seq_len): """ @@ -129,18 +121,18 @@ class SeqLabeling(BaseModel): :param torch.LongTensor seq_len: [batch_size,] :return: {'pred': xx}, [batch_size, max_len] """ - self.mask = self._make_mask(words, seq_len) + mask = seq_len_to_mask(seq_len, max_len=words.size(1)) - x = self.Embedding(words) + x = self.embedding(words) # [batch_size, max_len, word_emb_dim] - x, _ = self.Rnn(x, seq_len) + x, _ = self.rnn(x, seq_len) # [batch_size, max_len, hidden_size * direction] - x = self.Linear(x) + x = self.fc(x) # [batch_size, max_len, num_classes] - pred = self._decode(x) + pred = self._decode(x, mask) return {C.OUTPUT: pred} - def _internal_loss(self, x, y): + def _internal_loss(self, x, y, mask): """ Negative log likelihood loss. :param x: Tensor, [batch_size, max_len, tag_size] @@ -152,22 +144,15 @@ class SeqLabeling(BaseModel): y = y.long() assert x.shape[:2] == y.shape assert y.shape == self.mask.shape - total_loss = self.Crf(x, y, self.mask) + total_loss = self.crf(x, y, mask) return torch.mean(total_loss) - def _make_mask(self, x, seq_len): - batch_size, max_len = x.size(0), x.size(1) - mask = seq_len_to_mask(seq_len) - mask = mask.view(batch_size, max_len) - mask = mask.to(x).float() - return mask - - def _decode(self, x): + def _decode(self, x, mask): """ :param torch.FloatTensor x: [batch_size, max_len, tag_size] :return prediction: [batch_size, max_len] """ - tag_seq, _ = self.Crf.viterbi_decode(x, self.mask) + tag_seq, _ = self.crf.viterbi_decode(x, mask) return tag_seq @@ -177,7 +162,7 @@ class AdvSeqLabel(nn.Module): 更复杂的Sequence Labelling模型。结构为Embedding, LayerNorm, 双向LSTM(两层),FC,LayerNorm,DropOut,FC,CRF。 - :param tuple(int,int),torch.FloatTensor,nn.Embedding,numpy.ndarray init_embed: Embedding的大小(传入tuple(int, int), + :param tuple(int,int),torch.FloatTensor,nn.Embedding,numpy.ndarray embed: Embedding的大小(传入tuple(int, int), 第一个int为vocab_zie, 第二个int为embed_dim); 如果为Tensor, Embedding, ndarray等则直接使用该值初始化Embedding :param int hidden_size: LSTM的隐层大小 :param int num_classes: 有多少个类 @@ -188,11 +173,11 @@ class AdvSeqLabel(nn.Module): :param str encoding_type: 支持"BIO", "BMES", "BEMSO", 只有在id2words不为None的情况有用。 """ - def __init__(self, init_embed, hidden_size, num_classes, dropout=0.3, id2words=None, encoding_type='bmes'): + def __init__(self, embed, hidden_size, num_classes, dropout=0.3, id2words=None, encoding_type='bmes'): super().__init__() - self.Embedding = embedding.Embedding(init_embed) + self.Embedding = get_embeddings(embed) self.norm1 = torch.nn.LayerNorm(self.Embedding.embedding_dim) self.Rnn = encoder.LSTM(input_size=self.Embedding.embedding_dim, hidden_size=hidden_size, num_layers=2, dropout=dropout, diff --git a/reproduction/seqence_labelling/ner/train_ontonote.py b/reproduction/seqence_labelling/ner/train_ontonote.py 
index ee80b6f7..9fd13100 100644 --- a/reproduction/seqence_labelling/ner/train_ontonote.py +++ b/reproduction/seqence_labelling/ner/train_ontonote.py @@ -18,11 +18,9 @@ from fastNLP.io.pipe.conll import OntoNotesNERPipe #######hyper normalize = False -lower = False lr = 0.01 dropout = 0.5 batch_size = 32 -job_embed = False data_name = 'ontonote' #######hyper @@ -41,7 +39,7 @@ def cache(): word_dropout=0.01, dropout=dropout, lower=True, - min_freq=2) + min_freq=1) return data, char_embed, word_embed data, char_embed, word_embed = cache() From 82b5726686dcbac9f9a2032537f53c3eb77f7698 Mon Sep 17 00:00:00 2001 From: yunfan Date: Sat, 24 Aug 2019 13:59:30 +0800 Subject: [PATCH 18/50] update transformer --- fastNLP/modules/encoder/transformer.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/fastNLP/modules/encoder/transformer.py b/fastNLP/modules/encoder/transformer.py index ce9172d5..70b82bde 100644 --- a/fastNLP/modules/encoder/transformer.py +++ b/fastNLP/modules/encoder/transformer.py @@ -32,9 +32,10 @@ class TransformerEncoder(nn.Module): self.norm1 = nn.LayerNorm(model_size) self.ffn = nn.Sequential(nn.Linear(model_size, inner_size), nn.ReLU(), - nn.Linear(inner_size, model_size), - TimestepDropout(dropout), ) + nn.Dropout(dropout), + nn.Linear(inner_size, model_size)) self.norm2 = nn.LayerNorm(model_size) + self.dropout = nn.Dropout(dropout) def forward(self, input, seq_mask=None, atte_mask_out=None): """ @@ -43,17 +44,20 @@ class TransformerEncoder(nn.Module): :param seq_mask: [batch, seq_len] :return: [batch, seq_len, model_size] """ + input = self.norm1(input) attention = self.atte(input, input, input, atte_mask_out) - norm_atte = self.norm1(attention + input) - attention *= seq_mask - output = self.ffn(norm_atte) - output = self.norm2(output + norm_atte) - output *= seq_mask + input = input + self.dropout(attention) + # attention *= seq_mask + input = self.norm2(input) + output = self.ffn(input) + input = input + self.dropout(output) + # output *= seq_mask return output def __init__(self, num_layers, **kargs): super(TransformerEncoder, self).__init__() self.layers = nn.ModuleList([self.SubLayer(**kargs) for _ in range(num_layers)]) + self.norm = nn.LayerNorm(kargs['model_size']) def forward(self, x, seq_mask=None): """ @@ -70,4 +74,4 @@ class TransformerEncoder(nn.Module): seq_mask = seq_mask[:, :, None] for layer in self.layers: output = layer(output, seq_mask, atte_mask_out) - return output + return self.norm(output) From 44af647839fe99f69b9364457ff3636df6367204 Mon Sep 17 00:00:00 2001 From: yunfan Date: Thu, 29 Aug 2019 20:19:13 +0800 Subject: [PATCH 19/50] [update] change data-loader to pipe --- .../text_classification/train_dpcnn.py | 30 +++++-------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/reproduction/text_classification/train_dpcnn.py b/reproduction/text_classification/train_dpcnn.py index f3f4e231..c7f5751c 100644 --- a/reproduction/text_classification/train_dpcnn.py +++ b/reproduction/text_classification/train_dpcnn.py @@ -8,21 +8,18 @@ from fastNLP.core.trainer import Trainer from fastNLP import CrossEntropyLoss, AccuracyMetric from fastNLP.embeddings import StaticEmbedding from reproduction.text_classification.model.dpcnn import DPCNN -from fastNLP.io.data_loader import YelpLoader from fastNLP.core.sampler import BucketSampler from fastNLP.core import LRScheduler from fastNLP.core.const import Const as C from fastNLP.core.vocabulary import VocabularyOption -from fastNLP.core.dist_trainer import 
DistTrainer from utils.util_init import set_rng_seeds from fastNLP import logger import os -# os.environ['FASTNLP_BASE_URL'] = 'http://10.141.222.118:8888/file/download/' -# os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches' +from fastNLP.io import YelpFullPipe, YelpPolarityPipe + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # hyper logger.add_file('log', 'INFO') -print(logger.handlers) class Config(): seed = 12345 @@ -50,18 +47,14 @@ class Config(): ops = Config() set_rng_seeds(ops.seed) -# print('RNG SEED: {}'.format(ops.seed)) logger.info('RNG SEED %d'%ops.seed) # 1.task相关信息:利用dataloader载入dataInfo -#datainfo=SSTLoader(fine_grained=True).process(paths=ops.datapath, train_ds=['train']) - @cache_results(ops.model_dir_or_name+'-data-cache') def load_data(): - datainfo = YelpLoader(fine_grained=True, lower=True).process( - paths=ops.datapath, train_ds=['train'], src_vocab_op=ops.src_vocab_op) + datainfo = YelpFullPipe(lower=True, tokenizer='raw').process_from_file(ops.datapath) for ds in datainfo.datasets.values(): ds.apply_field(len, C.INPUT, C.INPUT_LEN) ds.set_input(C.INPUT, C.INPUT_LEN) @@ -79,11 +72,8 @@ print(embedding.embedding.weight.data.mean(), embedding.embedding.weight.data.st # 2.或直接复用fastNLP的模型 -# embedding = StackEmbedding([StaticEmbedding(vocab), CNNCharEmbedding(vocab, 100)]) -datainfo.datasets['train'] = datainfo.datasets['train'][:1000] -datainfo.datasets['test'] = datainfo.datasets['test'][:1000] -# print(datainfo) -# print(datainfo.datasets['train'][0]) +# datainfo.datasets['train'] = datainfo.datasets['train'][:1000] # for debug purpose +# datainfo.datasets['test'] = datainfo.datasets['test'][:1000] logger.info(datainfo) model = DPCNN(init_embed=embedding, num_cls=len(datainfo.vocabs[C.TARGET]), @@ -99,14 +89,7 @@ optimizer = SGD([param for param in model.parameters() if param.requires_grad == callbacks = [] callbacks.append(LRScheduler(CosineAnnealingLR(optimizer, 5))) -# callbacks.append( -# LRScheduler(LambdaLR(optimizer, lambda epoch: ops.lr if epoch < -# ops.train_epoch * 0.8 else ops.lr * 0.1)) -# ) -# callbacks.append( -# FitlogCallback(data=datainfo.datasets, verbose=1) -# ) device = 'cuda:0' if torch.cuda.is_available() else 'cpu' @@ -114,12 +97,15 @@ device = 'cuda:0' if torch.cuda.is_available() else 'cpu' logger.info(device) # 4.定义train方法 +# normal trainer trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss, sampler=BucketSampler(num_buckets=50, batch_size=ops.batch_size), metrics=[metric], use_tqdm=False, save_path='save', dev_data=datainfo.datasets['test'], device=device, check_code_level=-1, batch_size=ops.batch_size, callbacks=callbacks, n_epochs=ops.train_epoch, num_workers=4) + +# distributed trainer # trainer = DistTrainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss, # metrics=[metric], # dev_data=datainfo.datasets['test'], device='cuda', From bbda73c14f2352583f1a89bafdd1ff7471543cc4 Mon Sep 17 00:00:00 2001 From: yunfan Date: Fri, 30 Aug 2019 21:48:00 +0800 Subject: [PATCH 20/50] [update] transformer --- fastNLP/modules/encoder/attention.py | 39 +++++++++++--------------- fastNLP/modules/encoder/transformer.py | 17 ++++++----- 2 files changed, 25 insertions(+), 31 deletions(-) diff --git a/fastNLP/modules/encoder/attention.py b/fastNLP/modules/encoder/attention.py index 02bd078a..6a973864 100644 --- a/fastNLP/modules/encoder/attention.py +++ b/fastNLP/modules/encoder/attention.py @@ -30,14 +30,14 @@ class DotAttention(nn.Module): def forward(self, Q, K, V, mask_out=None): """ 
- :param Q: [batch, seq_len_q, key_size] - :param K: [batch, seq_len_k, key_size] - :param V: [batch, seq_len_k, value_size] - :param mask_out: [batch, 1, seq_len] or [batch, seq_len_q, seq_len_k] + :param Q: [..., seq_len_q, key_size] + :param K: [..., seq_len_k, key_size] + :param V: [..., seq_len_k, value_size] + :param mask_out: [..., 1, seq_len] or [..., seq_len_q, seq_len_k] """ - output = torch.matmul(Q, K.transpose(1, 2)) / self.scale + output = torch.matmul(Q, K.transpose(-1, -2)) / self.scale if mask_out is not None: - output.masked_fill_(mask_out, -1e18) + output.masked_fill_(mask_out, -1e9) output = self.softmax(output) output = self.drop(output) return torch.matmul(output, V) @@ -65,17 +65,16 @@ class MultiHeadAttention(nn.Module): self.q_in = nn.Linear(input_size, in_size) self.k_in = nn.Linear(input_size, in_size) self.v_in = nn.Linear(input_size, in_size) - # follow the paper, do not apply dropout within dot-product self.attention = DotAttention(key_size=key_size, value_size=value_size, dropout=dropout) self.out = nn.Linear(value_size * num_head, input_size) self.reset_parameters() def reset_parameters(self): sqrt = math.sqrt - nn.init.normal_(self.q_in.weight, mean=0, std=sqrt(2.0 / (self.input_size + self.key_size))) - nn.init.normal_(self.k_in.weight, mean=0, std=sqrt(2.0 / (self.input_size + self.key_size))) - nn.init.normal_(self.v_in.weight, mean=0, std=sqrt(2.0 / (self.input_size + self.value_size))) - nn.init.xavier_normal_(self.out.weight) + nn.init.normal_(self.q_in.weight, mean=0, std=sqrt(1.0 / self.input_size)) + nn.init.normal_(self.k_in.weight, mean=0, std=sqrt(1.0 / self.input_size)) + nn.init.normal_(self.v_in.weight, mean=0, std=sqrt(1.0 / self.input_size)) + nn.init.normal_(self.out.weight, mean=0, std=sqrt(1.0 / self.input_size)) def forward(self, Q, K, V, atte_mask_out=None): """ @@ -89,20 +88,16 @@ class MultiHeadAttention(nn.Module): sk = K.size(1) d_k, d_v, n_head = self.key_size, self.value_size, self.num_head # input linear - q = self.q_in(Q).view(batch, sq, n_head, d_k) - k = self.k_in(K).view(batch, sk, n_head, d_k) - v = self.v_in(V).view(batch, sk, n_head, d_v) - - # transpose q, k and v to do batch attention - q = q.permute(2, 0, 1, 3).contiguous().view(-1, sq, d_k) - k = k.permute(2, 0, 1, 3).contiguous().view(-1, sk, d_k) - v = v.permute(2, 0, 1, 3).contiguous().view(-1, sk, d_v) + q = self.q_in(Q).view(batch, sq, n_head, d_k).transpose(1, 2) + k = self.k_in(K).view(batch, sk, n_head, d_k).transpose(1, 2) + v = self.v_in(V).view(batch, sk, n_head, d_v).transpose(1, 2) + if atte_mask_out is not None: - atte_mask_out = atte_mask_out.repeat(n_head, 1, 1) - atte = self.attention(q, k, v, atte_mask_out).view(n_head, batch, sq, d_v) + atte_mask_out = atte_mask_out[:,None,:,:] # [bsz,1,1,len] + atte = self.attention(q, k, v, atte_mask_out).view(batch, n_head, sq, d_v) # concat all heads, do output linear - atte = atte.permute(1, 2, 0, 3).contiguous().view(batch, sq, -1) + atte = atte.transpose(1, 2).contiguous().view(batch, sq, -1) output = self.out(atte) return output diff --git a/fastNLP/modules/encoder/transformer.py b/fastNLP/modules/encoder/transformer.py index 70b82bde..d8a612a0 100644 --- a/fastNLP/modules/encoder/transformer.py +++ b/fastNLP/modules/encoder/transformer.py @@ -5,8 +5,7 @@ __all__ = [ ] from torch import nn -from fastNLP.modules.encoder.attention import MultiHeadAttention -from ..dropout import TimestepDropout +from .attention import MultiHeadAttention class TransformerEncoder(nn.Module): @@ -29,12 +28,12 @@ class 
TransformerEncoder(nn.Module): def __init__(self, model_size, inner_size, key_size, value_size, num_head, dropout=0.1): super(TransformerEncoder.SubLayer, self).__init__() self.atte = MultiHeadAttention(model_size, key_size, value_size, num_head, dropout) - self.norm1 = nn.LayerNorm(model_size) + self.norm1 = nn.LayerNorm(model_size, eps=1e-6) self.ffn = nn.Sequential(nn.Linear(model_size, inner_size), nn.ReLU(), nn.Dropout(dropout), nn.Linear(inner_size, model_size)) - self.norm2 = nn.LayerNorm(model_size) + self.norm2 = nn.LayerNorm(model_size, eps=1e-6) self.dropout = nn.Dropout(dropout) def forward(self, input, seq_mask=None, atte_mask_out=None): @@ -47,17 +46,17 @@ class TransformerEncoder(nn.Module): input = self.norm1(input) attention = self.atte(input, input, input, atte_mask_out) input = input + self.dropout(attention) - # attention *= seq_mask + attention *= seq_mask input = self.norm2(input) output = self.ffn(input) input = input + self.dropout(output) - # output *= seq_mask - return output + input *= seq_mask + return input def __init__(self, num_layers, **kargs): super(TransformerEncoder, self).__init__() self.layers = nn.ModuleList([self.SubLayer(**kargs) for _ in range(num_layers)]) - self.norm = nn.LayerNorm(kargs['model_size']) + self.norm = nn.LayerNorm(kargs['model_size'], eps=1e-6) def forward(self, x, seq_mask=None): """ @@ -70,7 +69,7 @@ class TransformerEncoder(nn.Module): if seq_mask is None: atte_mask_out = None else: - atte_mask_out = (seq_mask < 1)[:, None, :] + atte_mask_out = (seq_mask == 0)[:, None, :] seq_mask = seq_mask[:, :, None] for layer in self.layers: output = layer(output, seq_mask, atte_mask_out) From 4440801dbfc9bea20a86be6ceeb1431f5d020681 Mon Sep 17 00:00:00 2001 From: Yige Xu Date: Sun, 1 Sep 2019 01:19:10 +0800 Subject: [PATCH 21/50] 1. update bert.py and fix a bug in bert_embedding to adapt torch 1.2.0; 2. update models/bert.py and add BertForSentenceMatching model, now a BertEmbedding param should be passed to these five models; 3. create a small bert version for testing and modify test/models/test_bert.py; 4. move small glove and word2vec files to data_for_tests/embedding/small_static_embedding dir and fix relevant test codes; 5. delete some __init__.py files in test dir. 
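A minimal usage sketch of the reworked interface described above, where the model now receives a BertEmbedding instead of a model directory; the tiny vocabulary, the 'en-base-uncased' checkpoint name and num_labels=2 below are illustrative placeholders only, not part of this patch's test setup:

```python
import torch
from fastNLP import Vocabulary
from fastNLP.core.const import Const
from fastNLP.embeddings import BertEmbedding
from fastNLP.models.bert import BertForSequenceClassification

# build a toy vocabulary; any Vocabulary works the same way
vocab = Vocabulary()
vocab.add_word_lst("this is a test .".split())

# include_cls_sep=True is expected by the classification head (it pools the [CLS] position)
embed = BertEmbedding(vocab, model_dir_or_name='en-base-uncased', include_cls_sep=True)
model = BertForSequenceClassification(embed, num_labels=2)

words = torch.LongTensor([[vocab.to_index(w) for w in "this is a test .".split()]])
logits = model(words)[Const.OUTPUT]        # raw scores, shape [batch_size, num_labels]
pred = model.predict(words)[Const.OUTPUT]  # argmax over the label dimension
```

The same pattern applies to the other four models updated in this patch: construct a BertEmbedding over the task vocabulary once, then pass it as the first argument.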
--- fastNLP/embeddings/bert_embedding.py | 2 +- fastNLP/models/bert.py | 373 ++++++------------ fastNLP/modules/encoder/bert.py | 4 +- test/__init__.py | 3 - .../embedding/small_bert/config.json | 13 + .../small_bert/small_pytorch_model.bin | Bin 0 -> 37965 bytes .../embedding/small_bert/vocab.txt | 20 + .../glove.6B.50d_test.txt | 0 .../small_static_embedding}/word2vec_test.txt | 0 test/embeddings/__init__.py | 0 test/embeddings/test_bert_embedding.py | 11 +- test/embeddings/test_static_embedding.py | 6 +- test/io/test_embed_loader.py | 8 +- test/models/__init__.py | 0 test/models/test_bert.py | 86 ++-- test/modules/__init__.py | 0 test/modules/decoder/__init__.py | 0 17 files changed, 225 insertions(+), 301 deletions(-) delete mode 100644 test/__init__.py create mode 100644 test/data_for_tests/embedding/small_bert/config.json create mode 100644 test/data_for_tests/embedding/small_bert/small_pytorch_model.bin create mode 100644 test/data_for_tests/embedding/small_bert/vocab.txt rename test/data_for_tests/{ => embedding/small_static_embedding}/glove.6B.50d_test.txt (100%) rename test/data_for_tests/{ => embedding/small_static_embedding}/word2vec_test.txt (100%) delete mode 100644 test/embeddings/__init__.py delete mode 100644 test/models/__init__.py delete mode 100644 test/modules/__init__.py delete mode 100644 test/modules/decoder/__init__.py diff --git a/fastNLP/embeddings/bert_embedding.py b/fastNLP/embeddings/bert_embedding.py index d1a5514a..f6c36623 100644 --- a/fastNLP/embeddings/bert_embedding.py +++ b/fastNLP/embeddings/bert_embedding.py @@ -393,7 +393,7 @@ class _WordBertModel(nn.Module): batch_indexes = torch.arange(batch_size).to(words) word_pieces[batch_indexes, word_pieces_lengths + 1] = self._sep_index if self._has_sep_in_vocab: # 但[SEP]在vocab中出现应该才会需要token_ids - sep_mask = word_pieces.eq(self._sep_index) # batch_size x max_len + sep_mask = word_pieces.eq(self._sep_index).long() # batch_size x max_len sep_mask_cumsum = sep_mask.flip(dims=[-1]).cumsum(dim=-1).flip(dims=[-1]) token_type_ids = sep_mask_cumsum.fmod(2) if token_type_ids[0, 0].item(): # 如果开头是奇数,则需要flip一下结果,因为需要保证开头为0 diff --git a/fastNLP/models/bert.py b/fastNLP/models/bert.py index 0a89b765..08f16db2 100644 --- a/fastNLP/models/bert.py +++ b/fastNLP/models/bert.py @@ -5,253 +5,145 @@ bert.py is modified from huggingface/pytorch-pretrained-BERT, which is licensed __all__ = [] -import os +import warnings import torch from torch import nn from .base_model import BaseModel from ..core.const import Const -from ..core.utils import seq_len_to_mask +from ..core._logger import logger from ..modules.encoder import BertModel from ..modules.encoder.bert import BertConfig, CONFIG_FILE +from ..embeddings.bert_embedding import BertEmbedding class BertForSequenceClassification(BaseModel): """BERT model for classification. - This module is composed of the BERT model with a linear layer on top of - the pooled output. - Params: - `config`: a BertConfig class instance with the configuration to build a new model. - `num_labels`: the number of classes for the classifier. Default = 2. - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] - with the word token indices in the vocabulary. Items in the batch should begin with the special "CLS" token. (see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token - types indices selected in [0, 1]. 
Type 0 corresponds to a `sentence A` and type 1 corresponds to - a `sentence B` token (see BERT paper for more details). - `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices - selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] - with indices selected in [0, ..., num_labels]. - Outputs: - if `labels` is not `None`: - Outputs the CrossEntropy classification loss of the output with the labels. - if `labels` is `None`: - Outputs the classification logits of shape [batch_size, num_labels]. - Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) - input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) - token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) - config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - num_labels = 2 - model = BertForSequenceClassification(num_labels, config) - logits = model(input_ids, token_type_ids, input_mask) - ``` """ - def __init__(self, num_labels, config=None, bert_dir=None): + def __init__(self, init_embed: BertEmbedding, num_labels: int=2): super(BertForSequenceClassification, self).__init__() + self.num_labels = num_labels - if bert_dir is not None: - self.bert = BertModel.from_pretrained(bert_dir) - config = BertConfig(os.path.join(bert_dir, CONFIG_FILE)) - else: - if config is None: - config = BertConfig(30522) - self.bert = BertModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, num_labels) - - @classmethod - def from_pretrained(cls, num_labels, pretrained_model_dir): - config = BertConfig(pretrained_model_dir) - model = cls(num_labels=num_labels, config=config, bert_dir=pretrained_model_dir) - return model - - def forward(self, words, seq_len=None, target=None): - if seq_len is None: - seq_len = torch.ones_like(words, dtype=words.dtype, device=words.device) - if len(seq_len.size()) + 1 == len(words.size()): - seq_len = seq_len_to_mask(seq_len, max_len=words.size(-1)) - _, pooled_output = self.bert(words, attention_mask=seq_len, output_all_encoded_layers=False) - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) + self.bert = init_embed + self.dropout = nn.Dropout(0.1) + self.classifier = nn.Linear(self.bert.embedding_dim, num_labels) + + if not self.bert.model.include_cls_sep: + warn_msg = "Bert for sequence classification excepts BertEmbedding `include_cls_sep` True, but got False." + logger.warn(warn_msg) + warnings.warn(warn_msg) + + def forward(self, words): + hidden = self.dropout(self.bert(words)) + cls_hidden = hidden[:, 0] + logits = self.classifier(cls_hidden) + + return {Const.OUTPUT: logits} + + def predict(self, words): + logits = self.forward(words)[Const.OUTPUT] + return {Const.OUTPUT: torch.argmax(logits, dim=-1)} + + +class BertForSentenceMatching(BaseModel): + + """BERT model for matching. 
+ """ + def __init__(self, init_embed: BertEmbedding, num_labels: int=2): + super(BertForSentenceMatching, self).__init__() + self.num_labels = num_labels + self.bert = init_embed + self.dropout = nn.Dropout(0.1) + self.classifier = nn.Linear(self.bert.embedding_dim, num_labels) + + if not self.bert.model.include_cls_sep: + error_msg = "Bert for sentence matching excepts BertEmbedding `include_cls_sep` True, but got False." + logger.error(error_msg) + raise RuntimeError(error_msg) - if target is not None: - loss_fct = nn.CrossEntropyLoss() - loss = loss_fct(logits, target) - return {Const.OUTPUT: logits, Const.LOSS: loss} - else: - return {Const.OUTPUT: logits} + def forward(self, words): + hidden = self.dropout(self.bert(words)) + cls_hidden = hidden[:, 0] + logits = self.classifier(cls_hidden) - def predict(self, words, seq_len=None): - logits = self.forward(words, seq_len=seq_len)[Const.OUTPUT] + return {Const.OUTPUT: logits} + + def predict(self, words): + logits = self.forward(words)[Const.OUTPUT] return {Const.OUTPUT: torch.argmax(logits, dim=-1)} class BertForMultipleChoice(BaseModel): """BERT model for multiple choice tasks. - This module is composed of the BERT model with a linear layer on top of - the pooled output. - Params: - `config`: a BertConfig class instance with the configuration to build a new model. - `num_choices`: the number of classes for the classifier. Default = 2. - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] - with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] - with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` - and type 1 corresponds to a `sentence B` token (see BERT paper for more details). - `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices - selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] - with indices selected in [0, ..., num_choices]. - Outputs: - if `labels` is not `None`: - Outputs the CrossEntropy classification loss of the output with the labels. - if `labels` is `None`: - Outputs the classification logits of shape [batch_size, num_labels]. 
- Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]]) - input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]]) - token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]]) - config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - num_choices = 2 - model = BertForMultipleChoice(num_choices, config, bert_dir) - logits = model(input_ids, token_type_ids, input_mask) - ``` """ - def __init__(self, num_choices, config=None, bert_dir=None): + def __init__(self, init_embed: BertEmbedding, num_choices=2): super(BertForMultipleChoice, self).__init__() + self.num_choices = num_choices - if bert_dir is not None: - self.bert = BertModel.from_pretrained(bert_dir) - else: - if config is None: - config = BertConfig(30522) - self.bert = BertModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, 1) - - @classmethod - def from_pretrained(cls, num_choices, pretrained_model_dir): - config = BertConfig(pretrained_model_dir) - model = cls(num_choices=num_choices, config=config, bert_dir=pretrained_model_dir) - return model - - def forward(self, words, seq_len1=None, seq_len2=None, target=None): - input_ids, token_type_ids, attention_mask = words, seq_len1, seq_len2 - flat_input_ids = input_ids.view(-1, input_ids.size(-1)) - flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) - flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) - _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False) + self.bert = init_embed + self.dropout = nn.Dropout(0.1) + self.classifier = nn.Linear(self.bert.embedding_dim, 1) + self.include_cls_sep = init_embed.model.include_cls_sep + + if not self.bert.model.include_cls_sep: + error_msg = "Bert for multiple choice excepts BertEmbedding `include_cls_sep` True, but got False." + logger.error(error_msg) + raise RuntimeError(error_msg) + + def forward(self, words): + """ + :param torch.Tensor words: [batch_size, num_choices, seq_len] + :return: [batch_size, num_labels] + """ + batch_size, num_choices, seq_len = words.size() + + input_ids = words.view(batch_size * num_choices, seq_len) + hidden = self.bert(input_ids) + pooled_output = hidden[:, 0] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) - if target is not None: - loss_fct = nn.CrossEntropyLoss() - loss = loss_fct(reshaped_logits, target) - return {Const.OUTPUT: reshaped_logits, Const.LOSS: loss} - else: - return {Const.OUTPUT: reshaped_logits} + return {Const.OUTPUT: reshaped_logits} - def predict(self, words, seq_len1=None, seq_len2=None,): - logits = self.forward(words, seq_len1=seq_len1, seq_len2=seq_len2)[Const.OUTPUT] + def predict(self, words): + logits = self.forward(words)[Const.OUTPUT] return {Const.OUTPUT: torch.argmax(logits, dim=-1)} class BertForTokenClassification(BaseModel): """BERT model for token-level classification. - This module is composed of the BERT model with a linear layer on top of - the full hidden state of the last layer. - Params: - `config`: a BertConfig class instance with the configuration to build a new model. - `num_labels`: the number of classes for the classifier. Default = 2. 
- `bert_dir`: a dir which contains the bert parameters within file `pytorch_model.bin` - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] - with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token - types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to - a `sentence B` token (see BERT paper for more details). - `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices - selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length] - with indices selected in [0, ..., num_labels]. - Outputs: - if `labels` is not `None`: - Outputs the CrossEntropy classification loss of the output with the labels. - if `labels` is `None`: - Outputs the classification logits of shape [batch_size, sequence_length, num_labels]. - Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) - input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) - token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) - config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - num_labels = 2 - bert_dir = 'your-bert-file-dir' - model = BertForTokenClassification(num_labels, config, bert_dir) - logits = model(input_ids, token_type_ids, input_mask) - ``` """ - def __init__(self, num_labels, config=None, bert_dir=None): + def __init__(self, init_embed: BertEmbedding, num_labels): super(BertForTokenClassification, self).__init__() + self.num_labels = num_labels - if bert_dir is not None: - self.bert = BertModel.from_pretrained(bert_dir) - else: - if config is None: - config = BertConfig(30522) - self.bert = BertModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, num_labels) - - @classmethod - def from_pretrained(cls, num_labels, pretrained_model_dir): - config = BertConfig(pretrained_model_dir) - model = cls(num_labels=num_labels, config=config, bert_dir=pretrained_model_dir) - return model - - def forward(self, words, seq_len1=None, seq_len2=None, target=None): - sequence_output, _ = self.bert(words, seq_len1, seq_len2, output_all_encoded_layers=False) + self.bert = init_embed + self.dropout = nn.Dropout(0.1) + self.classifier = nn.Linear(self.bert.embedding_dim, num_labels) + self.include_cls_sep = init_embed.model.include_cls_sep + + if self.include_cls_sep: + warn_msg = "Bert for token classification excepts BertEmbedding `include_cls_sep` False, but got True." 
+ warnings.warn(warn_msg) + logger.warn(warn_msg) + + def forward(self, words): + """ + :param torch.Tensor words: [batch_size, seq_len] + :return: [batch_size, seq_len, num_labels] + """ + sequence_output = self.bert(words) + if self.include_cls_sep: + sequence_output = sequence_output[:, 1: -1] # [batch_size, seq_len, embed_dim] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) - if target is not None: - loss_fct = nn.CrossEntropyLoss() - # Only keep active parts of the loss - if seq_len2 is not None: - active_loss = seq_len2.view(-1) == 1 - active_logits = logits.view(-1, self.num_labels)[active_loss] - active_labels = target.view(-1)[active_loss] - loss = loss_fct(active_logits, active_labels) - else: - loss = loss_fct(logits.view(-1, self.num_labels), target.view(-1)) - return {Const.OUTPUT: logits, Const.LOSS: loss} - else: - return {Const.OUTPUT: logits} - - def predict(self, words, seq_len1=None, seq_len2=None): - logits = self.forward(words, seq_len1, seq_len2)[Const.OUTPUT] + return {Const.OUTPUT: logits} + + def predict(self, words): + logits = self.forward(words)[Const.OUTPUT] return {Const.OUTPUT: torch.argmax(logits, dim=-1)} @@ -298,53 +190,24 @@ class BertForQuestionAnswering(BaseModel): start_logits, end_logits = model(input_ids, token_type_ids, input_mask) ``` """ - def __init__(self, config=None, bert_dir=None): + def __init__(self, init_embed: BertEmbedding, num_labels=2): super(BertForQuestionAnswering, self).__init__() - if bert_dir is not None: - self.bert = BertModel.from_pretrained(bert_dir) - else: - if config is None: - config = BertConfig(30522) - self.bert = BertModel(config) - # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version - # self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.qa_outputs = nn.Linear(config.hidden_size, 2) - - @classmethod - def from_pretrained(cls, pretrained_model_dir): - config = BertConfig(pretrained_model_dir) - model = cls(config=config, bert_dir=pretrained_model_dir) - return model - - def forward(self, words, seq_len1=None, seq_len2=None, target1=None, target2=None): - sequence_output, _ = self.bert(words, seq_len1, seq_len2, output_all_encoded_layers=False) - logits = self.qa_outputs(sequence_output) - start_logits, end_logits = logits.split(1, dim=-1) - start_logits = start_logits.squeeze(-1) - end_logits = end_logits.squeeze(-1) - - if target1 is not None and target2 is not None: - # If we are on multi-GPU, split add a dimension - if len(target1.size()) > 1: - target1 = target1.squeeze(-1) - if len(target2.size()) > 1: - target2 = target2.squeeze(-1) - # sometimes the start/end positions are outside our model inputs, we ignore these terms - ignored_index = start_logits.size(1) - target1.clamp_(0, ignored_index) - target2.clamp_(0, ignored_index) - - loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index) - start_loss = loss_fct(start_logits, target1) - end_loss = loss_fct(end_logits, target2) - total_loss = (start_loss + end_loss) / 2 - return {Const.OUTPUTS(0): start_logits, Const.OUTPUTS(1): end_logits, Const.LOSS: total_loss} - else: - return {Const.OUTPUTS(0): start_logits, Const.OUTPUTS(1): end_logits} - - def predict(self, words, seq_len1=None, seq_len2=None): - logits = self.forward(words, seq_len1, seq_len2) - start_logits = logits[Const.OUTPUTS(0)] - end_logits = logits[Const.OUTPUTS(1)] - return {Const.OUTPUTS(0): torch.argmax(start_logits, dim=-1), - Const.OUTPUTS(1): torch.argmax(end_logits, dim=-1)} + + 
self.bert = init_embed + self.num_labels = num_labels + self.qa_outputs = nn.Linear(self.bert.embedding_dim, self.num_labels) + + if not self.bert.model.include_cls_sep: + error_msg = "Bert for multiple choice excepts BertEmbedding `include_cls_sep` True, but got False." + logger.error(error_msg) + raise RuntimeError(error_msg) + + def forward(self, words): + sequence_output = self.bert(words) + logits = self.qa_outputs(sequence_output) # [batch_size, seq_len, num_labels] + + return {Const.OUTPUTS(i): logits[:, :, i] for i in range(self.num_labels)} + + def predict(self, words): + logits = self.forward(words) + return {Const.OUTPUTS(i): torch.argmax(logits[Const.OUTPUTS(i)], dim=-1) for i in range(self.num_labels)} diff --git a/fastNLP/modules/encoder/bert.py b/fastNLP/modules/encoder/bert.py index e73a8172..6f6c4291 100644 --- a/fastNLP/modules/encoder/bert.py +++ b/fastNLP/modules/encoder/bert.py @@ -435,14 +435,14 @@ class BertModel(nn.Module): return encoded_layers, pooled_output @classmethod - def from_pretrained(cls, pretrained_model_dir_or_name, *inputs, **kwargs): + def from_pretrained(cls, model_dir_or_name, *inputs, **kwargs): state_dict = kwargs.get('state_dict', None) kwargs.pop('state_dict', None) kwargs.pop('cache_dir', None) kwargs.pop('from_tf', None) # get model dir from name or dir - pretrained_model_dir = _get_bert_dir(pretrained_model_dir_or_name) + pretrained_model_dir = _get_bert_dir(model_dir_or_name) # Load config config_file = _get_file_name_base_on_postfix(pretrained_model_dir, '.json') diff --git a/test/__init__.py b/test/__init__.py deleted file mode 100644 index c7a5f082..00000000 --- a/test/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -import fastNLP - -__all__ = ["fastNLP"] diff --git a/test/data_for_tests/embedding/small_bert/config.json b/test/data_for_tests/embedding/small_bert/config.json new file mode 100644 index 00000000..3e516872 --- /dev/null +++ b/test/data_for_tests/embedding/small_bert/config.json @@ -0,0 +1,13 @@ +{ + "attention_probs_dropout_prob": 0.1, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 16, + "initializer_range": 0.02, + "intermediate_size": 64, + "max_position_embeddings": 32, + "num_attention_heads": 4, + "num_hidden_layers": 2, + "type_vocab_size": 2, + "vocab_size": 20 +} \ No newline at end of file diff --git a/test/data_for_tests/embedding/small_bert/small_pytorch_model.bin b/test/data_for_tests/embedding/small_bert/small_pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..fe968fb5d64a87b224d0ed9d793e6bf3aeb70971 GIT binary patch literal 37965 zcmd43c|29^_y3PjhL9o3kR*-f@tkXIMMWj0)1*N~R8ly{ku=?foKO)`5}_hVNebs$ zyFo>BZYpV>=TsUs{x+xk{eItlZlB-#_kDc-_<20;wbx!<`&`fS+Sk7Jy4K$3UZs$z zy71dJk5>#NEI#nItGvm%0*21N)vtq>~q_f_J85%a@DVxf|kf^R2o zFE~^zRQ6KzRc!l=u+VwJ&R&X`5i-N>KB2}S62TjC5p%G!CS&Qvd zg`N8M_wC#U$$gmryda^X>=0Ftg<<~U39_K7P{~(=Ys0g5apJkS*g4xdI5|2AmD|71 zobX6tXYZl%W5vQQxaerInv1rAQP4bHR;PEQ8cJ)yl@eX-C$AnYMy=qV8z3WP?!ga3g+92OE3 zI;;H*|Id?Q{ErkajhpG&W4M$SO)U3i@~0v9mMJPB?(;Sc~g;U|YvpYlMTm>nYt@32cpp1qUM;a8WAzq)j4>(W`)rHfd|6A1aT zE{Q}40wLpT^RF&L=Y(-1#A;#Nm$b66@)vUhwe7T9MFcIJYbA^f5=H;@y|a9o{}LjgO^7)% zfIzV@NFbam1DGcf&KC%S|Lak)*neSU&|jlsL5C2I_D(j!kiVo@_%A6I$)pICk%Wnb z;R2yhMk103BLqV6e|<`1B<*5EcF1AxXyYJU{7a0eUt%n26C+xNuv9EuCJ-){A*_%H zR|G5{0{S%L6~4B)6lcuXKX?%ThkGxE1_`RfK+-BIVZ zE{@y{^u%8hoNPZ0UPI%Br#L0JTX&6&pjIqAEfAiO5uB9>&k2O*egB=t{Wg34jiCL@ zU+A!ZCui<%dhypNxb$lj)U|#2KV@Cli-ipW;bmFZS0uu#0%2pv>i_Ece_RjOIs|ZV zaj_9L{Q|iD3*bf@z)cyzEwS*nKzK(6a91L{ClKEMkNoYQzx%%d+Q0vU4!gIvv2zeU 
[... base85-encoded payload of test/data_for_tests/embedding/small_bert/small_pytorch_model.bin (37965 bytes) elided ...]
zeiZDQC&TxDv)Q7G_2}6=5ob&1k+A+OrdGF*Unjp08|sTNi<$FQ!ZzM^cpR_;U%CCp8GQ=$V*5L*O4&qQ7Ip;JyG8O!?+oCm zegs&if92=h84rimjp!=Jop9@PXYsyEy72yh07i{V!o8QuNY8@XXw;H~asCv1paQg1 z>cFFG7xl`!2tErvAY`5iUcGY%rC*2gH~aGF(4Z!4G!B6sFF#VcqXU-f3cv|}DJJWO zE0LRUllP}ojTv<6#AD4jawX{HbjfDkNQjm_5ESYQy#T7E$Ls) zt@OM28wfLb&+!?7S=il$)vnHTi`Q<9KCzC}=yJUmxko&~h1n>k@|1ilk0!~1GH|SL zDx-d}6Arm^TxAn!Fy{DZ^OrcIos>3P@>?Ihx$M%1PZ3mGr~was*$-D8oWWt&CeX9_ z3{%hcQZ;b}3^9ttzL`O|DgH1v%@<`xYmY%y{c3Q{t3`Kndm7@N50YO;fZ4Jdm{3j^ zy5x&>JCC8ukv(9&Ukf|BOt2#&AKgxwu{LeXu}N4MHY^+gov(3ZweB1yVWK#QIjS%(u)E$|$=J*$mg>6Vp zEW*hG<27W{+Hq{9b{gOH*$w)pXcn8}vL33en=tnNJ6ba*h(_AoMwPdgIM!ti9l0NH zNA)-8%{omjFAL$@6T*{&(lT0D`BvO5n0 zBa1*ip5smah@j?=r!Y=-F~|=W!IdtuXp?;%kIdkcNM-}b{Ss%$;3O{Cqyk&YWm%cR zZYU~igKrZX=uz89@?~u$@4KEZma_Jgc{mZbIv&C;WujbvrWWuVibFjRl*z%dVZxU-3e25G0D+DMK5$ch0s z0XJ~Ce3cjW=r+oji_%HY2qpSv=&j!a20cCGlj=26Q-l!x#1Gk>N_eO)0y?eou;1(< zy`Q4NtNu8i70h`@9HWD9&73gk7U1$XLYj Date: Sun, 1 Sep 2019 02:00:03 +0800 Subject: [PATCH 22/50] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dsequence=20labeling=20?= =?UTF-8?q?=E6=B5=8B=E8=AF=95=E6=8A=A5=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/models/sequence_labeling.py | 41 ++++++++++----------------- test/models/test_sequence_labeling.py | 17 ++++++++++- 2 files changed, 31 insertions(+), 27 deletions(-) diff --git a/fastNLP/models/sequence_labeling.py b/fastNLP/models/sequence_labeling.py index 0c573a90..6e839bea 100644 --- a/fastNLP/models/sequence_labeling.py +++ b/fastNLP/models/sequence_labeling.py @@ -39,14 +39,14 @@ class BiLSTMCRF(BaseModel): self.embed = get_embeddings(embed) if num_layers>1: - self.lstm = LSTM(embed.embedding_dim, num_layers=num_layers, hidden_size=hidden_size, bidirectional=True, + self.lstm = LSTM(self.embed.embedding_dim, num_layers=num_layers, hidden_size=hidden_size, bidirectional=True, batch_first=True, dropout=dropout) else: - self.lstm = LSTM(embed.embedding_dim, num_layers=num_layers, hidden_size=hidden_size, bidirectional=True, + self.lstm = LSTM(self.embed.embedding_dim, num_layers=num_layers, hidden_size=hidden_size, bidirectional=True, batch_first=True) self.dropout = nn.Dropout(dropout) - self.fc = nn.Linear(hidden_size, num_classes) + self.fc = nn.Linear(hidden_size*2, num_classes) trans = None if target_vocab is not None and encoding_type is not None: @@ -56,7 +56,7 @@ class BiLSTMCRF(BaseModel): def _forward(self, words, seq_len=None, target=None): words = self.embed(words) - feats = self.lstm(words, seq_len=seq_len) + feats, _ = self.lstm(words, seq_len=seq_len) feats = self.fc(feats) feats = self.dropout(feats) logits = F.log_softmax(feats, dim=-1) @@ -142,8 +142,6 @@ class SeqLabeling(BaseModel): """ x = x.float() y = y.long() - assert x.shape[:2] == y.shape - assert y.shape == self.mask.shape total_loss = self.crf(x, y, mask) return torch.mean(total_loss) @@ -195,36 +193,29 @@ class AdvSeqLabel(nn.Module): allowed_transitions=allowed_transitions(id2words, encoding_type=encoding_type)) - def _decode(self, x): + def _decode(self, x, mask): """ :param torch.FloatTensor x: [batch_size, max_len, tag_size] + :param torch.ByteTensor mask: [batch_size, max_len] :return torch.LongTensor, [batch_size, max_len] """ - tag_seq, _ = self.Crf.viterbi_decode(x, self.mask) + tag_seq, _ = self.Crf.viterbi_decode(x, mask) return tag_seq - def _internal_loss(self, x, y): + def _internal_loss(self, x, y, mask): """ Negative log likelihood loss. 
:param x: Tensor, [batch_size, max_len, tag_size] :param y: Tensor, [batch_size, max_len] + :param mask: Tensor, [batch_size, max_len] :return loss: a scalar Tensor """ x = x.float() y = y.long() - assert x.shape[:2] == y.shape - assert y.shape == self.mask.shape - total_loss = self.Crf(x, y, self.mask) + total_loss = self.Crf(x, y, mask) return torch.mean(total_loss) - def _make_mask(self, x, seq_len): - batch_size, max_len = x.size(0), x.size(1) - mask = seq_len_to_mask(seq_len) - mask = mask.view(batch_size, max_len) - mask = mask.to(x).float() - return mask - def _forward(self, words, seq_len, target=None): """ :param torch.LongTensor words: [batch_size, mex_len] @@ -236,15 +227,13 @@ class AdvSeqLabel(nn.Module): words = words.long() seq_len = seq_len.long() - self.mask = self._make_mask(words, seq_len) - - # seq_len = seq_len.long() + mask = seq_len_to_mask(seq_len, max_len=words.size(1)) + target = target.long() if target is not None else None if next(self.parameters()).is_cuda: words = words.cuda() - self.mask = self.mask.cuda() - + x = self.Embedding(words) x = self.norm1(x) # [batch_size, max_len, word_emb_dim] @@ -257,9 +246,9 @@ class AdvSeqLabel(nn.Module): x = self.drop(x) x = self.Linear2(x) if target is not None: - return {"loss": self._internal_loss(x, target)} + return {"loss": self._internal_loss(x, target, mask)} else: - return {"pred": self._decode(x)} + return {"pred": self._decode(x, mask)} def forward(self, words, seq_len, target): """ diff --git a/test/models/test_sequence_labeling.py b/test/models/test_sequence_labeling.py index 3a70e381..815d7047 100644 --- a/test/models/test_sequence_labeling.py +++ b/test/models/test_sequence_labeling.py @@ -3,9 +3,24 @@ import unittest from .model_runner import * -from fastNLP.models.sequence_labeling import SeqLabeling, AdvSeqLabel +from fastNLP.models.sequence_labeling import SeqLabeling, AdvSeqLabel, BiLSTMCRF from fastNLP.core.losses import LossInForward +class TestBiLSTM(unittest.TestCase): + def test_case1(self): + # 测试能否正常运行CNN + init_emb = (VOCAB_SIZE, 30) + model = BiLSTMCRF(init_emb, + hidden_size=30, + num_classes=NUM_CLS) + + data = RUNNER.prepare_pos_tagging_data() + data.set_input('target') + loss = LossInForward() + metric = AccuracyMetric(pred=C.OUTPUT, target=C.TARGET, seq_len=C.INPUT_LEN) + RUNNER.run_model(model, data, loss, metric) + + class TesSeqLabel(unittest.TestCase): def test_case1(self): # 测试能否正常运行CNN From 091f24e393f434eba66937af65adcbcd8ea3d3cf Mon Sep 17 00:00:00 2001 From: Yige Xu Date: Sun, 1 Sep 2019 10:15:11 +0800 Subject: [PATCH 23/50] fix some bugs in test code. 
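For reference, a minimal sketch (illustrative values; the dtype of the returned mask depends on the fastNLP/torch version) of the seq_len_to_mask helper that the sequence-labeling fix above now uses to build the CRF mask per batch instead of caching self.mask:

```python
import torch
from fastNLP.core.utils import seq_len_to_mask

seq_len = torch.LongTensor([3, 1])          # two sequences of length 3 and 1
mask = seq_len_to_mask(seq_len, max_len=4)  # shape [2, 4]
# positions beyond each length are masked out, e.g.
# [[1, 1, 1, 0],
#  [1, 0, 0, 0]]
```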
--- test/__init__.py | 3 +++ test/core/test_utils.py | 17 +++++++++++------ test/models/__init__.py | 0 test/models/test_bert.py | 2 +- 4 files changed, 15 insertions(+), 7 deletions(-) create mode 100644 test/__init__.py create mode 100644 test/models/__init__.py diff --git a/test/__init__.py b/test/__init__.py new file mode 100644 index 00000000..c7a5f082 --- /dev/null +++ b/test/__init__.py @@ -0,0 +1,3 @@ +import fastNLP + +__all__ = ["fastNLP"] diff --git a/test/core/test_utils.py b/test/core/test_utils.py index 363d5fa1..29645fb1 100644 --- a/test/core/test_utils.py +++ b/test/core/test_utils.py @@ -119,7 +119,8 @@ class TestCache(unittest.TestCase): def test_cache_save(self): try: start_time = time.time() - embed, vocab, d = process_data_1('test/data_for_tests/word2vec_test.txt', 'test/data_for_tests/cws_train') + embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt', + 'test/data_for_tests/cws_train') end_time = time.time() pre_time = end_time - start_time with open('test/demo1.pkl', 'rb') as f: @@ -128,7 +129,8 @@ class TestCache(unittest.TestCase): for i in range(embed.shape[0]): self.assertListEqual(embed[i].tolist(), _embed[i].tolist()) start_time = time.time() - embed, vocab, d = process_data_1('test/data_for_tests/word2vec_test.txt', 'test/data_for_tests/cws_train') + embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt', + 'test/data_for_tests/cws_train') end_time = time.time() read_time = end_time - start_time print("Read using {:.3f}, while prepare using:{:.3f}".format(read_time, pre_time)) @@ -139,7 +141,7 @@ class TestCache(unittest.TestCase): def test_cache_save_overwrite_path(self): try: start_time = time.time() - embed, vocab, d = process_data_1('test/data_for_tests/word2vec_test.txt', 'test/data_for_tests/cws_train', + embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt', 'test/data_for_tests/cws_train', _cache_fp='test/demo_overwrite.pkl') end_time = time.time() pre_time = end_time - start_time @@ -149,7 +151,8 @@ class TestCache(unittest.TestCase): for i in range(embed.shape[0]): self.assertListEqual(embed[i].tolist(), _embed[i].tolist()) start_time = time.time() - embed, vocab, d = process_data_1('test/data_for_tests/word2vec_test.txt', 'test/data_for_tests/cws_train', + embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt', + 'test/data_for_tests/cws_train', _cache_fp='test/demo_overwrite.pkl') end_time = time.time() read_time = end_time - start_time @@ -161,7 +164,8 @@ class TestCache(unittest.TestCase): def test_cache_refresh(self): try: start_time = time.time() - embed, vocab, d = process_data_1('test/data_for_tests/word2vec_test.txt', 'test/data_for_tests/cws_train', + embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt', + 'test/data_for_tests/cws_train', _refresh=True) end_time = time.time() pre_time = end_time - start_time @@ -171,7 +175,8 @@ class TestCache(unittest.TestCase): for i in range(embed.shape[0]): self.assertListEqual(embed[i].tolist(), _embed[i].tolist()) start_time = time.time() - embed, vocab, d = process_data_1('test/data_for_tests/word2vec_test.txt', 'test/data_for_tests/cws_train', + embed, vocab, d = process_data_1('test/data_for_tests/embedding/small_static_embedding/word2vec_test.txt', + 'test/data_for_tests/cws_train', _refresh=True) end_time = time.time() read_time = end_time 
- start_time diff --git a/test/models/__init__.py b/test/models/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/models/test_bert.py b/test/models/test_bert.py index 2b310edf..969a8594 100644 --- a/test/models/test_bert.py +++ b/test/models/test_bert.py @@ -82,7 +82,7 @@ class TestBert(unittest.TestCase): def test_bert_5(self): vocab = Vocabulary().add_word_lst("this is a test [SEP] .".split()) - embed = BertEmbedding(vocab, model_dir_or_name='./../data_for_tests/embedding/small_bert', + embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert', include_cls_sep=True) model = BertForSentenceMatching(embed) From 1c2ee50c47b0b59b81a828838bf531c54fea5181 Mon Sep 17 00:00:00 2001 From: yunfan Date: Sun, 1 Sep 2019 10:31:14 +0800 Subject: [PATCH 24/50] [fix] EchoCallback --- fastNLP/core/callback.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fastNLP/core/callback.py b/fastNLP/core/callback.py index dde9a31a..5167b09f 100644 --- a/fastNLP/core/callback.py +++ b/fastNLP/core/callback.py @@ -1031,12 +1031,11 @@ class EchoCallback(Callback): def __init__(self, name, out=sys.stdout): super(EchoCallback, self).__init__() self.name = name - self.out = out + self.out = out # deprecated def __getattribute__(self, item): if item.startswith('on_'): - logger.info('{}.{} has been called at pid: {}'.format(self.name, item, os.getpid()), - file=self.out) + logger.info('{}.{} has been called at pid: {}'.format(self.name, item, os.getpid())) return super(EchoCallback, self).__getattribute__(item) From b9aa05f6cf371a9ceb99463c445fa000a724fa21 Mon Sep 17 00:00:00 2001 From: Yige Xu Date: Sun, 1 Sep 2019 11:22:42 +0800 Subject: [PATCH 25/50] add testing codes and data for loader and pipe. 
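A rough sketch of how the new fixture files might be exercised in the added loader tests (the exact test bodies are not reproduced here; the loader name and the dict-of-paths call follow fastNLP.io conventions and should be treated as an assumption):

```python
from fastNLP.io.loader import IMDBLoader

paths = {'train': 'test/data_for_tests/io/imdb/train.txt',
         'dev': 'test/data_for_tests/io/imdb/dev.txt',
         'test': 'test/data_for_tests/io/imdb/test.txt'}
data_bundle = IMDBLoader().load(paths)  # a DataBundle holding one DataSet per split
print(data_bundle)
```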
--- test/data_for_tests/io/cws_msra/dev.txt | 2 ++ test/data_for_tests/io/cws_msra/test.txt | 2 ++ test/data_for_tests/io/cws_msra/train.txt | 3 +++ test/data_for_tests/io/imdb/dev.txt | 2 ++ test/data_for_tests/io/imdb/test.txt | 2 ++ test/data_for_tests/io/imdb/train.txt | 2 ++ test/data_for_tests/io/rte/dev.tsv | 3 +++ test/data_for_tests/io/rte/test.tsv | 3 +++ test/data_for_tests/io/rte/train.tsv | 4 ++++ test/io/loader/test_classification_loader.py | 8 ++++++++ test/io/loader/test_conll_loader.py | 14 ++++++++++++-- test/io/loader/test_cws_loader.py | 13 ++++++++++++- test/io/loader/test_matching_loader.py | 8 ++++++++ test/io/pipe/test_classification.py | 8 ++++++++ test/io/pipe/test_conll.py | 14 ++++++++++++-- test/io/pipe/test_cws.py | 12 +++++++++++- test/io/pipe/test_matching.py | 8 ++++++++ 17 files changed, 102 insertions(+), 6 deletions(-) create mode 100644 test/data_for_tests/io/cws_msra/dev.txt create mode 100644 test/data_for_tests/io/cws_msra/test.txt create mode 100644 test/data_for_tests/io/cws_msra/train.txt create mode 100644 test/data_for_tests/io/imdb/dev.txt create mode 100644 test/data_for_tests/io/imdb/test.txt create mode 100644 test/data_for_tests/io/imdb/train.txt create mode 100644 test/data_for_tests/io/rte/dev.tsv create mode 100644 test/data_for_tests/io/rte/test.tsv create mode 100644 test/data_for_tests/io/rte/train.tsv diff --git a/test/data_for_tests/io/cws_msra/dev.txt b/test/data_for_tests/io/cws_msra/dev.txt new file mode 100644 index 00000000..9c6b34ee --- /dev/null +++ b/test/data_for_tests/io/cws_msra/dev.txt @@ -0,0 +1,2 @@ +“ 人们 常 说 生活 是 一 部 教科书 , 而 血 与 火 的 战争 更 是 不可多得 的 教科书 , 她 确实 是 名副其实 的 ‘ 我 的 大学 ’ 。 +他 “ 严格要求 自己 , 从 一个 科举 出身 的 进士 成为 一个 伟大 的 民主主义 者 , 进而 成为 一 位 杰出 的 党外 共产主义 战士 , 献身 于 崇高 的 共产主义 事业 。 diff --git a/test/data_for_tests/io/cws_msra/test.txt b/test/data_for_tests/io/cws_msra/test.txt new file mode 100644 index 00000000..8d5c6b3c --- /dev/null +++ b/test/data_for_tests/io/cws_msra/test.txt @@ -0,0 +1,2 @@ +扬帆 远东 做 与 中国 合作 的 先行 +希腊 的 经济 结构 较 特殊 。 diff --git a/test/data_for_tests/io/cws_msra/train.txt b/test/data_for_tests/io/cws_msra/train.txt new file mode 100644 index 00000000..35c2cad0 --- /dev/null +++ b/test/data_for_tests/io/cws_msra/train.txt @@ -0,0 +1,3 @@ +“ 心 静 渐 知 春 似 海 , 花 深 每 觉 影 生 香 。 +“ 吃 屎 的 东西 , 连 一 捆 麦 也 铡 不 动 呀 ? +复旦大学 百年 校庆 。 \ No newline at end of file diff --git a/test/data_for_tests/io/imdb/dev.txt b/test/data_for_tests/io/imdb/dev.txt new file mode 100644 index 00000000..6b548a0c --- /dev/null +++ b/test/data_for_tests/io/imdb/dev.txt @@ -0,0 +1,2 @@ +neg It, at all, you have seen when harry met sally, then avoid this one. It will not only make you bang your head on the table as why can't bollywood even make a good remake; but also annoy you with the so called funny moments in it. The charm of the movie is missing. Ranee looks terrible. Saif tries to act like he is one hell of an actor. The plots that have been picked up from the original, don't look effective either. The part where both of them bring their friends along and they hit a note, it just doesn't look appealing. What can be more disastrous? you wanna waste some money, this is what you can get. Otherwise, put some more bucks, and watch the original. Its too good to miss.. +neg The monster from Enemy Mine somehow made his way into a small mountain community, where he has taken up residence. He's being hunted by a female doctor-turned-vigilante who is out to exterminate him. 
This female assassin, who looks like a refugee from a Motley Crue video, rides around on a motorcycle and tries to save a bunch of kids who have chosen to have a Big Chill weekend right smack dab in the middle of the monster's turf. Decapitations and lots of blood are primarily in place to draw attention away from the story which limps along like a bad version of the Island of Dr. Moreau (and yes, it's worse than the one with Val Kilmer). diff --git a/test/data_for_tests/io/imdb/test.txt b/test/data_for_tests/io/imdb/test.txt new file mode 100644 index 00000000..c9bfae74 --- /dev/null +++ b/test/data_for_tests/io/imdb/test.txt @@ -0,0 +1,2 @@ +neg Alan Rickman & Emma Thompson give good performances with southern/New Orleans accents in this detective flick. It's worth seeing for their scenes- and Rickman's scene with Hal Holbrook. These three actors mannage to entertain us no matter what the movie, it seems. The plot for the movie shows potential, but one gets the impression in watching the film that it was not pulled off as well as it could have been. The fact that it is cluttered by a rather uninteresting subplot and mostly uninteresting kidnappers really muddles things. The movie is worth a view- if for nothing more than entertaining performances by Rickman, Thompson, and Holbrook. +neg I have seen this movie and I did not care for this movie anyhow. I would not think about going to Paris because I do not like this country and its national capital. I do not like to learn french anyhow because I do not understand their language. Why would I go to France when I rather go to Germany or the United Kingdom? Germany and the United Kingdom are the nations I tolerate. Apparently the Olsen Twins do not understand the French language just like me. Therefore I will not bother the France trip no matter what. I might as well stick to the United Kingdom and meet single women and play video games if there is a video arcade. That is all. diff --git a/test/data_for_tests/io/imdb/train.txt b/test/data_for_tests/io/imdb/train.txt new file mode 100644 index 00000000..d6ac6b68 --- /dev/null +++ b/test/data_for_tests/io/imdb/train.txt @@ -0,0 +1,2 @@ +neg I'll try to use words to describe this on....
I saw the original, which was good in its own way, but back then I should have feared a sequel.
And I was 'afraid' when I picked this one up, but now that I've seen it, I have to say, it's even worse then I thought. Why these movies still get money still makes my mind spin.
Let's start with the actors;they aren't all that good, but it has to be said, some make heads turn by being just plain awful. But what can an actor do with a script like this one. It's trying to be a copy of the original only this time the places have changed, any form of story is gone and any attempt of actually coming up with something that hasn't been done before, fails miserably. In a futile attempt to get it up-to-date, they try to make it exciting by making use of the whole 'big-brother' theme , but that has been worn out ages ago and offers nothing but a filler for between the beginning and the end. An attempt was made to try to save the movie by making a ton of references to the '83 original, but it just ended up being plain funny and sometimes a bit sad. In conclusion, if you have nothing , and I mean nothing , to do... go watch it, or play Frisbee... with the DVD.... by yourself. It'll offer you the same amount of fun.. I promise +pos This movie is totally wicked! It's really great to see MJH in a different role than her Sabrina character! The plot is totally cool, and the characters are excellently written. Definitely one of the best movies!! diff --git a/test/data_for_tests/io/rte/dev.tsv b/test/data_for_tests/io/rte/dev.tsv new file mode 100644 index 00000000..725d7542 --- /dev/null +++ b/test/data_for_tests/io/rte/dev.tsv @@ -0,0 +1,3 @@ +index sentence1 sentence2 label +0 Dana Reeve, the widow of the actor Christopher Reeve, has died of lung cancer at age 44, according to the Christopher Reeve Foundation. Christopher Reeve had an accident. not_entailment +1 Yet, we now are discovering that antibiotics are losing their effectiveness against illness. Disease-causing bacteria are mutating faster than we can come up with new antibiotics to fight the new variations. Bacteria is winning the war against antibiotics. entailment diff --git a/test/data_for_tests/io/rte/test.tsv b/test/data_for_tests/io/rte/test.tsv new file mode 100644 index 00000000..aeceb467 --- /dev/null +++ b/test/data_for_tests/io/rte/test.tsv @@ -0,0 +1,3 @@ +index sentence1 sentence2 +0 Mangla was summoned after Madhumita's sister Nidhi Shukla, who was the first witness in the case. Shukla is related to Mangla. +1 Authorities in Brazil say that more than 200 people are being held hostage in a prison in the country's remote, Amazonian-jungle state of Rondonia. Authorities in Brazil hold 200 people as hostage. diff --git a/test/data_for_tests/io/rte/train.tsv b/test/data_for_tests/io/rte/train.tsv new file mode 100644 index 00000000..9f3dab6e --- /dev/null +++ b/test/data_for_tests/io/rte/train.tsv @@ -0,0 +1,4 @@ +index sentence1 sentence2 label +0 No Weapons of Mass Destruction Found in Iraq Yet. Weapons of Mass Destruction Found in Iraq. not_entailment +1 A place of sorrow, after Pope John Paul II died, became a place of celebration, as Roman Catholic faithful gathered in downtown Chicago to mark the installation of new Pope Benedict XVI. Pope Benedict XVI is the new leader of the Roman Catholic Church. entailment +2 Herceptin was already approved to treat the sickest breast cancer patients, and the company said, Monday, it will discuss with federal regulators the possibility of prescribing the drug for more breast cancer patients. Herceptin can be used to treat breast cancer. 
entailment diff --git a/test/io/loader/test_classification_loader.py b/test/io/loader/test_classification_loader.py index 28f08921..1438a014 100644 --- a/test/io/loader/test_classification_loader.py +++ b/test/io/loader/test_classification_loader.py @@ -17,3 +17,11 @@ class TestDownload(unittest.TestCase): for loader in [YelpFullLoader, YelpPolarityLoader, IMDBLoader, SST2Loader, SSTLoader]: data_bundle = loader().load() print(data_bundle) + + +class TestLoad(unittest.TestCase): + + def test_load(self): + for loader in [IMDBLoader]: + data_bundle = loader().load('test/data_for_tests/io/imdb') + print(data_bundle) diff --git a/test/io/loader/test_conll_loader.py b/test/io/loader/test_conll_loader.py index e44b8a2a..861de5a5 100644 --- a/test/io/loader/test_conll_loader.py +++ b/test/io/loader/test_conll_loader.py @@ -1,7 +1,9 @@ import unittest import os -from fastNLP.io.loader.conll import MsraNERLoader, PeopleDailyNERLoader, WeiboNERLoader +from fastNLP.io.loader.conll import MsraNERLoader, PeopleDailyNERLoader, WeiboNERLoader, \ + Conll2003Loader + class MSRANERTest(unittest.TestCase): @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") @@ -10,12 +12,20 @@ class MSRANERTest(unittest.TestCase): data_bundle = MsraNERLoader().load() print(data_bundle) + class PeopleDailyTest(unittest.TestCase): @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") def test_download(self): PeopleDailyNERLoader().download() + class WeiboNERTest(unittest.TestCase): @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") def test_download(self): - WeiboNERLoader().download() \ No newline at end of file + WeiboNERLoader().download() + + +class TestConll2003Loader(unittest.TestCase): + def test__load(self): + Conll2003Loader()._load('test/data_for_tests/conll_2003_example.txt') + diff --git a/test/io/loader/test_cws_loader.py b/test/io/loader/test_cws_loader.py index 6ad607c3..8b5d4081 100644 --- a/test/io/loader/test_cws_loader.py +++ b/test/io/loader/test_cws_loader.py @@ -10,4 +10,15 @@ class CWSLoaderTest(unittest.TestCase): for dataset_name in dataset_names: with self.subTest(dataset_name=dataset_name): data_bundle = CWSLoader(dataset_name=dataset_name).load() - print(data_bundle) \ No newline at end of file + print(data_bundle) + + +class RunCWSLoaderTest(unittest.TestCase): + def test_cws_loader(self): + dataset_names = ['msra'] + for dataset_name in dataset_names: + with self.subTest(dataset_name=dataset_name): + data_bundle = CWSLoader(dataset_name=dataset_name).load( + f'test/data_for_tests/io/cws_{dataset_name}' + ) + print(data_bundle) diff --git a/test/io/loader/test_matching_loader.py b/test/io/loader/test_matching_loader.py index 5c1a91f1..652cf161 100644 --- a/test/io/loader/test_matching_loader.py +++ b/test/io/loader/test_matching_loader.py @@ -20,3 +20,11 @@ class TestDownload(unittest.TestCase): data_bundle = loader().load() print(data_bundle) + +class TestLoad(unittest.TestCase): + + def test_load(self): + for loader in [RTELoader]: + data_bundle = loader().load('test/data_for_tests/io/rte') + print(data_bundle) + diff --git a/test/io/pipe/test_classification.py b/test/io/pipe/test_classification.py index 39dc71e0..c6e2005e 100644 --- a/test/io/pipe/test_classification.py +++ b/test/io/pipe/test_classification.py @@ -11,3 +11,11 @@ class TestPipe(unittest.TestCase): print(pipe) data_bundle = pipe(tokenizer='raw').process_from_file() print(data_bundle) + + +class TestRunPipe(unittest.TestCase): + + def test_load(self): + for pipe in [IMDBPipe]: + data_bundle = 
pipe(tokenizer='raw').process_from_file('test/data_for_tests/io/imdb') + print(data_bundle) diff --git a/test/io/pipe/test_conll.py b/test/io/pipe/test_conll.py index e8879d71..6f6c4fad 100644 --- a/test/io/pipe/test_conll.py +++ b/test/io/pipe/test_conll.py @@ -1,6 +1,7 @@ import unittest import os -from fastNLP.io import MsraNERPipe, PeopleDailyPipe, WeiboNERPipe +from fastNLP.io import MsraNERPipe, PeopleDailyPipe, WeiboNERPipe, Conll2003Pipe, Conll2003NERPipe + @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") class TestPipe(unittest.TestCase): @@ -9,4 +10,13 @@ class TestPipe(unittest.TestCase): with self.subTest(pipe=pipe): print(pipe) data_bundle = pipe().process_from_file() - print(data_bundle) \ No newline at end of file + print(data_bundle) + + +class TestRunPipe(unittest.TestCase): + def test_conll2003(self): + for pipe in [Conll2003Pipe, Conll2003NERPipe]: + with self.subTest(pipe=pipe): + print(pipe) + data_bundle = pipe().process_from_file('test/data_for_tests/conll_2003_example.txt') + print(data_bundle) diff --git a/test/io/pipe/test_cws.py b/test/io/pipe/test_cws.py index 2fc57ae2..dd901a25 100644 --- a/test/io/pipe/test_cws.py +++ b/test/io/pipe/test_cws.py @@ -3,6 +3,7 @@ import unittest import os from fastNLP.io.pipe.cws import CWSPipe + class CWSPipeTest(unittest.TestCase): @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") def test_process_from_file(self): @@ -10,4 +11,13 @@ class CWSPipeTest(unittest.TestCase): for dataset_name in dataset_names: with self.subTest(dataset_name=dataset_name): data_bundle = CWSPipe(dataset_name=dataset_name).process_from_file() - print(data_bundle) \ No newline at end of file + print(data_bundle) + + +class RunCWSPipeTest(unittest.TestCase): + def test_process_from_file(self): + dataset_names = ['msra'] + for dataset_name in dataset_names: + with self.subTest(dataset_name=dataset_name): + data_bundle = CWSPipe().process_from_file(f'test/data_for_tests/io/cws_{dataset_name}') + print(data_bundle) diff --git a/test/io/pipe/test_matching.py b/test/io/pipe/test_matching.py index c057bb0c..33904e7a 100644 --- a/test/io/pipe/test_matching.py +++ b/test/io/pipe/test_matching.py @@ -24,3 +24,11 @@ class TestBertPipe(unittest.TestCase): print(pipe) data_bundle = pipe(tokenizer='raw').process_from_file() print(data_bundle) + + +class TestRunPipe(unittest.TestCase): + + def test_load(self): + for pipe in [RTEPipe, RTEBertPipe]: + data_bundle = pipe(tokenizer='raw').process_from_file('test/data_for_tests/io/rte') + print(data_bundle) From 1994029ab84fb70ee8d790732006747f5d918a02 Mon Sep 17 00:00:00 2001 From: yh Date: Mon, 2 Sep 2019 15:59:45 +0800 Subject: [PATCH 26/50] =?UTF-8?q?1.=E5=BD=93=E5=89=8D=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E7=9A=84encoding=5Ftype=E9=83=BD=E6=94=AF=E6=8C=81=E4=BB=8Etag?= =?UTF-8?q?=5Fvocab=E4=B8=AD=E8=87=AA=E5=8A=A8=E5=88=A4=E6=96=AD;=E9=81=BF?= =?UTF-8?q?=E5=85=8D=E8=A7=A6=E5=8F=91=E6=97=A0=E6=84=8F=E8=AF=86=E5=AF=BC?= =?UTF-8?q?=E8=87=B4=E7=9A=84metric=20bug;=202.=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E9=83=A8=E5=88=86inplace=E6=93=8D=E4=BD=9C=E6=97=A0=E6=B3=95?= =?UTF-8?q?=E6=B1=82=E5=AF=BC=E7=9A=84=E9=97=AE=E9=A2=98;=203.Vocabulary?= =?UTF-8?q?=E5=B0=86=E4=B8=80=E4=BA=9B=E5=B1=9E=E6=80=A7=E9=80=9A=E8=BF=87?= =?UTF-8?q?property=E6=9A=B4=E9=9C=B2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/metrics.py | 85 +++++++++++++++++++------ fastNLP/core/vocabulary.py | 70 +++++++++++++-------- fastNLP/io/data_bundle.py | 43 ++++++++++++- 
fastNLP/io/pipe/conll.py | 2 +- fastNLP/models/biaffine_parser.py | 15 +++-- fastNLP/modules/decoder/crf.py | 38 ++++++++---- test/core/test_metrics.py | 41 +++++++++++- test/modules/decoder/test_CRF.py | 100 +++++++++++++++++++++++++++++- 8 files changed, 321 insertions(+), 73 deletions(-) diff --git a/fastNLP/core/metrics.py b/fastNLP/core/metrics.py index 0dc601a3..b06e5459 100644 --- a/fastNLP/core/metrics.py +++ b/fastNLP/core/metrics.py @@ -24,7 +24,7 @@ from .utils import seq_len_to_mask from .vocabulary import Vocabulary from abc import abstractmethod import warnings - +from typing import Union class MetricBase(object): """ @@ -337,15 +337,18 @@ class AccuracyMetric(MetricBase): raise TypeError(f"`seq_lens` in {_get_func_signature(self.evaluate)} must be torch.Tensor," f"got {type(seq_len)}.") - if seq_len is not None: - masks = seq_len_to_mask(seq_len=seq_len) + if seq_len is not None and target.dim()>1: + max_len = target.size(1) + masks = seq_len_to_mask(seq_len=seq_len, max_len=max_len) else: masks = None - if pred.size() == target.size(): + if pred.dim() == target.dim(): pass - elif len(pred.size()) == len(target.size()) + 1: + elif pred.dim() == target.dim() + 1: pred = pred.argmax(dim=-1) + if seq_len is None: + warnings.warn("You are not passing `seq_len` to exclude pad when calculate accuracy.") else: raise RuntimeError(f"In {_get_func_signature(self.evaluate)}, when pred have " f"size:{pred.size()}, target should have size: {pred.size()} or " @@ -493,20 +496,63 @@ def _bio_tag_to_spans(tags, ignore_labels=None): return [(span[0], (span[1][0], span[1][1] + 1)) for span in spans if span[0] not in ignore_labels] -def _check_tag_vocab_and_encoding_type(vocab:Vocabulary, encoding_type:str): +def _get_encoding_type_from_tag_vocab(tag_vocab:Union[Vocabulary, dict])->str: + """ + 给定Vocabulary自动判断是哪种类型的encoding, 支持判断bmes, bioes, bmeso, bio + + :param tag_vocab: 支持传入tag Vocabulary; 或者传入形如{0:"O", 1:"B-tag1"},即index在前,tag在后的dict。 + :return: + """ + tag_set = set() + unk_token = '' + pad_token = '' + if isinstance(tag_vocab, Vocabulary): + unk_token = tag_vocab.unknown + pad_token = tag_vocab.padding + tag_vocab = tag_vocab.idx2word + for idx, tag in tag_vocab.items(): + if tag in (unk_token, pad_token): + continue + tag = tag[:1].lower() + tag_set.add(tag) + + bmes_tag_set = set('bmes') + if tag_set == bmes_tag_set: + return 'bmes' + bio_tag_set = set('bio') + if tag_set == bio_tag_set: + return 'bio' + bmeso_tag_set = set('bmeso') + if tag_set == bmeso_tag_set: + return 'bmeso' + bioes_tag_set = set('bioes') + if tag_set == bioes_tag_set: + return 'bioes' + raise RuntimeError("encoding_type cannot be inferred automatically. 
Only support " + "'bio', 'bmes', 'bmeso', 'bioes' type.") + + +def _check_tag_vocab_and_encoding_type(tag_vocab:Union[Vocabulary, dict], encoding_type:str): """ 检查vocab中的tag是否与encoding_type是匹配的 - :param vocab: target的Vocabulary + :param tag_vocab: 支持传入tag Vocabulary; 或者传入形如{0:"O", 1:"B-tag1"},即index在前,tag在后的dict。 :param encoding_type: bio, bmes, bioes, bmeso :return: """ tag_set = set() - for tag, idx in vocab: - if idx in (vocab.unknown_idx, vocab.padding_idx): + unk_token = '' + pad_token = '' + if isinstance(tag_vocab, Vocabulary): + unk_token = tag_vocab.unknown + pad_token = tag_vocab.padding + tag_vocab = tag_vocab.idx2word + for idx, tag in tag_vocab.items(): + if tag in (unk_token, pad_token): continue tag = tag[:1].lower() tag_set.add(tag) + tags = encoding_type for tag in tag_set: assert tag in tags, f"{tag} is not a valid tag in encoding type:{encoding_type}. Please check your " \ @@ -549,7 +595,7 @@ class SpanFPreRecMetric(MetricBase): :param str pred: 用该key在evaluate()时从传入dict中取出prediction数据。 为None,则使用 `pred` 取数据 :param str target: 用该key在evaluate()时从传入dict中取出target数据。 为None,则使用 `target` 取数据 :param str seq_len: 用该key在evaluate()时从传入dict中取出sequence length数据。为None,则使用 `seq_len` 取数据。 - :param str encoding_type: 目前支持bio, bmes, bmeso, bioes + :param str encoding_type: 目前支持bio, bmes, bmeso, bioes。默认为None,通过tag_vocab自动判断. :param list ignore_labels: str 组成的list. 这个list中的class不会被用于计算。例如在POS tagging时传入['NN'],则不会计算'NN'这 个label :param bool only_gross: 是否只计算总的f1, precision, recall的值;如果为False,不仅返回总的f1, pre, rec, 还会返回每个 @@ -560,18 +606,21 @@ class SpanFPreRecMetric(MetricBase): 常用为beta=0.5, 1, 2. 若为0.5则精确率的权重高于召回率;若为1,则两者平等;若为2,则召回率权重高于精确率。 """ - def __init__(self, tag_vocab, pred=None, target=None, seq_len=None, encoding_type='bio', ignore_labels=None, + def __init__(self, tag_vocab, pred=None, target=None, seq_len=None, encoding_type=None, ignore_labels=None, only_gross=True, f_type='micro', beta=1): - - encoding_type = encoding_type.lower() - + if not isinstance(tag_vocab, Vocabulary): raise TypeError("tag_vocab can only be fastNLP.Vocabulary, not {}.".format(type(tag_vocab))) if f_type not in ('micro', 'macro'): raise ValueError("f_type only supports `micro` or `macro`', got {}.".format(f_type)) - - self.encoding_type = encoding_type - _check_tag_vocab_and_encoding_type(tag_vocab, encoding_type) + + if encoding_type: + encoding_type = encoding_type.lower() + _check_tag_vocab_and_encoding_type(tag_vocab, encoding_type) + self.encoding_type = encoding_type + else: + self.encoding_type = _get_encoding_type_from_tag_vocab(tag_vocab) + if self.encoding_type == 'bmes': self.tag_to_span_func = _bmes_tag_to_spans elif self.encoding_type == 'bio': @@ -581,7 +630,7 @@ class SpanFPreRecMetric(MetricBase): elif self.encoding_type == 'bioes': self.tag_to_span_func = _bioes_tag_to_spans else: - raise ValueError("Only support 'bio', 'bmes', 'bmeso' type.") + raise ValueError("Only support 'bio', 'bmes', 'bmeso', 'bioes' type.") self.ignore_labels = ignore_labels self.f_type = f_type diff --git a/fastNLP/core/vocabulary.py b/fastNLP/core/vocabulary.py index cd4f2c0f..b0f9650a 100644 --- a/fastNLP/core/vocabulary.py +++ b/fastNLP/core/vocabulary.py @@ -39,7 +39,7 @@ def _check_build_vocab(func): @wraps(func) # to solve missing docstring def _wrapper(self, *args, **kwargs): - if self.word2idx is None or self.rebuild is True: + if self._word2idx is None or self.rebuild is True: self.build_vocab() return func(self, *args, **kwargs) @@ -95,12 +95,30 @@ class Vocabulary(object): self.word_count = Counter() 
self.unknown = unknown self.padding = padding - self.word2idx = None - self.idx2word = None + self._word2idx = None + self._idx2word = None self.rebuild = True # 用于承载不需要单独创建entry的词语,具体见from_dataset()方法 self._no_create_word = Counter() - + + @property + @_check_build_vocab + def word2idx(self): + return self._word2idx + + @word2idx.setter + def word2idx(self, value): + self._word2idx = value + + @property + @_check_build_vocab + def idx2word(self): + return self._idx2word + + @idx2word.setter + def idx2word(self, value): + self._word2idx = value + @_check_build_status def update(self, word_lst, no_create_entry=False): """依次增加序列中词在词典中的出现频率 @@ -187,21 +205,21 @@ class Vocabulary(object): 但已经记录在词典中的词, 不会改变对应的 `int` """ - if self.word2idx is None: - self.word2idx = {} + if self._word2idx is None: + self._word2idx = {} if self.padding is not None: - self.word2idx[self.padding] = len(self.word2idx) + self._word2idx[self.padding] = len(self._word2idx) if self.unknown is not None: - self.word2idx[self.unknown] = len(self.word2idx) + self._word2idx[self.unknown] = len(self._word2idx) max_size = min(self.max_size, len(self.word_count)) if self.max_size else None words = self.word_count.most_common(max_size) if self.min_freq is not None: words = filter(lambda kv: kv[1] >= self.min_freq, words) - if self.word2idx is not None: - words = filter(lambda kv: kv[0] not in self.word2idx, words) - start_idx = len(self.word2idx) - self.word2idx.update({w: i + start_idx for i, (w, _) in enumerate(words)}) + if self._word2idx is not None: + words = filter(lambda kv: kv[0] not in self._word2idx, words) + start_idx = len(self._word2idx) + self._word2idx.update({w: i + start_idx for i, (w, _) in enumerate(words)}) self.build_reverse_vocab() self.rebuild = False return self @@ -211,12 +229,12 @@ class Vocabulary(object): 基于 `word to index` dict, 构建 `index to word` dict. 
""" - self.idx2word = {i: w for w, i in self.word2idx.items()} + self._idx2word = {i: w for w, i in self._word2idx.items()} return self @_check_build_vocab def __len__(self): - return len(self.word2idx) + return len(self._word2idx) @_check_build_vocab def __contains__(self, item): @@ -226,7 +244,7 @@ class Vocabulary(object): :param item: the word :return: True or False """ - return item in self.word2idx + return item in self._word2idx def has_word(self, w): """ @@ -248,10 +266,10 @@ class Vocabulary(object): vocab[w] """ - if w in self.word2idx: - return self.word2idx[w] + if w in self._word2idx: + return self._word2idx[w] if self.unknown is not None: - return self.word2idx[self.unknown] + return self._word2idx[self.unknown] else: raise ValueError("word `{}` not in vocabulary".format(w)) @@ -405,7 +423,7 @@ class Vocabulary(object): """ if self.unknown is None: return None - return self.word2idx[self.unknown] + return self._word2idx[self.unknown] @property @_check_build_vocab @@ -415,7 +433,7 @@ class Vocabulary(object): """ if self.padding is None: return None - return self.word2idx[self.padding] + return self._word2idx[self.padding] @_check_build_vocab def to_word(self, idx): @@ -425,7 +443,7 @@ class Vocabulary(object): :param int idx: the index :return str word: the word """ - return self.idx2word[idx] + return self._idx2word[idx] def clear(self): """ @@ -434,8 +452,8 @@ class Vocabulary(object): :return: """ self.word_count.clear() - self.word2idx = None - self.idx2word = None + self._word2idx = None + self._idx2word = None self.rebuild = True self._no_create_word.clear() return self @@ -446,8 +464,8 @@ class Vocabulary(object): """ len(self) # make sure vocab has been built state = self.__dict__.copy() - # no need to pickle idx2word as it can be constructed from word2idx - del state['idx2word'] + # no need to pickle _idx2word as it can be constructed from _word2idx + del state['_idx2word'] return state def __setstate__(self, state): @@ -462,5 +480,5 @@ class Vocabulary(object): @_check_build_vocab def __iter__(self): - for word, index in self.word2idx.items(): + for word, index in self._word2idx.items(): yield word, index diff --git a/fastNLP/io/data_bundle.py b/fastNLP/io/data_bundle.py index f30add34..3e7f39d3 100644 --- a/fastNLP/io/data_bundle.py +++ b/fastNLP/io/data_bundle.py @@ -8,7 +8,7 @@ __all__ = [ from ..core.dataset import DataSet from ..core.vocabulary import Vocabulary - +from typing import Union class DataBundle: """ @@ -191,7 +191,7 @@ class DataBundle: raise KeyError(f"{field_name} not found DataSet:{name}.") return self - def rename_field(self, field_name, new_field_name, ignore_miss_dataset=True): + def rename_field(self, field_name, new_field_name, ignore_miss_dataset=True, rename_vocab=True): """ 将DataBundle中所有DataSet中名为field_name的field重命名为new_field_name. 
@@ -199,6 +199,7 @@ class DataBundle: :param str new_field_name: :param bool ignore_miss_dataset: 当某个field名称在某个dataset不存在时,如果为True,则直接忽略该DataSet; 如果为False,则报错 + :param bool rename_vocab: 如果该field同时也存在于vocabs中,会将该field的名称对应修改 :return: self """ for name, dataset in self.datasets.items(): @@ -206,15 +207,20 @@ class DataBundle: dataset.rename_field(field_name=field_name, new_field_name=new_field_name) elif not ignore_miss_dataset: raise KeyError(f"{field_name} not found DataSet:{name}.") + if rename_vocab: + if field_name in self.vocabs: + self.vocabs[new_field_name] = self.vocabs.pop(field_name) + return self - def delete_field(self, field_name, ignore_miss_dataset=True): + def delete_field(self, field_name, ignore_miss_dataset=True, delete_vocab=True): """ 将DataBundle中所有DataSet中名为field_name的field删除掉. :param str field_name: :param bool ignore_miss_dataset: 当某个field名称在某个dataset不存在时,如果为True,则直接忽略该DataSet; 如果为False,则报错 + :param bool delete_vocab: 如果该field也在vocabs中存在,将该值也一并删除 :return: self """ for name, dataset in self.datasets.items(): @@ -222,8 +228,39 @@ class DataBundle: dataset.delete_field(field_name=field_name) elif not ignore_miss_dataset: raise KeyError(f"{field_name} not found DataSet:{name}.") + if delete_vocab: + if field_name in self.vocabs: + self.vocabs.pop(field_name) return self + def iter_datasets(self)->Union[str, DataSet]: + """ + 迭代data_bundle中的DataSet + + Example:: + + for name, dataset in data_bundle.iter_datasets(): + pass + + :return: + """ + for name, dataset in self.datasets.items(): + yield name, dataset + + def iter_vocabs(self)->Union[str, Vocabulary]: + """ + 迭代data_bundle中的DataSet + + Example: + + for field_name, vocab in data_bundle.iter_vocabs(): + pass + + :return: + """ + for field_name, vocab in self.vocabs.items(): + yield field_name, vocab + def apply_field(self, func, field_name:str, new_field_name:str, ignore_miss_dataset=True, **kwargs): """ 对DataBundle中所有的dataset使用apply_field方法 diff --git a/fastNLP/io/pipe/conll.py b/fastNLP/io/pipe/conll.py index eb7d4909..2edc9008 100644 --- a/fastNLP/io/pipe/conll.py +++ b/fastNLP/io/pipe/conll.py @@ -193,7 +193,7 @@ class OntoNotesNERPipe(_NERPipe): """ 处理OntoNotes的NER数据,处理之后DataSet中的field情况为 - .. csv-table:: Following is a demo layout of DataSet returned by Conll2003Loader + .. 
csv-table:: :header: "raw_words", "words", "target", "seq_len" "[Nadim, Ladki]", "[2, 3]", "[1, 2]", 2 diff --git a/fastNLP/models/biaffine_parser.py b/fastNLP/models/biaffine_parser.py index bead09fc..6b0829bd 100644 --- a/fastNLP/models/biaffine_parser.py +++ b/fastNLP/models/biaffine_parser.py @@ -207,7 +207,7 @@ class ArcBiaffine(nn.Module): output = dep.matmul(self.U) output = output.bmm(head.transpose(-1, -2)) if self.has_bias: - output += head.matmul(self.bias).unsqueeze(1) + output = output + head.matmul(self.bias).unsqueeze(1) return output @@ -234,7 +234,7 @@ class LabelBilinear(nn.Module): :return output: [batch, seq_len, num_cls] 每个元素对应类别的概率图 """ output = self.bilinear(x1, x2) - output += self.lin(torch.cat([x1, x2], dim=2)) + output = output + self.lin(torch.cat([x1, x2], dim=2)) return output @@ -363,7 +363,7 @@ class BiaffineParser(GraphParser): # print('forward {} {}'.format(batch_size, seq_len)) # get sequence mask - mask = seq_len_to_mask(seq_len).long() + mask = seq_len_to_mask(seq_len, max_len=length).long() word = self.word_embedding(words1) # [N,L] -> [N,L,C_0] pos = self.pos_embedding(words2) # [N,L] -> [N,L,C_1] @@ -435,10 +435,10 @@ class BiaffineParser(GraphParser): """ batch_size, length, _ = pred1.shape - mask = seq_len_to_mask(seq_len) + mask = seq_len_to_mask(seq_len, max_len=length) flip_mask = (mask == 0) _arc_pred = pred1.clone() - _arc_pred.masked_fill_(flip_mask.unsqueeze(1), -float('inf')) + _arc_pred = _arc_pred.masked_fill(flip_mask.unsqueeze(1), -float('inf')) arc_logits = F.log_softmax(_arc_pred, dim=2) label_logits = F.log_softmax(pred2, dim=2) batch_index = torch.arange(batch_size, device=arc_logits.device, dtype=torch.long).unsqueeze(1) @@ -446,9 +446,8 @@ class BiaffineParser(GraphParser): arc_loss = arc_logits[batch_index, child_index, target1] label_loss = label_logits[batch_index, child_index, target2] - byte_mask = flip_mask.byte() - arc_loss.masked_fill_(byte_mask, 0) - label_loss.masked_fill_(byte_mask, 0) + arc_loss = arc_loss.masked_fill(flip_mask, 0) + label_loss = label_loss.masked_fill(flip_mask, 0) arc_nll = -arc_loss.mean() label_nll = -label_loss.mean() return arc_nll + label_nll diff --git a/fastNLP/modules/decoder/crf.py b/fastNLP/modules/decoder/crf.py index f63d46e3..c13ea50c 100644 --- a/fastNLP/modules/decoder/crf.py +++ b/fastNLP/modules/decoder/crf.py @@ -10,33 +10,45 @@ from torch import nn from ..utils import initial_parameter from ...core.vocabulary import Vocabulary +from ...core.metrics import _get_encoding_type_from_tag_vocab, _check_tag_vocab_and_encoding_type +from typing import Union - -def allowed_transitions(id2target, encoding_type='bio', include_start_end=False): +def allowed_transitions(tag_vocab:Union[Vocabulary, dict], encoding_type=None, include_start_end=False): """ 别名::class:`fastNLP.modules.allowed_transitions` :class:`fastNLP.modules.decoder.allowed_transitions` 给定一个id到label的映射表,返回所有可以跳转的(from_tag_id, to_tag_id)列表。 - :param dict, ~fastNLP.Vocabulary id2target: key是label的indices,value是str类型的tag或tag-label。value可以是只有tag的, 比如"B", "M"; 也可以是 - "B-NN", "M-NN", tag和label之间一定要用"-"隔开。一般可以通过Vocabulary.idx2word得到id2label。 - :param str encoding_type: 支持"bio", "bmes", "bmeso", "bioes"。 + :param ~fastNLP.Vocabulary,dict tag_vocab: 支持类型为tag或tag-label。只有tag的,比如"B", "M"; 也可以是"B-NN", "M-NN", + tag和label之间一定要用"-"隔开。如果传入dict,格式需要形如{0:"O", 1:"B-tag1"},即index在前,tag在后。 + :param str encoding_type: 支持"bio", "bmes", "bmeso", "bioes"。默认为None,通过vocab自动推断 :param bool include_start_end: 是否包含开始与结尾的转换。比如在bio中,b/o可以在开头,但是i不能在开头; 
为True,返回的结果中会包含(start_idx, b_idx), (start_idx, o_idx), 但是不包含(start_idx, i_idx); start_idx=len(id2label), end_idx=len(id2label)+1。为False, 返回的结果中不含与开始结尾相关的内容 :return: List[Tuple(int, int)]], 内部的Tuple是可以进行跳转的(from_tag_id, to_tag_id)。 """ - if isinstance(id2target, Vocabulary): - id2target = id2target.idx2word - num_tags = len(id2target) + if encoding_type is None: + encoding_type = _get_encoding_type_from_tag_vocab(tag_vocab) + else: + encoding_type = encoding_type.lower() + _check_tag_vocab_and_encoding_type(tag_vocab, encoding_type) + + pad_token = '' + unk_token = '' + + if isinstance(tag_vocab, Vocabulary): + id_label_lst = list(tag_vocab.idx2word.items()) + pad_token = tag_vocab.padding + unk_token = tag_vocab.unknown + else: + id_label_lst = list(tag_vocab.items()) + + num_tags = len(tag_vocab) start_idx = num_tags end_idx = num_tags + 1 - encoding_type = encoding_type.lower() allowed_trans = [] - id_label_lst = list(id2target.items()) if include_start_end: id_label_lst += [(start_idx, 'start'), (end_idx, 'end')] - def split_tag_label(from_label): from_label = from_label.lower() if from_label in ['start', 'end']: @@ -48,11 +60,11 @@ def allowed_transitions(id2target, encoding_type='bio', include_start_end=False) return from_tag, from_label for from_id, from_label in id_label_lst: - if from_label in ['', '']: + if from_label in [pad_token, unk_token]: continue from_tag, from_label = split_tag_label(from_label) for to_id, to_label in id_label_lst: - if to_label in ['', '']: + if to_label in [pad_token, unk_token]: continue to_tag, to_label = split_tag_label(to_label) if _is_transition_allowed(encoding_type, from_tag, from_label, to_tag, to_label): diff --git a/test/core/test_metrics.py b/test/core/test_metrics.py index 5a7c55cf..8a472a62 100644 --- a/test/core/test_metrics.py +++ b/test/core/test_metrics.py @@ -11,6 +11,12 @@ from fastNLP.core.metrics import SpanFPreRecMetric, ExtractiveQAMetric def _generate_tags(encoding_type, number_labels=4): + """ + + :param encoding_type: 例如BIOES, BMES, BIO等 + :param number_labels: 多少个label,大于1 + :return: + """ vocab = {} for i in range(number_labels): label = str(i) @@ -184,7 +190,7 @@ class TestAccuracyMetric(unittest.TestCase): self.assertDictEqual(metric.get_metric(), {'acc': 1.}) -class SpanF1PreRecMetric(unittest.TestCase): +class SpanFPreRecMetricTest(unittest.TestCase): def test_case1(self): from fastNLP.core.metrics import _bmes_tag_to_spans from fastNLP.core.metrics import _bio_tag_to_spans @@ -338,6 +344,39 @@ class SpanF1PreRecMetric(unittest.TestCase): for key, value in expected_metric.items(): self.assertAlmostEqual(value, metric_value[key], places=5) + def test_auto_encoding_type_infer(self): + # 检查是否可以自动check encode的类型 + vocabs = {} + import random + for encoding_type in ['bio', 'bioes', 'bmeso']: + vocab = Vocabulary(unknown=None, padding=None) + for i in range(random.randint(10, 100)): + label = str(random.randint(1, 10)) + for tag in encoding_type: + if tag!='o': + vocab.add_word(f'{tag}-{label}') + else: + vocab.add_word('o') + vocabs[encoding_type] = vocab + for e in ['bio', 'bioes', 'bmeso']: + with self.subTest(e=e): + metric = SpanFPreRecMetric(tag_vocab=vocabs[e]) + assert metric.encoding_type == e + + bmes_vocab = _generate_tags('bmes') + vocab = Vocabulary() + for tag, index in bmes_vocab.items(): + vocab.add_word(tag) + metric = SpanFPreRecMetric(vocab) + assert metric.encoding_type == 'bmes' + + # 一些无法check的情况 + vocab = Vocabulary() + for i in range(10): + vocab.add_word(str(i)) + with self.assertRaises(Exception): + 
metric = SpanFPreRecMetric(vocab) + def test_encoding_type(self): # 检查传入的tag_vocab与encoding_type不符合时,是否会报错 vocabs = {} diff --git a/test/modules/decoder/test_CRF.py b/test/modules/decoder/test_CRF.py index 647af7d3..94b4ab7a 100644 --- a/test/modules/decoder/test_CRF.py +++ b/test/modules/decoder/test_CRF.py @@ -1,6 +1,6 @@ import unittest - +from fastNLP import Vocabulary class TestCRF(unittest.TestCase): def test_case1(self): @@ -14,7 +14,8 @@ class TestCRF(unittest.TestCase): id2label = {0: 'B', 1:'M', 2:'E', 3:'S'} expected_res = {(0, 1), (0, 2), (1, 1), (1, 2), (2, 0), (2, 3), (2, 5), (3, 0), (3, 3), (3, 5), (4, 0), (4, 3)} - self.assertSetEqual(expected_res, set(allowed_transitions(id2label, encoding_type='BMES', include_start_end=True))) + self.assertSetEqual(expected_res, set( + allowed_transitions(id2label, encoding_type='BMES', include_start_end=True))) id2label = {0: 'B', 1: 'I', 2:'O', 3: '', 4:""} allowed_transitions(id2label, include_start_end=True) @@ -37,7 +38,100 @@ class TestCRF(unittest.TestCase): expected_res = {(0, 1), (0, 2), (1, 1), (1, 2), (2, 0), (2, 3), (2, 4), (2, 7), (2, 9), (3, 0), (3, 3), (3, 4), (3, 7), (3, 9), (4, 5), (4, 6), (5, 5), (5, 6), (6, 0), (6, 3), (6, 4), (6, 7), (6, 9), (7, 0), (7, 3), (7, 4), (7, 7), (7, 9), (8, 0), (8, 3), (8, 4), (8, 7)} - self.assertSetEqual(expected_res, set(allowed_transitions(id2label, encoding_type='BMES', include_start_end=True))) + self.assertSetEqual(expected_res, set( + allowed_transitions(id2label, include_start_end=True))) + + def test_case11(self): + # 测试自动推断encoding类型 + from fastNLP.modules.decoder.crf import allowed_transitions + + id2label = {0: 'B', 1: 'I', 2: 'O'} + expected_res = {(0, 0), (0, 1), (0, 2), (0, 4), (1, 0), (1, 1), (1, 2), (1, 4), (2, 0), (2, 2), + (2, 4), (3, 0), (3, 2)} + self.assertSetEqual(expected_res, set(allowed_transitions(id2label, include_start_end=True))) + + id2label = {0: 'B', 1: 'M', 2: 'E', 3: 'S'} + expected_res = {(0, 1), (0, 2), (1, 1), (1, 2), (2, 0), (2, 3), (2, 5), (3, 0), (3, 3), (3, 5), (4, 0), (4, 3)} + self.assertSetEqual(expected_res, set( + allowed_transitions(id2label, include_start_end=True))) + + id2label = {0: 'B', 1: 'I', 2: 'O', 3: '', 4: ""} + allowed_transitions(id2label, include_start_end=True) + + labels = ['O'] + for label in ['X', 'Y']: + for tag in 'BI': + labels.append('{}-{}'.format(tag, label)) + id2label = {idx: label for idx, label in enumerate(labels)} + expected_res = {(0, 0), (0, 1), (0, 3), (0, 6), (1, 0), (1, 1), (1, 2), (1, 3), (1, 6), (2, 0), (2, 1), + (2, 2), (2, 3), (2, 6), (3, 0), (3, 1), (3, 3), (3, 4), (3, 6), (4, 0), (4, 1), (4, 3), + (4, 4), (4, 6), (5, 0), (5, 1), (5, 3)} + self.assertSetEqual(expected_res, set(allowed_transitions(id2label, include_start_end=True))) + + labels = [] + for label in ['X', 'Y']: + for tag in 'BMES': + labels.append('{}-{}'.format(tag, label)) + id2label = {idx: label for idx, label in enumerate(labels)} + expected_res = {(0, 1), (0, 2), (1, 1), (1, 2), (2, 0), (2, 3), (2, 4), (2, 7), (2, 9), (3, 0), (3, 3), (3, 4), + (3, 7), (3, 9), (4, 5), (4, 6), (5, 5), (5, 6), (6, 0), (6, 3), (6, 4), (6, 7), (6, 9), (7, 0), + (7, 3), (7, 4), (7, 7), (7, 9), (8, 0), (8, 3), (8, 4), (8, 7)} + self.assertSetEqual(expected_res, set( + allowed_transitions(id2label, include_start_end=True))) + + def test_case12(self): + # 测试能否通过vocab生成转移矩阵 + from fastNLP.modules.decoder.crf import allowed_transitions + + id2label = {0: 'B', 1: 'I', 2: 'O'} + vocab = Vocabulary(unknown=None, padding=None) + for idx, tag in id2label.items(): + 
vocab.add_word(tag) + expected_res = {(0, 0), (0, 1), (0, 2), (0, 4), (1, 0), (1, 1), (1, 2), (1, 4), (2, 0), (2, 2), + (2, 4), (3, 0), (3, 2)} + self.assertSetEqual(expected_res, set(allowed_transitions(vocab, include_start_end=True))) + + id2label = {0: 'B', 1: 'M', 2: 'E', 3: 'S'} + vocab = Vocabulary(unknown=None, padding=None) + for idx, tag in id2label.items(): + vocab.add_word(tag) + expected_res = {(0, 1), (0, 2), (1, 1), (1, 2), (2, 0), (2, 3), (2, 5), (3, 0), (3, 3), (3, 5), (4, 0), (4, 3)} + self.assertSetEqual(expected_res, set( + allowed_transitions(vocab, include_start_end=True))) + + id2label = {0: 'B', 1: 'I', 2: 'O', 3: '', 4: ""} + vocab = Vocabulary() + for idx, tag in id2label.items(): + vocab.add_word(tag) + allowed_transitions(vocab, include_start_end=True) + + labels = ['O'] + for label in ['X', 'Y']: + for tag in 'BI': + labels.append('{}-{}'.format(tag, label)) + id2label = {idx: label for idx, label in enumerate(labels)} + expected_res = {(0, 0), (0, 1), (0, 3), (0, 6), (1, 0), (1, 1), (1, 2), (1, 3), (1, 6), (2, 0), (2, 1), + (2, 2), (2, 3), (2, 6), (3, 0), (3, 1), (3, 3), (3, 4), (3, 6), (4, 0), (4, 1), (4, 3), + (4, 4), (4, 6), (5, 0), (5, 1), (5, 3)} + vocab = Vocabulary(unknown=None, padding=None) + for idx, tag in id2label.items(): + vocab.add_word(tag) + self.assertSetEqual(expected_res, set(allowed_transitions(vocab, include_start_end=True))) + + labels = [] + for label in ['X', 'Y']: + for tag in 'BMES': + labels.append('{}-{}'.format(tag, label)) + id2label = {idx: label for idx, label in enumerate(labels)} + vocab = Vocabulary(unknown=None, padding=None) + for idx, tag in id2label.items(): + vocab.add_word(tag) + expected_res = {(0, 1), (0, 2), (1, 1), (1, 2), (2, 0), (2, 3), (2, 4), (2, 7), (2, 9), (3, 0), (3, 3), (3, 4), + (3, 7), (3, 9), (4, 5), (4, 6), (5, 5), (5, 6), (6, 0), (6, 3), (6, 4), (6, 7), (6, 9), (7, 0), + (7, 3), (7, 4), (7, 7), (7, 9), (8, 0), (8, 3), (8, 4), (8, 7)} + self.assertSetEqual(expected_res, set( + allowed_transitions(vocab, include_start_end=True))) + def test_case2(self): # 测试CRF能否避免解码出非法跃迁, 使用allennlp做了验证。 From 53f744a87d9d48cc3beaa43a17010a9628261f72 Mon Sep 17 00:00:00 2001 From: ChenXin Date: Mon, 2 Sep 2019 19:43:28 +0800 Subject: [PATCH 27/50] fix some bugs in docs --- docs/source/tutorials/tutorial_9_callback.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/tutorials/tutorial_9_callback.rst b/docs/source/tutorials/tutorial_9_callback.rst index 8e2742bb..dc50aca5 100644 --- a/docs/source/tutorials/tutorial_9_callback.rst +++ b/docs/source/tutorials/tutorial_9_callback.rst @@ -23,7 +23,7 @@ Callback的构建和使用 class LRDecay(fastNLP.Callback): def __init__(self): - super(MyCallback, self).__init__() + super(LRDecay, self).__init__() self.base_lrs = [] self.delta = [] From b3718b10dcda636883f0267b76b264c904e807ff Mon Sep 17 00:00:00 2001 From: Yige Xu Date: Tue, 3 Sep 2019 23:19:18 +0800 Subject: [PATCH 28/50] 1. rename init_embed to embed in models/*; 2. update documents in models/bert.py; 3. update tutorial six. 
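Note on the metrics/CRF patch above: it makes encoding_type optional — SpanFPreRecMetric and allowed_transitions now infer the scheme (bio, bmes, bmeso or bioes) from the tag vocabulary itself, skipping the pad/unk entries and raising a RuntimeError when the tags match none of the supported schemes. A minimal sketch of the new call pattern, with made-up entity labels (only the leading B/I/O characters matter for the inference):

    from fastNLP import Vocabulary
    from fastNLP.core.metrics import SpanFPreRecMetric
    from fastNLP.modules.decoder.crf import allowed_transitions

    # a BIO-style tag vocabulary; no encoding_type argument is needed any more
    tag_vocab = Vocabulary(unknown=None, padding=None)
    tag_vocab.add_word_lst(['O', 'B-per', 'I-per', 'B-loc', 'I-loc'])

    metric = SpanFPreRecMetric(tag_vocab=tag_vocab)                       # inferred as 'bio'
    transitions = allowed_transitions(tag_vocab, include_start_end=True)  # same inference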
--- docs/source/fastNLP.models.bert.rst | 6 + docs/source/fastNLP.models.rst | 3 +- .../fastNLP.models.sequence_labeling.rst | 2 +- .../tutorials/tutorial_6_seq_labeling.rst | 92 +++---- fastNLP/models/__init__.py | 10 +- fastNLP/models/bert.py | 241 +++++++++++------- fastNLP/models/biaffine_parser.py | 6 +- fastNLP/models/cnn_text_classification.py | 6 +- fastNLP/models/snli.py | 10 +- fastNLP/models/star_transformer.py | 24 +- test/models/test_bert.py | 84 +++++- test/models/test_biaffine_parser.py | 4 +- 12 files changed, 312 insertions(+), 176 deletions(-) create mode 100644 docs/source/fastNLP.models.bert.rst diff --git a/docs/source/fastNLP.models.bert.rst b/docs/source/fastNLP.models.bert.rst new file mode 100644 index 00000000..b0c813f9 --- /dev/null +++ b/docs/source/fastNLP.models.bert.rst @@ -0,0 +1,6 @@ +fastNLP.models.bert +=================== + +.. automodule:: fastNLP.models.bert + :members: BertForSequenceClassification, BertForSentenceMatching, BertForMultipleChoice, BertForTokenClassification, BertForQuestionAnswering + diff --git a/docs/source/fastNLP.models.rst b/docs/source/fastNLP.models.rst index fb782de1..21cf41a7 100644 --- a/docs/source/fastNLP.models.rst +++ b/docs/source/fastNLP.models.rst @@ -2,7 +2,7 @@ fastNLP.models ============== .. automodule:: fastNLP.models - :members: CNNText, SeqLabeling, AdvSeqLabel, ESIM, StarTransEnc, STSeqLabel, STNLICls, STSeqCls, BiaffineParser, GraphParser + :members: CNNText, SeqLabeling, AdvSeqLabel, ESIM, StarTransEnc, STSeqLabel, STNLICls, STSeqCls, BiaffineParser, GraphParser, BertForSequenceClassification, BertForSentenceMatching, BertForMultipleChoice, BertForTokenClassification, BertForQuestionAnswering 子模块 ------ @@ -10,6 +10,7 @@ fastNLP.models .. toctree:: :maxdepth: 1 + fastNLP.models.bert fastNLP.models.biaffine_parser fastNLP.models.cnn_text_classification fastNLP.models.sequence_labeling diff --git a/docs/source/fastNLP.models.sequence_labeling.rst b/docs/source/fastNLP.models.sequence_labeling.rst index f6551f8b..dcd1300e 100644 --- a/docs/source/fastNLP.models.sequence_labeling.rst +++ b/docs/source/fastNLP.models.sequence_labeling.rst @@ -2,5 +2,5 @@ fastNLP.models.sequence_labeling ================================ .. automodule:: fastNLP.models.sequence_labeling - :members: SeqLabeling, AdvSeqLabel + :members: SeqLabeling, AdvSeqLabel, BiLSTMCRF diff --git a/docs/source/tutorials/tutorial_6_seq_labeling.rst b/docs/source/tutorials/tutorial_6_seq_labeling.rst index 09a53cdc..7fcf97b3 100644 --- a/docs/source/tutorials/tutorial_6_seq_labeling.rst +++ b/docs/source/tutorials/tutorial_6_seq_labeling.rst @@ -3,64 +3,52 @@ ===================== 这一部分的内容主要展示如何使用fastNLP 实现序列标注任务。你可以使用fastNLP的各个组件快捷,方便地完成序列标注任务,达到出色的效果。 -在阅读这篇Tutorial前,希望你已经熟悉了fastNLP的基础使用,包括基本数据结构以及数据预处理,embedding的嵌入等,希望你对之前的教程有更进一步的掌握。 -我们将对CoNLL-03的英文数据集进行处理,展示如何完成命名实体标注任务整个训练的过程。 +在阅读这篇Tutorial前,希望你已经熟悉了fastNLP的基础使用,尤其是数据的载入以及模型的构建,通过这个小任务的能让你进一步熟悉fastNLP的使用。 +我们将对基于Weibo的中文社交数据集进行处理,展示如何完成命名实体标注任务的整个过程。 载入数据 =================================== -fastNLP可以方便地载入各种类型的数据。同时,针对常见的数据集,我们已经预先实现了载入方法,其中包含CoNLL-03数据集。 +fastNLP的数据载入主要是由Loader与Pipe两个基类衔接完成的。通过Loader可以方便地载入各种类型的数据。同时,针对常见的数据集,我们已经预先实现了载入方法,其中包含weibo数据集。 在设计dataloader时,以DataSetLoader为基类,可以改写并应用于其他数据集的载入。 .. 
code-block:: python - class Conll2003DataLoader(DataSetLoader): - def __init__(self, task:str='ner', encoding_type:str='bioes'): - assert task in ('ner', 'pos', 'chunk') - index = {'ner':3, 'pos':1, 'chunk':2}[task] - #ConllLoader是fastNLP内置的类 - self._loader = ConllLoader(headers=['raw_words', 'target'], indexes=[0, index]) - self._tag_converters = None - if task in ('ner', 'chunk'): - #iob和iob2bioes会对tag进行统一,标准化 - self._tag_converters = [iob2] - if encoding_type == 'bioes': - self._tag_converters.append(iob2bioes) - - def load(self, path: str): - dataset = self._loader.load(path) - def convert_tag_schema(tags): - for converter in self._tag_converters: - tags = converter(tags) - return tags - if self._tag_converters: - #使用apply实现convert_tag_schema函数,实际上也支持匿名函数 - dataset.apply_field(convert_tag_schema, field_name=Const.TARGET, new_field_name=Const.TARGET) - return dataset - -输出数据格式如: - - {'raw_words': ['on', 'Friday', ':'] type=list, - 'target': ['O', 'O', 'O'] type=list}, + from fastNLP.io import WeiboNERLoader + data_bundle = WeiboNERLoader().load() + + + +载入后的数据如 :: + + {'dev': DataSet( + {{'raw_chars': ['用', '最', '大', '努', '力', '去', '做''人', '生', '。', '哈', '哈', '哈', '哈', '哈', '哈', ' + 'target': ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O',, 'O', 'O', 'O', 'O', 'O', 'O'] type=list})} + + {'test': DataSet( + {{'raw_chars': ['感', '恩', '大', '回', '馈'] type=list, 'target': ['O', 'O', 'O', 'O', 'O'] type=list})} + + {'train': DataSet( + {'raw_chars': ['国', '安', '老', '球', '迷'] type=list, 'target': ['B-ORG.NAM', 'I-ORG.NAM', 'B-PER.NOM', 'I-PER.NOM', 'I-PER.NOM'] type=list})} + 数据处理 ---------------------------- -我们进一步处理数据。将数据和词表封装在 :class:`~fastNLP.DataBundle` 类中。data是DataBundle的实例。 -我们输入模型的数据包括char embedding,以及word embedding。在数据处理部分,我们尝试完成词表的构建。 -使用fastNLP中的Vocabulary类来构建词表。 +我们进一步处理数据。通过Pipe基类处理Loader载入的数据。 如果你还有印象,应该还能想起,实现自定义数据集的Pipe时,至少要编写process 函数或者process_from_file 函数。前者接受 :class:`~fastNLP.DataBundle` 类的数据,并返回该 :class:`~fastNLP.DataBundle` 。后者接收数据集所在文件夹为参数,读取并处理为 :class:`~fastNLP.DataBundle` 后,通过process 函数处理数据。 +这里我们已经实现通过Loader载入数据,并已返回 :class:`~fastNLP.DataBundle` 类的数据。我们编写process 函数以处理Loader载入后的数据。 .. 
code-block:: python - word_vocab = Vocabulary(min_freq=2) - word_vocab.from_dataset(data.datasets['train'], field_name=Const.INPUT) - word_vocab.index_dataset(*data.datasets.values(),field_name=Const.INPUT, new_field_name=Const.INPUT) + from fastNLP.io import ChineseNERPipe + data_bundle = ChineseNERPipe(encoding_type='bioes', bigram=True).process(data_bundle) -处理后的data对象内部为: +载入后的数据如下 :: - dataset - vocabs - dataset保存了train和test中的数据,并保存为dataset类型 - vocab保存了words,raw-words以及target的词表。 + {'raw_chars': ['用', '最', '大', '努', '力', '去', '做', '值', '得', '的', '事', '人', '生', '。', '哈', '哈', '哈', '哈', '哈', '哈', '我', '在'] type=list, + 'target': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] type=list, + 'chars': [97, 71, 34, 422, 104, 72, 144, 628, 66, 3, 158, 2, 9, 647, 485, 196, 2,19] type=list, + 'bigrams': [5948, 1950, 34840, 98, 8413, 3961, 34841, 631, 34842, 407, 462, 45, 3 1959, 1619, 3, 3, 3, 3, 3, 2663, 29, 90] type=list, + 'seq_len': 30 type=int} 模型构建 -------------------------------- @@ -69,27 +57,23 @@ fastNLP可以方便地载入各种类型的数据。同时,针对常见的数 模型的训练 首先实例化模型,导入所需的char embedding以及word embedding。Embedding的载入可以参考教程。 -也可以查看 :mod:`~fastNLP.modules.encoder.embedding` 使用所需的embedding 载入方法。 -fastNLP将模型的训练过程封装在了 :class:`~fastnlp.trainer` 类中。 +也可以查看 :mod:`~fastNLP.embedding` 使用所需的embedding 载入方法。 +fastNLP将模型的训练过程封装在了 :class:`~fastnlp.Trainer` 类中。 根据不同的任务调整trainer中的参数即可。通常,一个trainer实例需要有:指定的训练数据集,模型,优化器,loss函数,评测指标,以及指定训练的epoch数,batch size等参数。 .. code-block:: python #实例化模型 - model = CNNBiLSTMCRF(word_embed, char_embed, hidden_size=200, num_layers=1, tag_vocab=data.vocabs[Const.TARGET], encoding_type=encoding_type) - #定义优化器 - optimizer = Adam(model.parameters(), lr=0.005) + model = CNBiLSTMCRFNER(char_embed, num_classes=len(data_bundle.vocabs['target']), bigram_embed=bigram_embed) #定义评估指标 - Metrics=SpanFPreRecMetric(tag_vocab=data.vocabs[Const.TARGET], encoding_type=encoding_type) - #实例化trainer - trainer = Trainer(train_data=data.datasets['train'], model=model, optimizer=optimizer, dev_data=data.datasets['test'], batch_size=10, metrics=Metrics,callbacks=callbacks, n_epochs=100) - #开始训练 - trainer.train() + Metrics=SpanFPreRecMetric(data_bundle.vocabs['target'], encoding_type='bioes') + #实例化trainer并训练 + Trainer(data_bundle.datasets['train'], model, batch_size=20, metrics=Metrics, num_workers=2, dev_data=data_bundle. datasets['dev']).train() + 训练中会保存最优的参数配置。 -训练的结果如下: -.. 
code-block:: python +训练的结果如下 :: Evaluation on DataSet test: SpanFPreRecMetric: f=0.727661, pre=0.732293, rec=0.723088 diff --git a/fastNLP/models/__init__.py b/fastNLP/models/__init__.py index 14314049..a659e1d5 100644 --- a/fastNLP/models/__init__.py +++ b/fastNLP/models/__init__.py @@ -21,12 +21,18 @@ __all__ = [ "STSeqCls", "BiaffineParser", - "GraphParser" + "GraphParser", + + "BertForSequenceClassification", + "BertForSentenceMatching", + "BertForMultipleChoice", + "BertForTokenClassification", + "BertForQuestionAnswering" ] from .base_model import BaseModel from .bert import BertForMultipleChoice, BertForQuestionAnswering, BertForSequenceClassification, \ - BertForTokenClassification + BertForTokenClassification, BertForSentenceMatching from .biaffine_parser import BiaffineParser, GraphParser from .cnn_text_classification import CNNText from .sequence_labeling import SeqLabeling, AdvSeqLabel diff --git a/fastNLP/models/bert.py b/fastNLP/models/bert.py index 08f16db2..4a04bd6d 100644 --- a/fastNLP/models/bert.py +++ b/fastNLP/models/bert.py @@ -1,9 +1,35 @@ -"""undocumented -bert.py is modified from huggingface/pytorch-pretrained-BERT, which is licensed under the Apache License 2.0. +""" +fastNLP提供了BERT应用到五个下游任务的模型代码,可以直接调用。这五个任务分别为 + + - 文本分类任务: :class:`~fastNLP.models.BertForSequenceClassification` + - Matching任务: :class:`~fastNLP.models.BertForSentenceMatching` + - 多选任务: :class:`~fastNLP.models.BertForMultipleChoice` + - 序列标注任务: :class:`~fastNLP.models.BertForTokenClassification` + - 抽取式QA任务: :class:`~fastNLP.models.BertForQuestionAnswering` + +每一个模型必须要传入一个名字为 `embed` 的 :class:`fastNLP.embeddings.BertEmbedding` ,这个参数包含了 +:class:`fastNLP.modules.encoder.BertModel` ,是下游模型的编码器(encoder)。 + +除此以外,还需要传入一个数字,这个数字在不同下游任务模型上的意义如下:: + + 下游任务模型 参数名称 含义 + BertForSequenceClassification num_labels 文本分类类别数目,默认值为2 + BertForSentenceMatching num_labels Matching任务类别数目,默认值为2 + BertForMultipleChoice num_choices 多选任务选项数目,默认值为2 + BertForTokenClassification num_labels 序列标注标签数目,无默认值 + BertForQuestionAnswering num_labels 抽取式QA列数,默认值为2(即第一列为start_span, 第二列为end_span) + +最后还可以传入dropout的大小,默认值为0.1。 """ -__all__ = [] +__all__ = [ + "BertForSequenceClassification", + "BertForSentenceMatching", + "BertForMultipleChoice", + "BertForTokenClassification", + "BertForQuestionAnswering" +] import warnings @@ -13,28 +39,40 @@ from torch import nn from .base_model import BaseModel from ..core.const import Const from ..core._logger import logger -from ..modules.encoder import BertModel -from ..modules.encoder.bert import BertConfig, CONFIG_FILE -from ..embeddings.bert_embedding import BertEmbedding +from ..embeddings import BertEmbedding class BertForSequenceClassification(BaseModel): - """BERT model for classification. """ - def __init__(self, init_embed: BertEmbedding, num_labels: int=2): + 别名: :class:`fastNLP.models.BertForSequenceClassification` + :class:`fastNLP.models.bert.BertForSequenceClassification` + + BERT model for classification. + + :param fastNLP.embeddings.BertEmbedding embed: 下游模型的编码器(encoder). + :param int num_labels: 文本分类类别数目,默认值为2. + :param float dropout: dropout的大小,默认值为0.1. 
+ """ + def __init__(self, embed: BertEmbedding, num_labels: int=2, dropout=0.1): super(BertForSequenceClassification, self).__init__() self.num_labels = num_labels - self.bert = init_embed - self.dropout = nn.Dropout(0.1) + self.bert = embed + self.dropout = nn.Dropout(p=dropout) self.classifier = nn.Linear(self.bert.embedding_dim, num_labels) if not self.bert.model.include_cls_sep: - warn_msg = "Bert for sequence classification excepts BertEmbedding `include_cls_sep` True, but got False." + self.bert.model.include_cls_sep = True + warn_msg = "Bert for sequence classification excepts BertEmbedding `include_cls_sep` True, " \ + "but got False. FastNLP has changed it to True." logger.warn(warn_msg) warnings.warn(warn_msg) def forward(self, words): + """ + :param torch.LongTensor words: [batch_size, seq_len] + :return: { :attr:`fastNLP.Const.OUTPUT` : logits}: torch.Tensor [batch_size, num_labels] + """ hidden = self.dropout(self.bert(words)) cls_hidden = hidden[:, 0] logits = self.classifier(cls_hidden) @@ -42,172 +80,193 @@ class BertForSequenceClassification(BaseModel): return {Const.OUTPUT: logits} def predict(self, words): + """ + :param torch.LongTensor words: [batch_size, seq_len] + :return: { :attr:`fastNLP.Const.OUTPUT` : logits}: torch.LongTensor [batch_size] + """ logits = self.forward(words)[Const.OUTPUT] return {Const.OUTPUT: torch.argmax(logits, dim=-1)} class BertForSentenceMatching(BaseModel): + """ + 别名: :class:`fastNLP.models.BertForSentenceMatching` + :class:`fastNLP.models.bert.BertForSentenceMatching` + + BERT model for sentence matching. - """BERT model for matching. + :param fastNLP.embeddings.BertEmbedding embed: 下游模型的编码器(encoder). + :param int num_labels: Matching任务类别数目,默认值为2. + :param float dropout: dropout的大小,默认值为0.1. """ - def __init__(self, init_embed: BertEmbedding, num_labels: int=2): + def __init__(self, embed: BertEmbedding, num_labels: int=2, dropout=0.1): super(BertForSentenceMatching, self).__init__() self.num_labels = num_labels - self.bert = init_embed - self.dropout = nn.Dropout(0.1) + self.bert = embed + self.dropout = nn.Dropout(p=dropout) self.classifier = nn.Linear(self.bert.embedding_dim, num_labels) if not self.bert.model.include_cls_sep: - error_msg = "Bert for sentence matching excepts BertEmbedding `include_cls_sep` True, but got False." - logger.error(error_msg) - raise RuntimeError(error_msg) + self.bert.model.include_cls_sep = True + warn_msg = "Bert for sentence matching excepts BertEmbedding `include_cls_sep` True, " \ + "but got False. FastNLP has changed it to True." + logger.warn(warn_msg) + warnings.warn(warn_msg) def forward(self, words): - hidden = self.dropout(self.bert(words)) - cls_hidden = hidden[:, 0] + """ + :param torch.LongTensor words: [batch_size, seq_len] + :return: { :attr:`fastNLP.Const.OUTPUT` : logits}: torch.Tensor [batch_size, num_labels] + """ + hidden = self.bert(words) + cls_hidden = self.dropout(hidden[:, 0]) logits = self.classifier(cls_hidden) return {Const.OUTPUT: logits} def predict(self, words): + """ + :param torch.LongTensor words: [batch_size, seq_len] + :return: { :attr:`fastNLP.Const.OUTPUT` : logits}: torch.LongTensor [batch_size] + """ logits = self.forward(words)[Const.OUTPUT] return {Const.OUTPUT: torch.argmax(logits, dim=-1)} class BertForMultipleChoice(BaseModel): - """BERT model for multiple choice tasks. 
""" - def __init__(self, init_embed: BertEmbedding, num_choices=2): + 别名: :class:`fastNLP.models.BertForMultipleChoice` + :class:`fastNLP.models.bert.BertForMultipleChoice` + + BERT model for multiple choice. + + :param fastNLP.embeddings.BertEmbedding embed: 下游模型的编码器(encoder). + :param int num_choices: 多选任务选项数目,默认值为2. + :param float dropout: dropout的大小,默认值为0.1. + """ + def __init__(self, embed: BertEmbedding, num_choices=2, dropout=0.1): super(BertForMultipleChoice, self).__init__() self.num_choices = num_choices - self.bert = init_embed - self.dropout = nn.Dropout(0.1) + self.bert = embed + self.dropout = nn.Dropout(p=dropout) self.classifier = nn.Linear(self.bert.embedding_dim, 1) - self.include_cls_sep = init_embed.model.include_cls_sep if not self.bert.model.include_cls_sep: - error_msg = "Bert for multiple choice excepts BertEmbedding `include_cls_sep` True, but got False." - logger.error(error_msg) - raise RuntimeError(error_msg) + self.bert.model.include_cls_sep = True + warn_msg = "Bert for multiple choice excepts BertEmbedding `include_cls_sep` True, " \ + "but got False. FastNLP has changed it to True." + logger.warn(warn_msg) + warnings.warn(warn_msg) def forward(self, words): """ - :param torch.Tensor words: [batch_size, num_choices, seq_len] - :return: [batch_size, num_labels] + :param torch.LongTensor words: [batch_size, num_choices, seq_len] + :return: { :attr:`fastNLP.Const.OUTPUT` : logits}: torch.LongTensor [batch_size, num_choices] """ batch_size, num_choices, seq_len = words.size() input_ids = words.view(batch_size * num_choices, seq_len) hidden = self.bert(input_ids) - pooled_output = hidden[:, 0] - pooled_output = self.dropout(pooled_output) + pooled_output = self.dropout(hidden[:, 0]) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) return {Const.OUTPUT: reshaped_logits} def predict(self, words): + """ + :param torch.LongTensor words: [batch_size, num_choices, seq_len] + :return: { :attr:`fastNLP.Const.OUTPUT` : logits}: torch.LongTensor [batch_size] + """ logits = self.forward(words)[Const.OUTPUT] return {Const.OUTPUT: torch.argmax(logits, dim=-1)} class BertForTokenClassification(BaseModel): - """BERT model for token-level classification. """ - def __init__(self, init_embed: BertEmbedding, num_labels): + 别名: :class:`fastNLP.models.BertForTokenClassification` + :class:`fastNLP.models.bert.BertForTokenClassification` + + BERT model for token classification. + + :param fastNLP.embeddings.BertEmbedding embed: 下游模型的编码器(encoder). + :param int num_labels: 序列标注标签数目,无默认值. + :param float dropout: dropout的大小,默认值为0.1. + """ + def __init__(self, embed: BertEmbedding, num_labels, dropout=0.1): super(BertForTokenClassification, self).__init__() self.num_labels = num_labels - self.bert = init_embed - self.dropout = nn.Dropout(0.1) + self.bert = embed + self.dropout = nn.Dropout(p=dropout) self.classifier = nn.Linear(self.bert.embedding_dim, num_labels) - self.include_cls_sep = init_embed.model.include_cls_sep - if self.include_cls_sep: - warn_msg = "Bert for token classification excepts BertEmbedding `include_cls_sep` False, but got True." - warnings.warn(warn_msg) + if self.bert.model.include_cls_sep: + self.bert.model.include_cls_sep = False + warn_msg = "Bert for token classification excepts BertEmbedding `include_cls_sep` False, " \ + "but got True. FastNLP has changed it to False." 
logger.warn(warn_msg) + warnings.warn(warn_msg) def forward(self, words): """ - :param torch.Tensor words: [batch_size, seq_len] - :return: [batch_size, seq_len, num_labels] + :param torch.LongTensor words: [batch_size, seq_len] + :return: { :attr:`fastNLP.Const.OUTPUT` : logits}: torch.Tensor [batch_size, seq_len, num_labels] """ - sequence_output = self.bert(words) - if self.include_cls_sep: - sequence_output = sequence_output[:, 1: -1] # [batch_size, seq_len, embed_dim] + sequence_output = self.bert(words) # [batch_size, seq_len, embed_dim] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) return {Const.OUTPUT: logits} def predict(self, words): + """ + :param torch.LongTensor words: [batch_size, seq_len] + :return: { :attr:`fastNLP.Const.OUTPUT` : logits}: torch.LongTensor [batch_size, seq_len] + """ logits = self.forward(words)[Const.OUTPUT] return {Const.OUTPUT: torch.argmax(logits, dim=-1)} class BertForQuestionAnswering(BaseModel): - """BERT model for Question Answering (span extraction). - This module is composed of the BERT model with a linear layer on top of - the sequence output that computes start_logits and end_logits - Params: - `config`: a BertConfig class instance with the configuration to build a new model. - `bert_dir`: a dir which contains the bert parameters within file `pytorch_model.bin` - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] - with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token - types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to - a `sentence B` token (see BERT paper for more details). - `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices - selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size]. - Positions are clamped to the length of the sequence and position outside of the sequence are not taken - into account for computing the loss. - `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size]. - Positions are clamped to the length of the sequence and position outside of the sequence are not taken - into account for computing the loss. - Outputs: - if `start_positions` and `end_positions` are not `None`: - Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions. - if `start_positions` or `end_positions` is `None`: - Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end - position tokens of shape [batch_size, sequence_length]. 
- Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) - input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) - token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) - config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - bert_dir = 'your-bert-file-dir' - model = BertForQuestionAnswering(config, bert_dir) - start_logits, end_logits = model(input_ids, token_type_ids, input_mask) - ``` """ - def __init__(self, init_embed: BertEmbedding, num_labels=2): + 别名: :class:`fastNLP.models.BertForQuestionAnswering` + :class:`fastNLP.models.bert.BertForQuestionAnswering` + + BERT model for classification. + + :param fastNLP.embeddings.BertEmbedding embed: 下游模型的编码器(encoder). + :param int num_labels: 抽取式QA列数,默认值为2(即第一列为start_span, 第二列为end_span). + """ + def __init__(self, embed: BertEmbedding, num_labels=2): super(BertForQuestionAnswering, self).__init__() - self.bert = init_embed + self.bert = embed self.num_labels = num_labels self.qa_outputs = nn.Linear(self.bert.embedding_dim, self.num_labels) if not self.bert.model.include_cls_sep: - error_msg = "Bert for multiple choice excepts BertEmbedding `include_cls_sep` True, but got False." - logger.error(error_msg) - raise RuntimeError(error_msg) + self.bert.model.include_cls_sep = True + warn_msg = "Bert for question answering excepts BertEmbedding `include_cls_sep` True, " \ + "but got False. FastNLP has changed it to True." + logger.warn(warn_msg) + warnings.warn(warn_msg) def forward(self, words): + """ + :param torch.LongTensor words: [batch_size, seq_len] + :return: 一个包含num_labels个logit的dict,每一个logit的形状都是[batch_size, seq_len] + """ sequence_output = self.bert(words) logits = self.qa_outputs(sequence_output) # [batch_size, seq_len, num_labels] return {Const.OUTPUTS(i): logits[:, :, i] for i in range(self.num_labels)} def predict(self, words): + """ + :param torch.LongTensor words: [batch_size, seq_len] + :return: 一个包含num_labels个logit的dict,每一个logit的形状都是[batch_size] + """ logits = self.forward(words) return {Const.OUTPUTS(i): torch.argmax(logits[Const.OUTPUTS(i)], dim=-1) for i in range(self.num_labels)} diff --git a/fastNLP/models/biaffine_parser.py b/fastNLP/models/biaffine_parser.py index 6b0829bd..455d27a7 100644 --- a/fastNLP/models/biaffine_parser.py +++ b/fastNLP/models/biaffine_parser.py @@ -245,7 +245,7 @@ class BiaffineParser(GraphParser): Biaffine Dependency Parser 实现. 论文参考 `Deep Biaffine Attention for Neural Dependency Parsing (Dozat and Manning, 2016) `_ . - :param init_embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 + :param embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 embedding的大小和每个词的维度. 
也可以传入 nn.Embedding 对象, 此时就以传入的对象作为embedding :param pos_vocab_size: part-of-speech 词典大小 @@ -262,7 +262,7 @@ class BiaffineParser(GraphParser): """ def __init__(self, - init_embed, + embed, pos_vocab_size, pos_emb_dim, num_label, @@ -276,7 +276,7 @@ class BiaffineParser(GraphParser): super(BiaffineParser, self).__init__() rnn_out_size = 2 * rnn_hidden_size word_hid_dim = pos_hid_dim = rnn_hidden_size - self.word_embedding = get_embeddings(init_embed) + self.word_embedding = get_embeddings(embed) word_emb_dim = self.word_embedding.embedding_dim self.pos_embedding = nn.Embedding(num_embeddings=pos_vocab_size, embedding_dim=pos_emb_dim) self.word_fc = nn.Linear(word_emb_dim, word_hid_dim) diff --git a/fastNLP/models/cnn_text_classification.py b/fastNLP/models/cnn_text_classification.py index 37a60c35..4bf9c4d1 100644 --- a/fastNLP/models/cnn_text_classification.py +++ b/fastNLP/models/cnn_text_classification.py @@ -23,7 +23,7 @@ class CNNText(torch.nn.Module): 使用CNN进行文本分类的模型 'Yoon Kim. 2014. Convolution Neural Networks for Sentence Classification.' - :param tuple(int,int),torch.FloatTensor,nn.Embedding,numpy.ndarray init_embed: Embedding的大小(传入tuple(int, int), + :param tuple(int,int),torch.FloatTensor,nn.Embedding,numpy.ndarray embed: Embedding的大小(传入tuple(int, int), 第一个int为vocab_zie, 第二个int为embed_dim); 如果为Tensor, Embedding, ndarray等则直接使用该值初始化Embedding :param int num_classes: 一共有多少类 :param int,tuple(int) out_channels: 输出channel的数量。如果为list,则需要与kernel_sizes的数量保持一致 @@ -31,7 +31,7 @@ class CNNText(torch.nn.Module): :param float dropout: Dropout的大小 """ - def __init__(self, init_embed, + def __init__(self, embed, num_classes, kernel_nums=(30, 40, 50), kernel_sizes=(1, 3, 5), @@ -39,7 +39,7 @@ class CNNText(torch.nn.Module): super(CNNText, self).__init__() # no support for pre-trained embedding currently - self.embed = embedding.Embedding(init_embed) + self.embed = embedding.Embedding(embed) self.conv_pool = encoder.ConvMaxpool( in_channels=self.embed.embedding_dim, out_channels=kernel_nums, diff --git a/fastNLP/models/snli.py b/fastNLP/models/snli.py index 5ca4052d..97a14e9f 100644 --- a/fastNLP/models/snli.py +++ b/fastNLP/models/snli.py @@ -24,21 +24,21 @@ class ESIM(BaseModel): ESIM model的一个PyTorch实现 论文参见: https://arxiv.org/pdf/1609.06038.pdf - :param init_embedding: 初始化的Embedding + :param embed: 初始化的Embedding :param int hidden_size: 隐藏层大小,默认值为Embedding的维度 :param int num_labels: 目标标签种类数量,默认值为3 :param float dropout_rate: dropout的比率,默认值为0.3 :param float dropout_embed: 对Embedding的dropout比率,默认值为0.1 """ - def __init__(self, init_embedding, hidden_size=None, num_labels=3, dropout_rate=0.3, + def __init__(self, embed, hidden_size=None, num_labels=3, dropout_rate=0.3, dropout_embed=0.1): super(ESIM, self).__init__() - if isinstance(init_embedding, TokenEmbedding) or isinstance(init_embedding, Embedding): - self.embedding = init_embedding + if isinstance(embed, TokenEmbedding) or isinstance(embed, Embedding): + self.embedding = embed else: - self.embedding = Embedding(init_embedding) + self.embedding = Embedding(embed) self.dropout_embed = EmbedDropout(p=dropout_embed) if hidden_size is None: hidden_size = self.embedding.embed_size diff --git a/fastNLP/models/star_transformer.py b/fastNLP/models/star_transformer.py index b95d1c25..7fe0d343 100644 --- a/fastNLP/models/star_transformer.py +++ b/fastNLP/models/star_transformer.py @@ -23,7 +23,7 @@ class StarTransEnc(nn.Module): 带word embedding的Star-Transformer Encoder - :param init_embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 + :param embed: 
单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 embedding的大小和每个词的维度. 也可以传入 nn.Embedding 对象, 此时就以传入的对象作为embedding :param hidden_size: 模型中特征维度. @@ -35,7 +35,7 @@ class StarTransEnc(nn.Module): :param dropout: 模型除词嵌入外的dropout概率. """ - def __init__(self, init_embed, + def __init__(self, embed, hidden_size, num_layers, num_head, @@ -44,7 +44,7 @@ class StarTransEnc(nn.Module): emb_dropout, dropout): super(StarTransEnc, self).__init__() - self.embedding = get_embeddings(init_embed) + self.embedding = get_embeddings(embed) emb_dim = self.embedding.embedding_dim self.emb_fc = nn.Linear(emb_dim, hidden_size) # self.emb_drop = nn.Dropout(emb_dropout) @@ -108,7 +108,7 @@ class STSeqLabel(nn.Module): 用于序列标注的Star-Transformer模型 - :param init_embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 + :param embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 embedding的大小和每个词的维度. 也可以传入 nn.Embedding 对象, 此时就以传入的对象作为embedding :param num_cls: 输出类别个数 @@ -122,7 +122,7 @@ class STSeqLabel(nn.Module): :param dropout: 模型除词嵌入外的dropout概率. Default: 0.1 """ - def __init__(self, init_embed, num_cls, + def __init__(self, embed, num_cls, hidden_size=300, num_layers=4, num_head=8, @@ -132,7 +132,7 @@ class STSeqLabel(nn.Module): emb_dropout=0.1, dropout=0.1, ): super(STSeqLabel, self).__init__() - self.enc = StarTransEnc(init_embed=init_embed, + self.enc = StarTransEnc(embed=embed, hidden_size=hidden_size, num_layers=num_layers, num_head=num_head, @@ -173,7 +173,7 @@ class STSeqCls(nn.Module): 用于分类任务的Star-Transformer - :param init_embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 + :param embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 embedding的大小和每个词的维度. 也可以传入 nn.Embedding 对象, 此时就以传入的对象作为embedding :param num_cls: 输出类别个数 @@ -187,7 +187,7 @@ class STSeqCls(nn.Module): :param dropout: 模型除词嵌入外的dropout概率. Default: 0.1 """ - def __init__(self, init_embed, num_cls, + def __init__(self, embed, num_cls, hidden_size=300, num_layers=4, num_head=8, @@ -197,7 +197,7 @@ class STSeqCls(nn.Module): emb_dropout=0.1, dropout=0.1, ): super(STSeqCls, self).__init__() - self.enc = StarTransEnc(init_embed=init_embed, + self.enc = StarTransEnc(embed=embed, hidden_size=hidden_size, num_layers=num_layers, num_head=num_head, @@ -238,7 +238,7 @@ class STNLICls(nn.Module): 用于自然语言推断(NLI)的Star-Transformer - :param init_embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 + :param embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 embedding的大小和每个词的维度. 也可以传入 nn.Embedding 对象, 此时就以传入的对象作为embedding :param num_cls: 输出类别个数 @@ -252,7 +252,7 @@ class STNLICls(nn.Module): :param dropout: 模型除词嵌入外的dropout概率. 
Default: 0.1 """ - def __init__(self, init_embed, num_cls, + def __init__(self, embed, num_cls, hidden_size=300, num_layers=4, num_head=8, @@ -262,7 +262,7 @@ class STNLICls(nn.Module): emb_dropout=0.1, dropout=0.1, ): super(STNLICls, self).__init__() - self.enc = StarTransEnc(init_embed=init_embed, + self.enc = StarTransEnc(embed=embed, hidden_size=hidden_size, num_layers=num_layers, num_head=num_head, diff --git a/test/models/test_bert.py b/test/models/test_bert.py index 969a8594..9cab3a88 100644 --- a/test/models/test_bert.py +++ b/test/models/test_bert.py @@ -23,10 +23,25 @@ class TestBert(unittest.TestCase): self.assertTrue(Const.OUTPUT in pred) self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2, 2)) - pred = model.predict(input_ids) + pred = model(input_ids) self.assertTrue(isinstance(pred, dict)) self.assertTrue(Const.OUTPUT in pred) - self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2,)) + self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2, 2)) + + def test_bert_1_w(self): + vocab = Vocabulary().add_word_lst("this is a test .".split()) + embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert', + include_cls_sep=False) + + with self.assertWarns(Warning): + model = BertForSequenceClassification(embed, 2) + + input_ids = torch.LongTensor([[1, 2, 3], [5, 6, 0]]) + + pred = model.predict(input_ids) + self.assertTrue(isinstance(pred, dict)) + self.assertTrue(Const.OUTPUT in pred) + self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2,)) def test_bert_2(self): @@ -44,6 +59,23 @@ class TestBert(unittest.TestCase): self.assertTrue(Const.OUTPUT in pred) self.assertEqual(tuple(pred[Const.OUTPUT].shape), (1, 2)) + def test_bert_2_w(self): + + vocab = Vocabulary().add_word_lst("this is a test [SEP] .".split()) + embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert', + include_cls_sep=False) + + with self.assertWarns(Warning): + model = BertForMultipleChoice(embed, 2) + + input_ids = torch.LongTensor([[[2, 6, 7], [1, 6, 5]]]) + print(input_ids.size()) + + pred = model.predict(input_ids) + self.assertTrue(isinstance(pred, dict)) + self.assertTrue(Const.OUTPUT in pred) + self.assertEqual(tuple(pred[Const.OUTPUT].shape), (1,)) + def test_bert_3(self): vocab = Vocabulary().add_word_lst("this is a test [SEP] .".split()) @@ -58,6 +90,22 @@ class TestBert(unittest.TestCase): self.assertTrue(Const.OUTPUT in pred) self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2, 3, 7)) + def test_bert_3_w(self): + + vocab = Vocabulary().add_word_lst("this is a test [SEP] .".split()) + embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert', + include_cls_sep=True) + + with self.assertWarns(Warning): + model = BertForTokenClassification(embed, 7) + + input_ids = torch.LongTensor([[1, 2, 3], [6, 5, 0]]) + + pred = model.predict(input_ids) + self.assertTrue(isinstance(pred, dict)) + self.assertTrue(Const.OUTPUT in pred) + self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2, 3)) + def test_bert_4(self): vocab = Vocabulary().add_word_lst("this is a test [SEP] .".split()) @@ -79,6 +127,22 @@ class TestBert(unittest.TestCase): self.assertTrue(isinstance(pred, dict)) self.assertEqual(len(pred), 7) + def test_bert_4_w(self): + + vocab = Vocabulary().add_word_lst("this is a test [SEP] .".split()) + embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert', + include_cls_sep=False) + + with self.assertWarns(Warning): + model = BertForQuestionAnswering(embed) + + input_ids = 
torch.LongTensor([[1, 2, 3], [6, 5, 0]]) + + pred = model.predict(input_ids) + self.assertTrue(isinstance(pred, dict)) + self.assertTrue(Const.OUTPUTS(1) in pred) + self.assertEqual(tuple(pred[Const.OUTPUTS(1)].shape), (2,)) + def test_bert_5(self): vocab = Vocabulary().add_word_lst("this is a test [SEP] .".split()) @@ -93,3 +157,19 @@ class TestBert(unittest.TestCase): self.assertTrue(Const.OUTPUT in pred) self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2, 2)) + def test_bert_5_w(self): + + vocab = Vocabulary().add_word_lst("this is a test [SEP] .".split()) + embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert', + include_cls_sep=False) + + with self.assertWarns(Warning): + model = BertForSentenceMatching(embed) + + input_ids = torch.LongTensor([[1, 2, 3], [6, 5, 0]]) + + pred = model.predict(input_ids) + self.assertTrue(isinstance(pred, dict)) + self.assertTrue(Const.OUTPUT in pred) + self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2,)) + diff --git a/test/models/test_biaffine_parser.py b/test/models/test_biaffine_parser.py index 4f93b994..4b38d816 100644 --- a/test/models/test_biaffine_parser.py +++ b/test/models/test_biaffine_parser.py @@ -27,7 +27,7 @@ def prepare_parser_data(): class TestBiaffineParser(unittest.TestCase): def test_train(self): - model = BiaffineParser(init_embed=(VOCAB_SIZE, 10), + model = BiaffineParser(embed=(VOCAB_SIZE, 10), pos_vocab_size=VOCAB_SIZE, pos_emb_dim=10, rnn_hidden_size=10, arc_mlp_size=10, @@ -37,7 +37,7 @@ class TestBiaffineParser(unittest.TestCase): RUNNER.run_model(model, ds, loss=ParserLoss(), metrics=ParserMetric()) def test_train2(self): - model = BiaffineParser(init_embed=(VOCAB_SIZE, 10), + model = BiaffineParser(embed=(VOCAB_SIZE, 10), pos_vocab_size=VOCAB_SIZE, pos_emb_dim=10, rnn_hidden_size=16, arc_mlp_size=10, From d15ad75d96f3b72fe6b439ef8ce6e4829987ce0f Mon Sep 17 00:00:00 2001 From: Yige Xu Date: Tue, 3 Sep 2019 23:33:10 +0800 Subject: [PATCH 29/50] fix a bug in test code --- test/modules/decoder/test_bert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/modules/decoder/test_bert.py b/test/modules/decoder/test_bert.py index 0fcf01e4..56946f5d 100644 --- a/test/modules/decoder/test_bert.py +++ b/test/modules/decoder/test_bert.py @@ -3,7 +3,7 @@ import unittest import torch -from fastNLP.models.bert import BertModel +from fastNLP.modules.encoder.bert import BertModel class TestBert(unittest.TestCase): From e903db0e70bb4cd9e9b45907fc33db4b4fce9765 Mon Sep 17 00:00:00 2001 From: yh_cc Date: Wed, 4 Sep 2019 12:47:52 +0800 Subject: [PATCH 30/50] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E4=B8=AD=E6=96=87?= =?UTF-8?q?=E5=88=86=E7=B1=BBPipe;=E4=BD=BF=E7=94=A8=E7=9F=A9=E9=98=B5?= =?UTF-8?q?=E5=8A=A0=E9=80=9FBertEmbedding=E9=83=A8=E5=88=86pool=5Fmethod;?= =?UTF-8?q?=E8=B0=83=E6=95=B4=E9=83=A8=E5=88=86=E6=B5=8B=E8=AF=95=E7=94=A8?= =?UTF-8?q?=E4=BE=8B=E5=90=8D=E7=A7=B0;=E4=BF=AE=E5=A4=8Dmetric=E4=B8=AD?= =?UTF-8?q?=E5=AF=B9warning=E7=9A=84=E8=AF=AF=E6=8A=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/losses.py | 4 +- fastNLP/core/metrics.py | 2 +- fastNLP/embeddings/bert_embedding.py | 42 +++++--- fastNLP/io/__init__.py | 4 +- fastNLP/io/data_bundle.py | 15 +-- fastNLP/io/file_utils.py | 7 +- fastNLP/io/loader/__init__.py | 3 +- fastNLP/io/loader/classification.py | 57 +++++++++++ fastNLP/io/pipe/__init__.py | 3 +- fastNLP/io/pipe/classification.py | 101 ++++++++++++++++++- fastNLP/io/pipe/conll.py | 40 ++++++-- 
test/embeddings/test_bert_embedding.py | 6 ++ test/io/loader/test_classification_loader.py | 6 +- test/io/loader/test_conll_loader.py | 6 +- test/io/loader/test_cws_loader.py | 4 +- test/io/loader/test_matching_loader.py | 5 +- test/io/pipe/test_classification.py | 13 ++- test/io/pipe/test_conll.py | 6 +- test/io/pipe/test_cws.py | 4 +- test/io/pipe/test_matching.py | 6 +- 20 files changed, 274 insertions(+), 60 deletions(-) diff --git a/fastNLP/core/losses.py b/fastNLP/core/losses.py index d5549cec..7402a568 100644 --- a/fastNLP/core/losses.py +++ b/fastNLP/core/losses.py @@ -238,8 +238,8 @@ class CrossEntropyLoss(LossBase): pred = pred.tranpose(-1, pred) pred = pred.reshape(-1, pred.size(-1)) target = target.reshape(-1) - if seq_len is not None: - mask = seq_len_to_mask(seq_len).reshape(-1).eq(0) + if seq_len is not None and target.dim()>1: + mask = seq_len_to_mask(seq_len, max_len=target.size(1)).reshape(-1).eq(0) target = target.masked_fill(mask, self.padding_idx) return F.cross_entropy(input=pred, target=target, diff --git a/fastNLP/core/metrics.py b/fastNLP/core/metrics.py index b06e5459..c0f14c90 100644 --- a/fastNLP/core/metrics.py +++ b/fastNLP/core/metrics.py @@ -347,7 +347,7 @@ class AccuracyMetric(MetricBase): pass elif pred.dim() == target.dim() + 1: pred = pred.argmax(dim=-1) - if seq_len is None: + if seq_len is None and target.dim()>1: warnings.warn("You are not passing `seq_len` to exclude pad when calculate accuracy.") else: raise RuntimeError(f"In {_get_func_signature(self.evaluate)}, when pred have " diff --git a/fastNLP/embeddings/bert_embedding.py b/fastNLP/embeddings/bert_embedding.py index f6c36623..08615fe0 100644 --- a/fastNLP/embeddings/bert_embedding.py +++ b/fastNLP/embeddings/bert_embedding.py @@ -68,7 +68,7 @@ class BertEmbedding(ContextualEmbedding): def __init__(self, vocab: Vocabulary, model_dir_or_name: str = 'en-base-uncased', layers: str = '-1', pool_method: str = 'first', word_dropout=0, dropout=0, include_cls_sep: bool = False, - pooled_cls=True, requires_grad: bool = False, auto_truncate: bool = False): + pooled_cls=True, requires_grad: bool = True, auto_truncate: bool = False): super(BertEmbedding, self).__init__(vocab, word_dropout=word_dropout, dropout=dropout) if model_dir_or_name.lower() in PRETRAINED_BERT_MODEL_DIR: @@ -165,7 +165,7 @@ class BertWordPieceEncoder(nn.Module): """ def __init__(self, model_dir_or_name: str = 'en-base-uncased', layers: str = '-1', pooled_cls: bool = False, - word_dropout=0, dropout=0, requires_grad: bool = False): + word_dropout=0, dropout=0, requires_grad: bool = True): super().__init__() self.model = _WordPieceBertModel(model_dir_or_name=model_dir_or_name, layers=layers, pooled_cls=pooled_cls) @@ -288,7 +288,7 @@ class _WordBertModel(nn.Module): self.auto_truncate = auto_truncate # 将所有vocab中word的wordpiece计算出来, 需要额外考虑[CLS]和[SEP] - logger.info("Start to generating word pieces for word.") + logger.info("Start to generate word pieces for word.") # 第一步统计出需要的word_piece, 然后创建新的embed和word_piece_vocab, 然后填入值 word_piece_dict = {'[CLS]': 1, '[SEP]': 1} # 用到的word_piece以及新增的 found_count = 0 @@ -374,7 +374,8 @@ class _WordBertModel(nn.Module): else: raise RuntimeError( "After split words into word pieces, the lengths of word pieces are longer than the " - f"maximum allowed sequence length:{self._max_position_embeddings} of bert.") + f"maximum allowed sequence length:{self._max_position_embeddings} of bert. 
You can set " + f"`auto_truncate=True` for BertEmbedding to automatically truncate overlong input.") # +2是由于需要加入[CLS]与[SEP] word_pieces = words.new_full((batch_size, min(word_piece_length + 2, self._max_position_embeddings)), @@ -407,15 +408,26 @@ class _WordBertModel(nn.Module): # output_layers = [self.layers] # len(self.layers) x batch_size x real_word_piece_length x hidden_size if self.include_cls_sep: - outputs = bert_outputs[-1].new_zeros(len(self.layers), batch_size, max_word_len + 2, - bert_outputs[-1].size(-1)) s_shift = 1 + outputs = bert_outputs[-1].new_zeros(len(self.layers), batch_size, max_word_len + 2, + bert_outputs[-1].size(-1)) + else: + s_shift = 0 outputs = bert_outputs[-1].new_zeros(len(self.layers), batch_size, max_word_len, bert_outputs[-1].size(-1)) - s_shift = 0 batch_word_pieces_cum_length = batch_word_pieces_length.new_zeros(batch_size, max_word_len + 1) batch_word_pieces_cum_length[:, 1:] = batch_word_pieces_length.cumsum(dim=-1) # batch_size x max_len + + if self.pool_method == 'first': + batch_word_pieces_cum_length = batch_word_pieces_cum_length[:, :seq_len.max()] + batch_word_pieces_cum_length.masked_fill_(batch_word_pieces_cum_length.ge(word_piece_length), 0) + batch_indexes = batch_indexes[:, None].expand((batch_size, batch_word_pieces_cum_length.size(1))) + elif self.pool_method == 'last': + batch_word_pieces_cum_length = batch_word_pieces_cum_length[:, 1:seq_len.max()+1] - 1 + batch_word_pieces_cum_length.masked_fill_(batch_word_pieces_cum_length.ge(word_piece_length), 0) + batch_indexes = batch_indexes[:, None].expand((batch_size, batch_word_pieces_cum_length.size(1))) + for l_index, l in enumerate(self.layers): output_layer = bert_outputs[l] real_word_piece_length = output_layer.size(1) - 2 @@ -426,16 +438,15 @@ class _WordBertModel(nn.Module): output_layer = torch.cat((output_layer, paddings), dim=1).contiguous() # 从word_piece collapse到word的表示 truncate_output_layer = output_layer[:, 1:-1] # 删除[CLS]与[SEP] batch_size x len x hidden_size - outputs_seq_len = seq_len + s_shift if self.pool_method == 'first': - for i in range(batch_size): - i_word_pieces_cum_length = batch_word_pieces_cum_length[i, :seq_len[i]] # 每个word的start位置 - outputs[l_index, i, s_shift:outputs_seq_len[i]] = truncate_output_layer[ - i, i_word_pieces_cum_length] # num_layer x batch_size x len x hidden_size + tmp = truncate_output_layer[batch_indexes, batch_word_pieces_cum_length] + tmp = tmp.masked_fill(word_mask[:, :batch_word_pieces_cum_length.size(1), None].eq(0), 0) + outputs[l_index, :, s_shift:batch_word_pieces_cum_length.size(1)+s_shift] = tmp + elif self.pool_method == 'last': - for i in range(batch_size): - i_word_pieces_cum_length = batch_word_pieces_cum_length[i, 1:seq_len[i] + 1] - 1 # 每个word的end - outputs[l_index, i, s_shift:outputs_seq_len[i]] = truncate_output_layer[i, i_word_pieces_cum_length] + tmp = truncate_output_layer[batch_indexes, batch_word_pieces_cum_length] + tmp = tmp.masked_fill(word_mask[:, :batch_word_pieces_cum_length.size(1), None].eq(0), 0) + outputs[l_index, :, s_shift:batch_word_pieces_cum_length.size(1)+s_shift] = tmp elif self.pool_method == 'max': for i in range(batch_size): for j in range(seq_len[i]): @@ -452,5 +463,6 @@ class _WordBertModel(nn.Module): else: outputs[l_index, :, 0] = output_layer[:, 0] outputs[l_index, batch_indexes, seq_len + s_shift] = output_layer[batch_indexes, seq_len + s_shift] + # 3. 
最终的embedding结果 return outputs diff --git a/fastNLP/io/__init__.py b/fastNLP/io/__init__.py index 251b7292..6f727f05 100644 --- a/fastNLP/io/__init__.py +++ b/fastNLP/io/__init__.py @@ -24,6 +24,7 @@ __all__ = [ 'IMDBLoader', 'SSTLoader', 'SST2Loader', + "ChnSentiCorpLoader", 'ConllLoader', 'Conll2003Loader', @@ -52,8 +53,9 @@ __all__ = [ "SSTPipe", "SST2Pipe", "IMDBPipe", - "Conll2003Pipe", + "ChnSentiCorpPipe", + "Conll2003Pipe", "Conll2003NERPipe", "OntoNotesNERPipe", "MsraNERPipe", diff --git a/fastNLP/io/data_bundle.py b/fastNLP/io/data_bundle.py index 3e7f39d3..19b48828 100644 --- a/fastNLP/io/data_bundle.py +++ b/fastNLP/io/data_bundle.py @@ -306,12 +306,15 @@ class DataBundle: return self def __repr__(self): - _str = 'In total {} datasets:\n'.format(len(self.datasets)) - for name, dataset in self.datasets.items(): - _str += '\t{} has {} instances.\n'.format(name, len(dataset)) - _str += 'In total {} vocabs:\n'.format(len(self.vocabs)) - for name, vocab in self.vocabs.items(): - _str += '\t{} has {} entries.\n'.format(name, len(vocab)) + _str = '' + if len(self.datasets): + _str += 'In total {} datasets:\n'.format(len(self.datasets)) + for name, dataset in self.datasets.items(): + _str += '\t{} has {} instances.\n'.format(name, len(dataset)) + if len(self.vocabs): + _str += 'In total {} vocabs:\n'.format(len(self.vocabs)) + for name, vocab in self.vocabs.items(): + _str += '\t{} has {} entries.\n'.format(name, len(vocab)) return _str diff --git a/fastNLP/io/file_utils.py b/fastNLP/io/file_utils.py index 8ecdff25..f76bcd26 100644 --- a/fastNLP/io/file_utils.py +++ b/fastNLP/io/file_utils.py @@ -77,6 +77,9 @@ PRETRAIN_STATIC_FILES = { 'cn-tencent': "tencent_cn.zip", 'cn-fasttext': "cc.zh.300.vec.gz", 'cn-sgns-literature-word': 'sgns.literature.word.txt.zip', + 'cn-char-fastnlp-100d': "cn_char_fastnlp_100d.zip", + 'cn-bi-fastnlp-100d': "cn_bi_fastnlp_100d.zip", + "cn-tri-fastnlp-100d": "cn_tri_fastnlp_100d.zip" } DATASET_DIR = { @@ -96,7 +99,9 @@ DATASET_DIR = { "cws-pku": 'cws_pku.zip', "cws-cityu": "cws_cityu.zip", "cws-as": 'cws_as.zip', - "cws-msra": 'cws_msra.zip' + "cws-msra": 'cws_msra.zip', + + "chn-senti-corp":"chn_senti_corp.zip" } PRETRAIN_MAP = {'elmo': PRETRAINED_ELMO_MODEL_DIR, diff --git a/fastNLP/io/loader/__init__.py b/fastNLP/io/loader/__init__.py index 6c23f213..3ad1b47d 100644 --- a/fastNLP/io/loader/__init__.py +++ b/fastNLP/io/loader/__init__.py @@ -52,6 +52,7 @@ __all__ = [ 'IMDBLoader', 'SSTLoader', 'SST2Loader', + "ChnSentiCorpLoader", 'ConllLoader', 'Conll2003Loader', @@ -73,7 +74,7 @@ __all__ = [ "QNLILoader", "RTELoader" ] -from .classification import YelpLoader, YelpFullLoader, YelpPolarityLoader, IMDBLoader, SSTLoader, SST2Loader +from .classification import YelpLoader, YelpFullLoader, YelpPolarityLoader, IMDBLoader, SSTLoader, SST2Loader, ChnSentiCorpLoader from .conll import ConllLoader, Conll2003Loader, Conll2003NERLoader, OntoNotesNERLoader, CTBLoader from .csv import CSVLoader from .cws import CWSLoader diff --git a/fastNLP/io/loader/classification.py b/fastNLP/io/loader/classification.py index ec00d2b4..4ebd58e1 100644 --- a/fastNLP/io/loader/classification.py +++ b/fastNLP/io/loader/classification.py @@ -7,6 +7,7 @@ __all__ = [ "IMDBLoader", "SSTLoader", "SST2Loader", + "ChnSentiCorpLoader" ] import glob @@ -346,3 +347,59 @@ class SST2Loader(Loader): """ output_dir = self._get_dataset_path(dataset_name='sst-2') return output_dir + + +class ChnSentiCorpLoader(Loader): + """ + 支持读取的数据的格式为,第一行为标题(具体内容会被忽略),之后一行为一个sample,第一个制表符之前被认为是label,第 + 
一个制表符及之后认为是句子 + + Example:: + + label raw_chars + 1 這間酒店環境和服務態度亦算不錯,但房間空間太小~~ + 1 <荐书> 推荐所有喜欢<红楼>的红迷们一定要收藏这本书,要知道... + 0 商品的不足暂时还没发现,京东的订单处理速度实在.......周二就打包完成,周五才发货... + + 读取后的DataSet具有以下的field + + .. csv-table:: + :header: "raw_chars", "target" + + "這間酒店環境和服務態度亦算不錯,但房間空間太小~~", "1" + "<荐书> 推荐所有喜欢<红楼>...", "1" + "..." + + """ + def __init__(self): + super().__init__() + + def _load(self, path:str): + """ + 从path中读取数据 + + :param path: + :return: + """ + ds = DataSet() + with open(path, 'r', encoding='utf-8') as f: + f.readline() + for line in f: + line = line.strip() + tab_index = line.index('\t') + if tab_index!=-1: + target = line[:tab_index] + raw_chars = line[tab_index+1:] + if raw_chars: + ds.append(Instance(raw_chars=raw_chars, target=target)) + return ds + + def download(self)->str: + """ + 自动下载数据,该数据取自https://github.com/pengming617/bert_classification/tree/master/data,在 + https://arxiv.org/pdf/1904.09223.pdf与https://arxiv.org/pdf/1906.08101.pdf有使用 + + :return: + """ + output_dir = self._get_dataset_path('chn-senti-corp') + return output_dir diff --git a/fastNLP/io/pipe/__init__.py b/fastNLP/io/pipe/__init__.py index 048e4cfe..943709e7 100644 --- a/fastNLP/io/pipe/__init__.py +++ b/fastNLP/io/pipe/__init__.py @@ -17,6 +17,7 @@ __all__ = [ "SSTPipe", "SST2Pipe", "IMDBPipe", + "ChnSentiCorpPipe", "Conll2003NERPipe", "OntoNotesNERPipe", @@ -39,7 +40,7 @@ __all__ = [ "MNLIPipe", ] -from .classification import YelpFullPipe, YelpPolarityPipe, SSTPipe, SST2Pipe, IMDBPipe +from .classification import YelpFullPipe, YelpPolarityPipe, SSTPipe, SST2Pipe, IMDBPipe, ChnSentiCorpPipe from .conll import Conll2003NERPipe, OntoNotesNERPipe, MsraNERPipe, WeiboNERPipe, PeopleDailyPipe from .matching import MatchingBertPipe, RTEBertPipe, SNLIBertPipe, QuoraBertPipe, QNLIBertPipe, MNLIBertPipe, \ MatchingPipe, RTEPipe, SNLIPipe, QuoraPipe, QNLIPipe, MNLIPipe diff --git a/fastNLP/io/pipe/classification.py b/fastNLP/io/pipe/classification.py index 30c591a4..d1c7aa0e 100644 --- a/fastNLP/io/pipe/classification.py +++ b/fastNLP/io/pipe/classification.py @@ -5,7 +5,8 @@ __all__ = [ "YelpPolarityPipe", "SSTPipe", "SST2Pipe", - 'IMDBPipe' + 'IMDBPipe', + "ChnSentiCorpPipe" ] import re @@ -13,18 +14,18 @@ import re from nltk import Tree from .pipe import Pipe -from .utils import get_tokenizer, _indexize, _add_words_field, _drop_empty_instance +from .utils import get_tokenizer, _indexize, _add_words_field, _drop_empty_instance, _add_chars_field from ..data_bundle import DataBundle from ..loader.classification import IMDBLoader, YelpFullLoader, SSTLoader, SST2Loader, YelpPolarityLoader from ...core.const import Const from ...core.dataset import DataSet from ...core.instance import Instance from ...core.vocabulary import Vocabulary +from ..loader.classification import ChnSentiCorpLoader nonalpnum = re.compile('[^0-9a-zA-Z?!\']+') - class _CLSPipe(Pipe): """ 分类问题的基类,负责对classification的数据进行tokenize操作。默认是对raw_words列操作,然后生成words列 @@ -457,3 +458,97 @@ class IMDBPipe(_CLSPipe): data_bundle = self.process(data_bundle) return data_bundle + + +class ChnSentiCorpPipe(Pipe): + """ + 处理之后的DataSet有以下的结构 + + .. csv-table:: + :header: "raw_chars", "chars", "target", "seq_len" + + "這間酒店環境和服務態度亦算不錯,但房間空間太小~~", "[2, 3, 4, 5, ...]", 1, 31 + "<荐书> 推荐所有喜欢<红楼>...", "[10, 21, ....]", 1, 25 + "..." + + 其中chars, seq_len是input,target是target + + :param bool bigrams: 是否增加一列bigrams. bigrams的构成是['复', '旦', '大', '学', ...]->["复旦", "旦大", ...]。如果 + 设置为True,返回的DataSet将有一列名为bigrams, 且已经转换为了index并设置为input,对应的vocab可以通过 + data_bundle.get_vocab('bigrams')获取. 
+ :param bool trigrams: 是否增加一列trigrams. trigrams的构成是 ['复', '旦', '大', '学', ...]->["复旦大", "旦大学", ...] + 。如果设置为True,返回的DataSet将有一列名为trigrams, 且已经转换为了index并设置为input,对应的vocab可以通过 + data_bundle.get_vocab('trigrams')获取. + """ + def __init__(self, bigrams=False, trigrams=False): + super().__init__() + + self.bigrams = bigrams + self.trigrams = trigrams + + def _tokenize(self, data_bundle): + """ + 将DataSet中的"复旦大学"拆分为["复", "旦", "大", "学"]. 未来可以通过扩展这个函数实现分词。 + + :param data_bundle: + :return: + """ + data_bundle.apply_field(list, field_name=Const.CHAR_INPUT, new_field_name=Const.CHAR_INPUT) + return data_bundle + + def process(self, data_bundle:DataBundle): + """ + 可以处理的DataSet应该具备以下的field + + .. csv-table:: + :header: "raw_chars", "target" + + "這間酒店環境和服務態度亦算不錯,但房間空間太小~~", "1" + "<荐书> 推荐所有喜欢<红楼>...", "1" + "..." + + :param data_bundle: + :return: + """ + _add_chars_field(data_bundle, lower=False) + + data_bundle = self._tokenize(data_bundle) + + input_field_names = [Const.CHAR_INPUT] + if self.bigrams: + for name, dataset in data_bundle.iter_datasets(): + dataset.apply_field(lambda chars: [c1 + c2 for c1, c2 in zip(chars, chars[1:] + [''])], + field_name=Const.CHAR_INPUT, new_field_name='bigrams') + input_field_names.append('bigrams') + if self.trigrams: + for name, dataset in data_bundle.iter_datasets(): + dataset.apply_field(lambda chars: [c1 + c2 + c3 for c1, c2, c3 in + zip(chars, chars[1:] + [''], chars[2:] + [''] * 2)], + field_name=Const.CHAR_INPUT, new_field_name='trigrams') + input_field_names.append('trigrams') + + # index + _indexize(data_bundle, input_field_names, Const.TARGET) + + input_fields = [Const.TARGET, Const.INPUT_LEN] + input_field_names + target_fields = [Const.TARGET] + + for name, dataset in data_bundle.datasets.items(): + dataset.add_seq_len(Const.CHAR_INPUT) + + data_bundle.set_input(*input_fields) + data_bundle.set_target(*target_fields) + + return data_bundle + + def process_from_file(self, paths=None): + """ + + :param paths: 支持路径类型参见 :class:`fastNLP.io.loader.Loader` 的load函数。 + :return: DataBundle + """ + # 读取数据 + data_bundle = ChnSentiCorpLoader().load(paths) + data_bundle = self.process(data_bundle) + + return data_bundle \ No newline at end of file diff --git a/fastNLP/io/pipe/conll.py b/fastNLP/io/pipe/conll.py index 2edc9008..a96b259a 100644 --- a/fastNLP/io/pipe/conll.py +++ b/fastNLP/io/pipe/conll.py @@ -222,14 +222,23 @@ class _CNNERPipe(Pipe): target。返回的DataSet中被设置为input有chars, target, seq_len; 设置为target有target, seq_len。 :param: str encoding_type: target列使用什么类型的encoding方式,支持bioes, bio两种。 + :param bool bigrams: 是否增加一列bigrams. bigrams的构成是['复', '旦', '大', '学', ...]->["复旦", "旦大", ...]。如果 + 设置为True,返回的DataSet将有一列名为bigrams, 且已经转换为了index并设置为input,对应的vocab可以通过 + data_bundle.get_vocab('bigrams')获取. + :param bool trigrams: 是否增加一列trigrams. trigrams的构成是 ['复', '旦', '大', '学', ...]->["复旦大", "旦大学", ...] + 。如果设置为True,返回的DataSet将有一列名为trigrams, 且已经转换为了index并设置为input,对应的vocab可以通过 + data_bundle.get_vocab('trigrams')获取. 
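+
+    Example::
+
+        # An illustrative sketch of how the extra bigrams/trigrams columns are formed
+        # from the chars of a single instance; the sample characters are placeholders.
+        chars = ['复', '旦', '大', '学']
+        bigrams = [c1 + c2 for c1, c2 in zip(chars, chars[1:] + [''])]
+        # -> ['复旦', '旦大', '大学', '学']
+        trigrams = [c1 + c2 + c3 for c1, c2, c3 in zip(chars, chars[1:] + [''], chars[2:] + [''] * 2)]
+        # -> ['复旦大', '旦大学', '大学', '学']
+
+        # With a concrete subclass (see the tests further below), both columns can be requested directly:
+        # data_bundle = MsraNERPipe(bigrams=True, trigrams=True).process_from_file()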
""" - def __init__(self, encoding_type: str = 'bio'): + def __init__(self, encoding_type: str = 'bio', bigrams=False, trigrams=False): if encoding_type == 'bio': self.convert_tag = iob2 else: self.convert_tag = lambda words: iob2bioes(iob2(words)) - + + self.bigrams = bigrams + self.trigrams = trigrams + def process(self, data_bundle: DataBundle) -> DataBundle: """ 支持的DataSet的field为 @@ -241,11 +250,11 @@ class _CNNERPipe(Pipe): "[青, 岛, 海, 牛, 队, 和, ...]", "[B-ORG, I-ORG, I-ORG, ...]" "[...]", "[...]" - raw_chars列为List[str], 是未转换的原始数据; chars列为List[int],是转换为index的输入数据; target列是List[int],是转换为index的 - target。返回的DataSet中被设置为input有chars, target, seq_len; 设置为target有target。 + raw_chars列为List[str], 是未转换的原始数据; chars列为List[int],是转换为index的输入数据; target列是List[int], + 是转换为index的target。返回的DataSet中被设置为input有chars, target, seq_len; 设置为target有target。 - :param ~fastNLP.DataBundle data_bundle: 传入的DataBundle中的DataSet必须包含raw_words和ner两个field,且两个field的内容均为List[str]。 - 在传入DataBundle基础上原位修改。 + :param ~fastNLP.DataBundle data_bundle: 传入的DataBundle中的DataSet必须包含raw_words和ner两个field,且两个field + 的内容均为List[str]。在传入DataBundle基础上原位修改。 :return: DataBundle """ # 转换tag @@ -253,11 +262,24 @@ class _CNNERPipe(Pipe): dataset.apply_field(self.convert_tag, field_name=Const.TARGET, new_field_name=Const.TARGET) _add_chars_field(data_bundle, lower=False) - + + input_field_names = [Const.CHAR_INPUT] + if self.bigrams: + for name, dataset in data_bundle.datasets.items(): + dataset.apply_field(lambda chars: [c1 + c2 for c1, c2 in zip(chars, chars[1:] + [''])], + field_name=Const.CHAR_INPUT, new_field_name='bigrams') + input_field_names.append('bigrams') + if self.trigrams: + for name, dataset in data_bundle.datasets.items(): + dataset.apply_field(lambda chars: [c1 + c2 + c3 for c1, c2, c3 in + zip(chars, chars[1:] + [''], chars[2:] + [''] * 2)], + field_name=Const.CHAR_INPUT, new_field_name='trigrams') + input_field_names.append('trigrams') + # index - _indexize(data_bundle, input_field_names=Const.CHAR_INPUT, target_field_names=Const.TARGET) + _indexize(data_bundle, input_field_names, Const.TARGET) - input_fields = [Const.TARGET, Const.CHAR_INPUT, Const.INPUT_LEN] + input_fields = [Const.TARGET, Const.INPUT_LEN] + input_field_names target_fields = [Const.TARGET, Const.INPUT_LEN] for name, dataset in data_bundle.datasets.items(): diff --git a/test/embeddings/test_bert_embedding.py b/test/embeddings/test_bert_embedding.py index 46ad74c3..6a4a0ffa 100644 --- a/test/embeddings/test_bert_embedding.py +++ b/test/embeddings/test_bert_embedding.py @@ -13,6 +13,12 @@ class TestDownload(unittest.TestCase): words = torch.LongTensor([[2, 3, 4, 0]]) print(embed(words).size()) + for pool_method in ['first', 'last', 'max', 'avg']: + for include_cls_sep in [True, False]: + embed = BertEmbedding(vocab, model_dir_or_name='en', pool_method=pool_method, + include_cls_sep=include_cls_sep) + print(embed(words).size()) + def test_word_drop(self): vocab = Vocabulary().add_word_lst("This is a test .".split()) embed = BertEmbedding(vocab, model_dir_or_name='en', dropout=0.1, word_dropout=0.2) diff --git a/test/io/loader/test_classification_loader.py b/test/io/loader/test_classification_loader.py index 1438a014..f099c1b2 100644 --- a/test/io/loader/test_classification_loader.py +++ b/test/io/loader/test_classification_loader.py @@ -5,22 +5,22 @@ from fastNLP.io.loader.classification import YelpPolarityLoader from fastNLP.io.loader.classification import IMDBLoader from fastNLP.io.loader.classification import SST2Loader from fastNLP.io.loader.classification import 
SSTLoader +from fastNLP.io.loader.classification import ChnSentiCorpLoader import os @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") class TestDownload(unittest.TestCase): def test_download(self): - for loader in [YelpFullLoader, YelpPolarityLoader, IMDBLoader, SST2Loader, SSTLoader]: + for loader in [YelpFullLoader, YelpPolarityLoader, IMDBLoader, SST2Loader, SSTLoader, ChnSentiCorpLoader]: loader().download() def test_load(self): - for loader in [YelpFullLoader, YelpPolarityLoader, IMDBLoader, SST2Loader, SSTLoader]: + for loader in [YelpFullLoader, YelpPolarityLoader, IMDBLoader, SST2Loader, SSTLoader, ChnSentiCorpLoader]: data_bundle = loader().load() print(data_bundle) class TestLoad(unittest.TestCase): - def test_load(self): for loader in [IMDBLoader]: data_bundle = loader().load('test/data_for_tests/io/imdb') diff --git a/test/io/loader/test_conll_loader.py b/test/io/loader/test_conll_loader.py index 861de5a5..31859a6b 100644 --- a/test/io/loader/test_conll_loader.py +++ b/test/io/loader/test_conll_loader.py @@ -5,7 +5,7 @@ from fastNLP.io.loader.conll import MsraNERLoader, PeopleDailyNERLoader, WeiboNE Conll2003Loader -class MSRANERTest(unittest.TestCase): +class TestMSRANER(unittest.TestCase): @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") def test_download(self): MsraNERLoader().download(re_download=False) @@ -13,13 +13,13 @@ class MSRANERTest(unittest.TestCase): print(data_bundle) -class PeopleDailyTest(unittest.TestCase): +class TestPeopleDaily(unittest.TestCase): @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") def test_download(self): PeopleDailyNERLoader().download() -class WeiboNERTest(unittest.TestCase): +class TestWeiboNER(unittest.TestCase): @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") def test_download(self): WeiboNERLoader().download() diff --git a/test/io/loader/test_cws_loader.py b/test/io/loader/test_cws_loader.py index 8b5d4081..55e48910 100644 --- a/test/io/loader/test_cws_loader.py +++ b/test/io/loader/test_cws_loader.py @@ -3,7 +3,7 @@ import os from fastNLP.io.loader import CWSLoader -class CWSLoaderTest(unittest.TestCase): +class TestCWSLoader(unittest.TestCase): @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") def test_download(self): dataset_names = ['pku', 'cityu', 'as', 'msra'] @@ -13,7 +13,7 @@ class CWSLoaderTest(unittest.TestCase): print(data_bundle) -class RunCWSLoaderTest(unittest.TestCase): +class TestRunCWSLoader(unittest.TestCase): def test_cws_loader(self): dataset_names = ['msra'] for dataset_name in dataset_names: diff --git a/test/io/loader/test_matching_loader.py b/test/io/loader/test_matching_loader.py index 652cf161..cb1334e0 100644 --- a/test/io/loader/test_matching_loader.py +++ b/test/io/loader/test_matching_loader.py @@ -8,7 +8,7 @@ from fastNLP.io.loader.matching import MNLILoader import os @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") -class TestDownload(unittest.TestCase): +class TestMatchingDownload(unittest.TestCase): def test_download(self): for loader in [RTELoader, QNLILoader, SNLILoader, MNLILoader]: loader().download() @@ -21,8 +21,7 @@ class TestDownload(unittest.TestCase): print(data_bundle) -class TestLoad(unittest.TestCase): - +class TestMatchingLoad(unittest.TestCase): def test_load(self): for loader in [RTELoader]: data_bundle = loader().load('test/data_for_tests/io/rte') diff --git a/test/io/pipe/test_classification.py b/test/io/pipe/test_classification.py index c6e2005e..45c276a3 100644 --- a/test/io/pipe/test_classification.py +++ 
b/test/io/pipe/test_classification.py @@ -2,9 +2,10 @@ import unittest import os from fastNLP.io.pipe.classification import SSTPipe, SST2Pipe, IMDBPipe, YelpFullPipe, YelpPolarityPipe +from fastNLP.io.pipe.classification import ChnSentiCorpPipe @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") -class TestPipe(unittest.TestCase): +class TestClassificationPipe(unittest.TestCase): def test_process_from_file(self): for pipe in [YelpPolarityPipe, SST2Pipe, IMDBPipe, YelpFullPipe, SSTPipe]: with self.subTest(pipe=pipe): @@ -14,8 +15,16 @@ class TestPipe(unittest.TestCase): class TestRunPipe(unittest.TestCase): - def test_load(self): for pipe in [IMDBPipe]: data_bundle = pipe(tokenizer='raw').process_from_file('test/data_for_tests/io/imdb') print(data_bundle) + + +@unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") +class TestCNClassificationPipe(unittest.TestCase): + def test_process_from_file(self): + for pipe in [ChnSentiCorpPipe]: + with self.subTest(pipe=pipe): + data_bundle = pipe(bigrams=True, trigrams=True).process_from_file() + print(data_bundle) \ No newline at end of file diff --git a/test/io/pipe/test_conll.py b/test/io/pipe/test_conll.py index 6f6c4fad..4ecd7969 100644 --- a/test/io/pipe/test_conll.py +++ b/test/io/pipe/test_conll.py @@ -4,12 +4,14 @@ from fastNLP.io import MsraNERPipe, PeopleDailyPipe, WeiboNERPipe, Conll2003Pipe @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") -class TestPipe(unittest.TestCase): +class TestConllPipe(unittest.TestCase): def test_process_from_file(self): for pipe in [MsraNERPipe, PeopleDailyPipe, WeiboNERPipe]: with self.subTest(pipe=pipe): print(pipe) - data_bundle = pipe().process_from_file() + data_bundle = pipe(bigrams=True, trigrams=True).process_from_file() + print(data_bundle) + data_bundle = pipe(encoding_type='bioes').process_from_file() print(data_bundle) diff --git a/test/io/pipe/test_cws.py b/test/io/pipe/test_cws.py index dd901a25..063b6d9a 100644 --- a/test/io/pipe/test_cws.py +++ b/test/io/pipe/test_cws.py @@ -4,7 +4,7 @@ import os from fastNLP.io.pipe.cws import CWSPipe -class CWSPipeTest(unittest.TestCase): +class TestCWSPipe(unittest.TestCase): @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") def test_process_from_file(self): dataset_names = ['pku', 'cityu', 'as', 'msra'] @@ -14,7 +14,7 @@ class CWSPipeTest(unittest.TestCase): print(data_bundle) -class RunCWSPipeTest(unittest.TestCase): +class TestRunCWSPipe(unittest.TestCase): def test_process_from_file(self): dataset_names = ['msra'] for dataset_name in dataset_names: diff --git a/test/io/pipe/test_matching.py b/test/io/pipe/test_matching.py index 33904e7a..932d8289 100644 --- a/test/io/pipe/test_matching.py +++ b/test/io/pipe/test_matching.py @@ -7,7 +7,7 @@ from fastNLP.io.pipe.matching import SNLIBertPipe, RTEBertPipe, QNLIBertPipe, MN @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") -class TestPipe(unittest.TestCase): +class TestMatchingPipe(unittest.TestCase): def test_process_from_file(self): for pipe in [SNLIPipe, RTEPipe, QNLIPipe, MNLIPipe]: with self.subTest(pipe=pipe): @@ -17,7 +17,7 @@ class TestPipe(unittest.TestCase): @unittest.skipIf('TRAVIS' in os.environ, "Skip in travis") -class TestBertPipe(unittest.TestCase): +class TestMatchingBertPipe(unittest.TestCase): def test_process_from_file(self): for pipe in [SNLIBertPipe, RTEBertPipe, QNLIBertPipe, MNLIBertPipe]: with self.subTest(pipe=pipe): @@ -26,7 +26,7 @@ class TestBertPipe(unittest.TestCase): print(data_bundle) -class TestRunPipe(unittest.TestCase): +class 
TestRunMatchingPipe(unittest.TestCase): def test_load(self): for pipe in [RTEPipe, RTEBertPipe]: From 113ef8b11a34ca72fd0a1b6a1496dd42e272b94d Mon Sep 17 00:00:00 2001 From: ChenXin Date: Wed, 4 Sep 2019 14:31:45 +0800 Subject: [PATCH 31/50] add code to detect the defined location automatically --- fastNLP/__init__.py | 4 ++++ fastNLP/doc_utils.py | 21 +++++++++++++++++++++ fastNLP/embeddings/__init__.py | 4 ++++ fastNLP/io/__init__.py | 4 ++++ fastNLP/models/__init__.py | 4 ++++ fastNLP/modules/__init__.py | 4 ++++ 6 files changed, 41 insertions(+) create mode 100644 fastNLP/doc_utils.py diff --git a/fastNLP/__init__.py b/fastNLP/__init__.py index 19efac31..aceaf47f 100644 --- a/fastNLP/__init__.py +++ b/fastNLP/__init__.py @@ -70,3 +70,7 @@ from . import models from . import modules from .core import * from .io import loader, pipe + +import sys +from .doc_utils import doc_process +doc_process(sys.modules[__name__]) \ No newline at end of file diff --git a/fastNLP/doc_utils.py b/fastNLP/doc_utils.py new file mode 100644 index 00000000..924b7a6a --- /dev/null +++ b/fastNLP/doc_utils.py @@ -0,0 +1,21 @@ +import inspect +import sys + + +def doc_process(m): + for name, obj in inspect.getmembers(m): + if inspect.isclass(obj) or inspect.isfunction(obj): + if obj.__module__ != m.__name__: + if obj.__doc__ is None: + print(name, obj.__doc__) + else: + module_name = obj.__module__ + while 1: + defined_m = sys.modules[module_name] + if "undocumented" not in defined_m.__doc__ and name in defined_m.__all__: + obj.__doc__ = r"定义在 :class:`" + module_name + "." + name + "`\n" + obj.__doc__ + break + module_name = ".".join(module_name.split('.')[:-1]) + if module_name == m.__name__: + print(name, ": not found defined doc.") + break diff --git a/fastNLP/embeddings/__init__.py b/fastNLP/embeddings/__init__.py index 8a970e25..ea99154e 100644 --- a/fastNLP/embeddings/__init__.py +++ b/fastNLP/embeddings/__init__.py @@ -25,3 +25,7 @@ from .bert_embedding import BertEmbedding, BertWordPieceEncoder from .char_embedding import CNNCharEmbedding, LSTMCharEmbedding from .stack_embedding import StackEmbedding from .utils import get_embeddings + +import sys +from ..doc_utils import doc_process +doc_process(sys.modules[__name__]) \ No newline at end of file diff --git a/fastNLP/io/__init__.py b/fastNLP/io/__init__.py index 6f727f05..c8b3dfaa 100644 --- a/fastNLP/io/__init__.py +++ b/fastNLP/io/__init__.py @@ -88,3 +88,7 @@ from .model_io import ModelLoader, ModelSaver from .loader import * from .pipe import * + +import sys +from ..doc_utils import doc_process +doc_process(sys.modules[__name__]) \ No newline at end of file diff --git a/fastNLP/models/__init__.py b/fastNLP/models/__init__.py index a659e1d5..62adbf69 100644 --- a/fastNLP/models/__init__.py +++ b/fastNLP/models/__init__.py @@ -38,3 +38,7 @@ from .cnn_text_classification import CNNText from .sequence_labeling import SeqLabeling, AdvSeqLabel from .snli import ESIM from .star_transformer import StarTransEnc, STSeqCls, STNLICls, STSeqLabel + +import sys +from ..doc_utils import doc_process +doc_process(sys.modules[__name__]) \ No newline at end of file diff --git a/fastNLP/modules/__init__.py b/fastNLP/modules/__init__.py index 7959e454..769dc42a 100644 --- a/fastNLP/modules/__init__.py +++ b/fastNLP/modules/__init__.py @@ -54,3 +54,7 @@ from . 
import encoder from .decoder import * from .dropout import TimestepDropout from .encoder import * + +import sys +from ..doc_utils import doc_process +doc_process(sys.modules[__name__]) From 3651d61f41c267ef4801dc53e5ac359f8b71606f Mon Sep 17 00:00:00 2001 From: ChenXin Date: Wed, 4 Sep 2019 14:47:45 +0800 Subject: [PATCH 32/50] delete the alias in files. --- fastNLP/embeddings/bert_embedding.py | 2 -- fastNLP/embeddings/char_embedding.py | 4 ---- fastNLP/embeddings/elmo_embedding.py | 2 -- fastNLP/embeddings/embedding.py | 2 -- fastNLP/embeddings/stack_embedding.py | 2 -- fastNLP/embeddings/static_embedding.py | 2 -- fastNLP/modules/decoder/crf.py | 2 -- fastNLP/modules/decoder/mlp.py | 2 -- fastNLP/modules/decoder/utils.py | 2 -- fastNLP/modules/encoder/attention.py | 1 - fastNLP/modules/encoder/bert.py | 2 -- fastNLP/modules/encoder/char_encoder.py | 6 ------ fastNLP/modules/encoder/conv_maxpool.py | 2 -- fastNLP/modules/encoder/lstm.py | 2 -- fastNLP/modules/encoder/pooling.py | 8 -------- fastNLP/modules/encoder/star_transformer.py | 3 --- fastNLP/modules/encoder/transformer.py | 3 --- fastNLP/modules/encoder/variational_rnn.py | 6 ------ reproduction/text_classification/data/sstloader.py | 8 ++++---- reproduction/text_classification/model/awdlstm_module.py | 2 -- 20 files changed, 4 insertions(+), 59 deletions(-) diff --git a/fastNLP/embeddings/bert_embedding.py b/fastNLP/embeddings/bert_embedding.py index 08615fe0..17f6769d 100644 --- a/fastNLP/embeddings/bert_embedding.py +++ b/fastNLP/embeddings/bert_embedding.py @@ -26,8 +26,6 @@ from ..core import logger class BertEmbedding(ContextualEmbedding): """ - 别名::class:`fastNLP.embeddings.BertEmbedding` :class:`fastNLP.embeddings.bert_embedding.BertEmbedding` - 使用BERT对words进行编码的Embedding。建议将输入的words长度限制在430以内,而不要使用512(根据预训练模型参数,可能有变化)。这是由于 预训练的bert模型长度限制为512个token,而因为输入的word是未进行word piece分割的(word piece的分割有BertEmbedding在输入word 时切分),在分割之后长度可能会超过最大长度限制。 diff --git a/fastNLP/embeddings/char_embedding.py b/fastNLP/embeddings/char_embedding.py index 379d4eee..59109206 100644 --- a/fastNLP/embeddings/char_embedding.py +++ b/fastNLP/embeddings/char_embedding.py @@ -24,8 +24,6 @@ from ..core import logger class CNNCharEmbedding(TokenEmbedding): """ - 别名::class:`fastNLP.embeddings.CNNCharEmbedding` :class:`fastNLP.embeddings.char_embedding.CNNCharEmbedding` - 使用CNN生成character embedding。CNN的结构为, embed(x) -> Dropout(x) -> CNN(x) -> activation(x) -> pool -> fc -> Dropout. 不同的kernel大小的fitler结果是concat起来然后通过一层fully connected layer, 然后输出word的表示。 @@ -179,8 +177,6 @@ class CNNCharEmbedding(TokenEmbedding): class LSTMCharEmbedding(TokenEmbedding): """ - 别名::class:`fastNLP.embeddings.LSTMCharEmbedding` :class:`fastNLP.embeddings.char_embedding.LSTMCharEmbedding` - 使用LSTM的方式对character进行encode. 
embed(x) -> Dropout(x) -> LSTM(x) -> activation(x) -> pool -> Dropout Example:: diff --git a/fastNLP/embeddings/elmo_embedding.py b/fastNLP/embeddings/elmo_embedding.py index d82344e4..0ec0caa0 100644 --- a/fastNLP/embeddings/elmo_embedding.py +++ b/fastNLP/embeddings/elmo_embedding.py @@ -22,8 +22,6 @@ from ..core import logger class ElmoEmbedding(ContextualEmbedding): """ - 别名::class:`fastNLP.embeddings.ElmoEmbedding` :class:`fastNLP.embeddings.elmo_embedding.ElmoEmbedding` - 使用ELMo的embedding。初始化之后,只需要传入words就可以得到对应的embedding。当前支持的使用名称初始化的模型有以下的这些(待补充) Example:: diff --git a/fastNLP/embeddings/embedding.py b/fastNLP/embeddings/embedding.py index 5e7b9803..255b0823 100644 --- a/fastNLP/embeddings/embedding.py +++ b/fastNLP/embeddings/embedding.py @@ -17,8 +17,6 @@ from .utils import get_embeddings class Embedding(nn.Module): """ - 别名::class:`fastNLP.embeddings.Embedding` :class:`fastNLP.embeddings.embedding.Embedding` - 词向量嵌入,支持输入多种方式初始化. 可以通过self.num_embeddings获取词表大小; self.embedding_dim获取embedding的维度. Example:: diff --git a/fastNLP/embeddings/stack_embedding.py b/fastNLP/embeddings/stack_embedding.py index 14781945..e83a275c 100644 --- a/fastNLP/embeddings/stack_embedding.py +++ b/fastNLP/embeddings/stack_embedding.py @@ -17,8 +17,6 @@ from .embedding import TokenEmbedding class StackEmbedding(TokenEmbedding): """ - 别名::class:`fastNLP.embeddings.StackEmbedding` :class:`fastNLP.embeddings.stack_embedding.StackEmbedding` - 支持将多个embedding集合成一个embedding。 Example:: diff --git a/fastNLP/embeddings/static_embedding.py b/fastNLP/embeddings/static_embedding.py index c768f32f..8249aa11 100644 --- a/fastNLP/embeddings/static_embedding.py +++ b/fastNLP/embeddings/static_embedding.py @@ -24,8 +24,6 @@ from ..core import logger class StaticEmbedding(TokenEmbedding): """ - 别名::class:`fastNLP.embeddings.StaticEmbedding` :class:`fastNLP.embeddings.static_embedding.StaticEmbedding` - StaticEmbedding组件. 
给定预训练embedding的名称或路径,根据vocab从embedding中抽取相应的数据(只会将出现在vocab中的词抽取出来, 如果没有找到,则会随机初始化一个值(但如果该word是被标记为no_create_entry的话,则不会单独创建一个值,而是会被指向unk的index))。 当前支持自动下载的预训练vector有以下的几种(待补充); diff --git a/fastNLP/modules/decoder/crf.py b/fastNLP/modules/decoder/crf.py index c13ea50c..e2a751f8 100644 --- a/fastNLP/modules/decoder/crf.py +++ b/fastNLP/modules/decoder/crf.py @@ -15,8 +15,6 @@ from typing import Union def allowed_transitions(tag_vocab:Union[Vocabulary, dict], encoding_type=None, include_start_end=False): """ - 别名::class:`fastNLP.modules.allowed_transitions` :class:`fastNLP.modules.decoder.allowed_transitions` - 给定一个id到label的映射表,返回所有可以跳转的(from_tag_id, to_tag_id)列表。 :param ~fastNLP.Vocabulary,dict tag_vocab: 支持类型为tag或tag-label。只有tag的,比如"B", "M"; 也可以是"B-NN", "M-NN", diff --git a/fastNLP/modules/decoder/mlp.py b/fastNLP/modules/decoder/mlp.py index f6e687a7..3e594de1 100644 --- a/fastNLP/modules/decoder/mlp.py +++ b/fastNLP/modules/decoder/mlp.py @@ -12,8 +12,6 @@ from ..utils import initial_parameter class MLP(nn.Module): """ - 别名::class:`fastNLP.modules.MLP` :class:`fastNLP.modules.decoder.MLP` - 多层感知器 :param List[int] size_layer: 一个int的列表,用来定义MLP的层数,列表中的数字为每一层是hidden数目。MLP的层数为 len(size_layer) - 1 diff --git a/fastNLP/modules/decoder/utils.py b/fastNLP/modules/decoder/utils.py index 118b1414..e0d2af68 100644 --- a/fastNLP/modules/decoder/utils.py +++ b/fastNLP/modules/decoder/utils.py @@ -8,8 +8,6 @@ import torch def viterbi_decode(logits, transitions, mask=None, unpad=False): r""" - 别名::class:`fastNLP.modules.viterbi_decode` :class:`fastNLP.modules.decoder.viterbi_decode` - 给定一个特征矩阵以及转移分数矩阵,计算出最佳的路径以及对应的分数 :param torch.FloatTensor logits: batch_size x max_len x num_tags,特征矩阵。 diff --git a/fastNLP/modules/encoder/attention.py b/fastNLP/modules/encoder/attention.py index 6a973864..0d832653 100644 --- a/fastNLP/modules/encoder/attention.py +++ b/fastNLP/modules/encoder/attention.py @@ -45,7 +45,6 @@ class DotAttention(nn.Module): class MultiHeadAttention(nn.Module): """ - 别名::class:`fastNLP.modules.MultiHeadAttention` :class:`fastNLP.modules.encoder.MultiHeadAttention` :param input_size: int, 输入维度的大小。同时也是输出维度的大小。 :param key_size: int, 每个head的维度大小。 diff --git a/fastNLP/modules/encoder/bert.py b/fastNLP/modules/encoder/bert.py index 6f6c4291..12379718 100644 --- a/fastNLP/modules/encoder/bert.py +++ b/fastNLP/modules/encoder/bert.py @@ -348,8 +348,6 @@ class BertPooler(nn.Module): class BertModel(nn.Module): """ - 别名::class:`fastNLP.modules.BertModel` :class:`fastNLP.modules.encoder.BertModel` - BERT(Bidirectional Embedding Representations from Transformers). 用预训练权重矩阵来建立BERT模型:: diff --git a/fastNLP/modules/encoder/char_encoder.py b/fastNLP/modules/encoder/char_encoder.py index e40bd0dd..dc73f447 100644 --- a/fastNLP/modules/encoder/char_encoder.py +++ b/fastNLP/modules/encoder/char_encoder.py @@ -13,8 +13,6 @@ from ..utils import initial_parameter # from torch.nn.init import xavier_uniform class ConvolutionCharEncoder(nn.Module): """ - 别名::class:`fastNLP.modules.ConvolutionCharEncoder` :class:`fastNLP.modules.encoder.ConvolutionCharEncoder` - char级别的卷积编码器. :param int char_emb_size: char级别embedding的维度. Default: 50 @@ -60,11 +58,7 @@ class ConvolutionCharEncoder(nn.Module): class LSTMCharEncoder(nn.Module): """ - 别名::class:`fastNLP.modules.LSTMCharEncoder` :class:`fastNLP.modules.encoder.LSTMCharEncoder` - char级别基于LSTM的encoder. 
- - """ def __init__(self, char_emb_size=50, hidden_size=None, initial_method=None): diff --git a/fastNLP/modules/encoder/conv_maxpool.py b/fastNLP/modules/encoder/conv_maxpool.py index 68415189..bf629eba 100644 --- a/fastNLP/modules/encoder/conv_maxpool.py +++ b/fastNLP/modules/encoder/conv_maxpool.py @@ -10,8 +10,6 @@ import torch.nn.functional as F class ConvMaxpool(nn.Module): """ - 别名::class:`fastNLP.modules.ConvMaxpool` :class:`fastNLP.modules.encoder.ConvMaxpool` - 集合了Convolution和Max-Pooling于一体的层。给定一个batch_size x max_len x input_size的输入,返回batch_size x sum(output_channels) 大小的matrix。在内部,是先使用CNN给输入做卷积,然后经过activation激活层,在通过在长度(max_len) 这一维进行max_pooling。最后得到每个sample的一个向量表示。 diff --git a/fastNLP/modules/encoder/lstm.py b/fastNLP/modules/encoder/lstm.py index 1f3eae6d..1dd1f0df 100644 --- a/fastNLP/modules/encoder/lstm.py +++ b/fastNLP/modules/encoder/lstm.py @@ -14,8 +14,6 @@ import torch.nn.utils.rnn as rnn class LSTM(nn.Module): """ - 别名::class:`fastNLP.modules.LSTM` :class:`fastNLP.modules.encoder.LSTM` - LSTM 模块, 轻量封装的Pytorch LSTM. 在提供seq_len的情况下,将自动使用pack_padded_sequence; 同时默认将forget gate的bias初始化 为1; 且可以应对DataParallel中LSTM的使用问题。 diff --git a/fastNLP/modules/encoder/pooling.py b/fastNLP/modules/encoder/pooling.py index b1272284..c248601d 100644 --- a/fastNLP/modules/encoder/pooling.py +++ b/fastNLP/modules/encoder/pooling.py @@ -12,8 +12,6 @@ import torch.nn as nn class MaxPool(nn.Module): """ - 别名::class:`fastNLP.modules.MaxPool` :class:`fastNLP.modules.encoder.MaxPool` - Max-pooling模块。 :param stride: 窗口移动大小,默认为kernel_size @@ -61,8 +59,6 @@ class MaxPool(nn.Module): class MaxPoolWithMask(nn.Module): """ - 别名::class:`fastNLP.modules.MaxPoolWithMask` :class:`fastNLP.modules.encoder.MaxPoolWithMask` - 带mask矩阵的max pooling。在做max-pooling的时候不会考虑mask值为0的位置。 """ @@ -101,8 +97,6 @@ class KMaxPool(nn.Module): class AvgPool(nn.Module): """ - 别名::class:`fastNLP.modules.AvgPool` :class:`fastNLP.modules.encoder.AvgPool` - 给定形如[batch_size, max_len, hidden_size]的输入,在最后一维进行avg pooling. 输出为[batch_size, hidden_size] """ @@ -128,8 +122,6 @@ class AvgPool(nn.Module): class AvgPoolWithMask(nn.Module): """ - 别名::class:`fastNLP.modules.AvgPoolWithMask` :class:`fastNLP.modules.encoder.AvgPoolWithMask` - 给定形如[batch_size, max_len, hidden_size]的输入,在最后一维进行avg pooling. 
输出为[batch_size, hidden_size], pooling 的时候只会考虑mask为1的位置 """ diff --git a/fastNLP/modules/encoder/star_transformer.py b/fastNLP/modules/encoder/star_transformer.py index 02d7a6a0..bb47d9b5 100644 --- a/fastNLP/modules/encoder/star_transformer.py +++ b/fastNLP/modules/encoder/star_transformer.py @@ -14,9 +14,6 @@ from torch.nn import functional as F class StarTransformer(nn.Module): """ - 别名::class:`fastNLP.modules.StarTransformer` :class:`fastNLP.modules.encoder.StarTransformer` - - Star-Transformer 的encoder部分。 输入3d的文本输入, 返回相同长度的文本编码 paper: https://arxiv.org/abs/1902.09113 diff --git a/fastNLP/modules/encoder/transformer.py b/fastNLP/modules/encoder/transformer.py index d8a612a0..d29a10c3 100644 --- a/fastNLP/modules/encoder/transformer.py +++ b/fastNLP/modules/encoder/transformer.py @@ -10,9 +10,6 @@ from .attention import MultiHeadAttention class TransformerEncoder(nn.Module): """ - 别名::class:`fastNLP.modules.TransformerEncoder` :class:`fastNLP.modules.encoder.TransformerEncoder` - - transformer的encoder模块,不包含embedding层 :param int num_layers: transformer的层数 diff --git a/fastNLP/modules/encoder/variational_rnn.py b/fastNLP/modules/encoder/variational_rnn.py index 933555c8..17e2ad23 100644 --- a/fastNLP/modules/encoder/variational_rnn.py +++ b/fastNLP/modules/encoder/variational_rnn.py @@ -223,8 +223,6 @@ class VarRNNBase(nn.Module): class VarLSTM(VarRNNBase): """ - 别名::class:`fastNLP.modules.VarLSTM` :class:`fastNLP.modules.encoder.VarLSTM` - Variational Dropout LSTM. :param input_size: 输入 `x` 的特征维度 @@ -248,8 +246,6 @@ class VarLSTM(VarRNNBase): class VarRNN(VarRNNBase): """ - 别名::class:`fastNLP.modules.VarRNN` :class:`fastNLP.modules.encoder.VarRNN` - Variational Dropout RNN. :param input_size: 输入 `x` 的特征维度 @@ -273,8 +269,6 @@ class VarRNN(VarRNNBase): class VarGRU(VarRNNBase): """ - 别名::class:`fastNLP.modules.VarGRU` :class:`fastNLP.modules.encoder.VarGRU` - Variational Dropout GRU. :param input_size: 输入 `x` 的特征维度 diff --git a/reproduction/text_classification/data/sstloader.py b/reproduction/text_classification/data/sstloader.py index b635a14a..4e860279 100644 --- a/reproduction/text_classification/data/sstloader.py +++ b/reproduction/text_classification/data/sstloader.py @@ -11,11 +11,7 @@ from reproduction.utils import check_dataloader_paths, get_tokenizer class SSTLoader(DataSetLoader): - URL = 'https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip' - DATA_DIR = 'sst/' - """ - 别名::class:`fastNLP.io.SSTLoader` :class:`fastNLP.io.dataset_loader.SSTLoader` 读取SST数据集, DataSet包含fields:: words: list(str) 需要分类的文本 target: str 文本的标签 @@ -23,6 +19,10 @@ class SSTLoader(DataSetLoader): :param subtree: 是否将数据展开为子树,扩充数据量. Default: ``False`` :param fine_grained: 是否使用SST-5标准,若 ``False`` , 使用SST-2。Default: ``False`` """ + + URL = 'https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip' + DATA_DIR = 'sst/' + def __init__(self, subtree=False, fine_grained=False): self.subtree = subtree tag_v = {'0': 'very negative', '1': 'negative', '2': 'neutral', diff --git a/reproduction/text_classification/model/awdlstm_module.py b/reproduction/text_classification/model/awdlstm_module.py index 87bfe730..a586ed2d 100644 --- a/reproduction/text_classification/model/awdlstm_module.py +++ b/reproduction/text_classification/model/awdlstm_module.py @@ -17,8 +17,6 @@ from .weight_drop import WeightDrop class LSTM(nn.Module): """ - 别名::class:`fastNLP.modules.LSTM` :class:`fastNLP.modules.encoder.lstm.LSTM` - LSTM 模块, 轻量封装的Pytorch LSTM. 
在提供seq_len的情况下,将自动使用pack_padded_sequence; 同时默认将forget gate的bias初始化 为1; 且可以应对DataParallel中LSTM的使用问题。 From 4caacadeae607ebd0699d05457213321874fb786 Mon Sep 17 00:00:00 2001 From: ChenXin Date: Wed, 4 Sep 2019 14:51:50 +0800 Subject: [PATCH 33/50] delete the alias in files. --- fastNLP/core/batch.py | 2 -- fastNLP/core/callback.py | 23 +++-------------------- fastNLP/core/dataset.py | 2 -- fastNLP/core/field.py | 6 ------ fastNLP/core/instance.py | 2 -- fastNLP/core/losses.py | 12 ------------ fastNLP/core/metrics.py | 7 ------- fastNLP/core/optimizer.py | 5 ----- fastNLP/core/sampler.py | 9 --------- fastNLP/core/tester.py | 2 -- fastNLP/core/trainer.py | 2 -- fastNLP/core/utils.py | 2 -- fastNLP/core/vocabulary.py | 2 -- fastNLP/io/embed_loader.py | 2 -- fastNLP/io/loader/classification.py | 6 ------ fastNLP/io/loader/conll.py | 2 -- fastNLP/io/loader/csv.py | 2 -- fastNLP/io/loader/json.py | 2 -- fastNLP/io/model_io.py | 4 ---- fastNLP/io/pipe/classification.py | 2 -- fastNLP/io/pipe/pipe.py | 4 +++- fastNLP/models/bert.py | 15 --------------- fastNLP/models/biaffine_parser.py | 8 -------- fastNLP/models/cnn_text_classification.py | 2 -- fastNLP/models/sequence_labeling.py | 4 ---- fastNLP/models/snli.py | 2 -- fastNLP/models/star_transformer.py | 8 -------- fastNLP/modules/decoder/crf.py | 5 +---- 28 files changed, 7 insertions(+), 137 deletions(-) diff --git a/fastNLP/core/batch.py b/fastNLP/core/batch.py index ff710b30..ad07341a 100644 --- a/fastNLP/core/batch.py +++ b/fastNLP/core/batch.py @@ -145,8 +145,6 @@ class BatchIter: class DataSetIter(BatchIter): """ - 别名::class:`fastNLP.DataSetIter` :class:`fastNLP.core.batch.DataSetIter` - DataSetIter 用于从 `DataSet` 中按一定的顺序, 依次按 ``batch_size`` 的大小将数据取出, 组成 `x` 和 `y`:: diff --git a/fastNLP/core/callback.py b/fastNLP/core/callback.py index 5167b09f..3cdc0f8d 100644 --- a/fastNLP/core/callback.py +++ b/fastNLP/core/callback.py @@ -96,8 +96,6 @@ except: class Callback(object): """ - 别名::class:`fastNLP.Callback` :class:`fastNLP.core.callback.Callback` - Callback是fastNLP中被设计用于增强 :class:`~fastNLP.Trainer` 的类。 如果Callback被传递给了 Trainer , 则 Trainer 会在对应的阶段调用Callback的函数, 具体调用时机可以通过 :doc:`trainer 模块` 查看。 @@ -436,8 +434,6 @@ class DistCallbackManager(CallbackManager): class GradientClipCallback(Callback): """ - 别名::class:`fastNLP.GradientClipCallback` :class:`fastNLP.core.callback.GradientClipCallback` - 每次backward前,将parameter的gradient clip到某个范围。 :param None,torch.Tensor,List[torch.Tensor] parameters: 一般通过model.parameters()获得。 @@ -481,8 +477,6 @@ class GradientClipCallback(Callback): class EarlyStopCallback(Callback): """ - 别名::class:`fastNLP.EarlyStopCallback` :class:`fastNLP.core.callback.EarlyStopCallback` - 多少个epoch没有变好就停止训练,相关类 :class:`EarlyStopError` :param int patience: epoch的数量 @@ -512,12 +506,10 @@ class EarlyStopCallback(Callback): class FitlogCallback(Callback): """ - 别名: :class:`fastNLP.FitlogCallback` :class:`fastNLP.core.callback.FitlogCallback` - 该callback可将loss和progress写入到fitlog中; 如果Trainer有dev的数据,将自动把dev的结果写入到log中; 同时还支持传入 - 一个(或多个)test数据集进行测试(只有在trainer具有dev时才能使用),每次在dev上evaluate之后会在这些数据集上验证一下。 - 并将验证结果写入到fitlog中。这些数据集的结果是根据dev上最好的结果报道的,即如果dev在第3个epoch取得了最佳,则 - fitlog中记录的关于这些数据集的结果就是来自第三个epoch的结果。 + 一个(或多个)test数据集进行测试(只有在trainer具有dev时才能使用),每次在dev上evaluate之后会在这些数据集上验证一下。 + 并将验证结果写入到fitlog中。这些数据集的结果是根据dev上最好的结果报道的,即如果dev在第3个epoch取得了最佳,则 + fitlog中记录的关于这些数据集的结果就是来自第三个epoch的结果。 :param ~fastNLP.DataSet,Dict[~fastNLP.DataSet] data: 传入DataSet对象,会使用多个Trainer中的metric对数据进行验证。如果需要 
传入多个DataSet请通过dict的方式传入,dict的key将作为对应dataset的name传递给fitlog。data的结果的名称以'data'开头。 @@ -611,8 +603,6 @@ class FitlogCallback(Callback): class EvaluateCallback(Callback): """ - 别名: :class:`fastNLP.EvaluateCallback` :class:`fastNLP.core.callback.EvaluateCallback` - 该callback用于扩展Trainer训练过程中只能对dev数据进行验证的问题。 :param ~fastNLP.DataSet,Dict[~fastNLP.DataSet] data: 传入DataSet对象,会使用多个Trainer中的metric对数据进行验证。如果需要传入多个 @@ -673,8 +663,6 @@ class EvaluateCallback(Callback): class LRScheduler(Callback): """ - 别名::class:`fastNLP.LRScheduler` :class:`fastNLP.core.callback.LRScheduler` - 对PyTorch LR Scheduler的包装以使得其可以被Trainer所使用 :param torch.optim.lr_scheduler._LRScheduler lr_scheduler: PyTorch的lr_scheduler @@ -695,7 +683,6 @@ class LRScheduler(Callback): class ControlC(Callback): """ - 别名::class:`fastNLP.ControlC` :class:`fastNLP.core.callback.ControlC` :param bool quit_all: 若为True,则检测到control+C 直接退出程序;否则只退出Trainer """ @@ -732,8 +719,6 @@ class SmoothValue(object): class LRFinder(Callback): """ - 别名::class:`fastNLP.LRFinder` :class:`fastNLP.core.callback.LRFinder` - 用第一个 epoch 找最佳的学习率,从第二个epoch开始应用它 :param float start_lr: 学习率下界 @@ -804,8 +789,6 @@ class LRFinder(Callback): class TensorboardCallback(Callback): """ - 别名::class:`fastNLP.TensorboardCallback` :class:`fastNLP.core.callback.TensorboardCallback` - 接受以下一个或多个字符串作为参数: - "model" - "loss" diff --git a/fastNLP/core/dataset.py b/fastNLP/core/dataset.py index 551cf1f8..441f9907 100644 --- a/fastNLP/core/dataset.py +++ b/fastNLP/core/dataset.py @@ -304,8 +304,6 @@ from ._logger import logger class DataSet(object): """ - 别名::class:`fastNLP.DataSet` :class:`fastNLP.core.dataset.DataSet` - fastNLP的数据容器,详细的使用方法见文档 :doc:`fastNLP.core.dataset` :param data: 如果为dict类型,则每个key的value应该为等长的list; 如果为list, diff --git a/fastNLP/core/field.py b/fastNLP/core/field.py index 859dfb1f..468c248d 100644 --- a/fastNLP/core/field.py +++ b/fastNLP/core/field.py @@ -464,8 +464,6 @@ def _get_ele_type_and_dim(cell: Any, dim=0): class Padder: """ - 别名::class:`fastNLP.Padder` :class:`fastNLP.core.field.Padder` - 所有padder都需要继承这个类,并覆盖__call__方法。 用于对batch进行padding操作。传入的element是inplace的,即直接修改element可能导致数据变化,建议inplace修改之前deepcopy一份。 @@ -534,8 +532,6 @@ class Padder: class AutoPadder(Padder): """ - 别名::class:`fastNLP.AutoPadder` :class:`fastNLP.core.field.AutoPadder` - 根据contents的数据自动判定是否需要做padding。 1 如果元素类型(元素类型是指field中最里层元素的数据类型, 可以通过FieldArray.dtype查看,比如['This', 'is', ...]的元素类 @@ -628,8 +624,6 @@ class AutoPadder(Padder): class EngChar2DPadder(Padder): """ - 别名::class:`fastNLP.EngChar2DPadder` :class:`fastNLP.core.field.EngChar2DPadder` - 用于为英语执行character级别的2D padding操作。对应的field内容应该类似[['T', 'h', 'i', 's'], ['a'], ['d', 'e', 'm', 'o']], 但这个Padder只能处理index为int的情况。 diff --git a/fastNLP/core/instance.py b/fastNLP/core/instance.py index 9a5d9edf..2285e4a4 100644 --- a/fastNLP/core/instance.py +++ b/fastNLP/core/instance.py @@ -10,8 +10,6 @@ __all__ = [ class Instance(object): """ - 别名::class:`fastNLP.Instance` :class:`fastNLP.core.instance.Instance` - Instance是fastNLP中对应一个sample的类。每个sample在fastNLP中是一个Instance对象。 Instance一般与 :class:`~fastNLP.DataSet` 一起使用, Instance的初始化如下面的Example所示:: diff --git a/fastNLP/core/losses.py b/fastNLP/core/losses.py index 7402a568..b2f5ce0a 100644 --- a/fastNLP/core/losses.py +++ b/fastNLP/core/losses.py @@ -167,8 +167,6 @@ class LossBase(object): class LossFunc(LossBase): """ - 别名::class:`fastNLP.LossFunc` :class:`fastNLP.core.losses.LossFunc` - 提供给用户使用自定义损失函数的类 :param func: 用户自行定义的损失函数,应当为一个函数或者callable(func)为True的ojbect @@ -200,8 +198,6 @@ class LossFunc(LossBase): class 
CrossEntropyLoss(LossBase): """ - 别名::class:`fastNLP.CrossEntropyLoss` :class:`fastNLP.core.losses.CrossEntropyLoss` - 交叉熵损失函数 :param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred` @@ -248,8 +244,6 @@ class CrossEntropyLoss(LossBase): class L1Loss(LossBase): """ - 别名::class:`fastNLP.L1Loss` :class:`fastNLP.core.losses.L1Loss` - L1损失函数 :param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred` @@ -270,8 +264,6 @@ class L1Loss(LossBase): class BCELoss(LossBase): """ - 别名::class:`fastNLP.BCELoss` :class:`fastNLP.core.losses.BCELoss` - 二分类交叉熵损失函数 :param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred` @@ -291,8 +283,6 @@ class BCELoss(LossBase): class NLLLoss(LossBase): """ - 别名::class:`fastNLP.NLLLoss` :class:`fastNLP.core.losses.NLLLoss` - 负对数似然损失函数 :param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred` @@ -315,8 +305,6 @@ class NLLLoss(LossBase): class LossInForward(LossBase): """ - 别名::class:`fastNLP.LossInForward` :class:`fastNLP.core.losses.LossInForward` - 从forward()函数返回结果中获取loss :param str loss_key: 在forward函数中loss的键名,默认为loss diff --git a/fastNLP/core/metrics.py b/fastNLP/core/metrics.py index c0f14c90..2dc6d9d8 100644 --- a/fastNLP/core/metrics.py +++ b/fastNLP/core/metrics.py @@ -294,9 +294,6 @@ class MetricBase(object): class AccuracyMetric(MetricBase): """ - - 别名::class:`fastNLP.AccuracyMetric` :class:`fastNLP.core.metrics.AccuracyMetric` - 准确率Metric(其它的Metric参见 :doc:`fastNLP.core.metrics` ) :param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred` @@ -565,8 +562,6 @@ def _check_tag_vocab_and_encoding_type(tag_vocab:Union[Vocabulary, dict], encodi class SpanFPreRecMetric(MetricBase): r""" - 别名::class:`fastNLP.SpanFPreRecMetric` :class:`fastNLP.core.metrics.SpanFPreRecMetric` - 在序列标注问题中,以span的方式计算F, pre, rec. 比如中文Part of speech中,会以character的方式进行标注,句子 `中国在亚洲` 对应的POS可能为(以BMES为例) ['B-NN', 'E-NN', 'S-DET', 'B-NN', 'E-NN']。该metric就是为类似情况下的F1计算。 @@ -832,8 +827,6 @@ def _pred_topk(y_prob, k=1): class ExtractiveQAMetric(MetricBase): r""" - 别名::class:`fastNLP.ExtractiveQAMetric` :class:`fastNLP.core.metrics.ExtractiveQAMetric` - 抽取式QA(如SQuAD)的metric. :param pred1: 参数映射表中 `pred1` 的映射关系,None表示映射关系为 `pred1` -> `pred1` diff --git a/fastNLP/core/optimizer.py b/fastNLP/core/optimizer.py index e95047b4..c30c7e34 100644 --- a/fastNLP/core/optimizer.py +++ b/fastNLP/core/optimizer.py @@ -17,7 +17,6 @@ from torch.optim.optimizer import Optimizer as TorchOptimizer class Optimizer(object): """ - 别名::class:`fastNLP.Optimizer` :class:`fastNLP.core.optimizer.Optimizer` :param model_params: a generator. E.g. ``model.parameters()`` for PyTorch models. :param kwargs: additional parameters. @@ -60,7 +59,6 @@ class NullOptimizer(Optimizer): class SGD(Optimizer): """ - 别名::class:`fastNLP.SGD` :class:`fastNLP.core.optimizer.SGD` :param float lr: learning rate. Default: 0.01 :param float momentum: momentum. Default: 0 @@ -82,7 +80,6 @@ class SGD(Optimizer): class Adam(Optimizer): """ - 别名::class:`fastNLP.Adam` :class:`fastNLP.core.optimizer.Adam` :param float lr: learning rate :param float weight_decay: @@ -105,8 +102,6 @@ class Adam(Optimizer): class AdamW(TorchOptimizer): r""" - 别名::class:`fastNLP.AdamW` :class:`fastNLP.core.optimizer.AdamW` - 对AdamW的实现,该实现应该会在pytorch更高版本中出现,https://github.com/pytorch/pytorch/pull/21250。这里提前加入 .. 
todo:: diff --git a/fastNLP/core/sampler.py b/fastNLP/core/sampler.py index 9ca04fa0..d0df9129 100644 --- a/fastNLP/core/sampler.py +++ b/fastNLP/core/sampler.py @@ -15,9 +15,6 @@ import numpy as np class Sampler(object): """ - 别名::class:`fastNLP.Sampler` :class:`fastNLP.core.sampler.Sampler` - - `Sampler` 类的基类. 规定以何种顺序取出data中的元素 子类必须实现 ``__call__`` 方法. 输入 `DataSet` 对象, 返回其中元素的下标序列 @@ -33,8 +30,6 @@ class Sampler(object): class SequentialSampler(Sampler): """ - 别名::class:`fastNLP.SequentialSampler` :class:`fastNLP.core.sampler.SequentialSampler` - 顺序取出元素的 `Sampler` """ @@ -45,8 +40,6 @@ class SequentialSampler(Sampler): class RandomSampler(Sampler): """ - 别名::class:`fastNLP.RandomSampler` :class:`fastNLP.core.sampler.RandomSampler` - 随机化取元素的 `Sampler` """ @@ -57,8 +50,6 @@ class RandomSampler(Sampler): class BucketSampler(Sampler): """ - 别名::class:`fastNLP.BucketSampler` :class:`fastNLP.core.sampler.BucketSampler` - 带Bucket的 `Random Sampler`. 可以随机地取出长度相似的元素 :param int num_buckets: bucket的数量 diff --git a/fastNLP/core/tester.py b/fastNLP/core/tester.py index e549df81..344e24a8 100644 --- a/fastNLP/core/tester.py +++ b/fastNLP/core/tester.py @@ -65,8 +65,6 @@ __all__ = [ class Tester(object): """ - 别名::class:`fastNLP.Tester` :class:`fastNLP.core.tester.Tester` - Tester是在提供数据,模型以及metric的情况下进行性能测试的类。需要传入模型,数据以及metric进行验证。 :param ~fastNLP.DataSet data: 需要测试的数据集 diff --git a/fastNLP/core/trainer.py b/fastNLP/core/trainer.py index a47f108b..9f262fb5 100644 --- a/fastNLP/core/trainer.py +++ b/fastNLP/core/trainer.py @@ -357,8 +357,6 @@ from ._logger import logger class Trainer(object): """ - 别名::class:`fastNLP.Trainer` :class:`fastNLP.core.trainer.Trainer` - Trainer在fastNLP中用于组织单任务的训练过程,可以避免用户在不同训练任务中重复撰写 (1) epoch循环; (2) 将数据分成不同的Batch; diff --git a/fastNLP/core/utils.py b/fastNLP/core/utils.py index fcb2a07b..814e0bd5 100644 --- a/fastNLP/core/utils.py +++ b/fastNLP/core/utils.py @@ -66,8 +66,6 @@ def _prepare_cache_filepath(filepath): def cache_results(_cache_fp, _refresh=False, _verbose=1): """ - 别名::class:`fastNLP.cache_results` :class:`fastNLP.core.uitls.cache_results` - cache_results是fastNLP中用于cache数据的装饰器。通过下面的例子看一下如何使用:: import time diff --git a/fastNLP/core/vocabulary.py b/fastNLP/core/vocabulary.py index b0f9650a..d4ff6077 100644 --- a/fastNLP/core/vocabulary.py +++ b/fastNLP/core/vocabulary.py @@ -66,8 +66,6 @@ def _check_build_status(func): class Vocabulary(object): """ - 别名::class:`fastNLP.Vocabulary` :class:`fastNLP.core.vocabulary.Vocabulary` - 用于构建, 存储和使用 `str` 到 `int` 的一一映射:: vocab = Vocabulary() diff --git a/fastNLP/io/embed_loader.py b/fastNLP/io/embed_loader.py index a157901f..73a7a1de 100644 --- a/fastNLP/io/embed_loader.py +++ b/fastNLP/io/embed_loader.py @@ -33,8 +33,6 @@ class EmbeddingOption(Option): class EmbedLoader: """ - 别名::class:`fastNLP.io.EmbedLoader` :class:`fastNLP.io.embed_loader.EmbedLoader` - 用于读取预训练的embedding, 读取结果可直接载入为模型参数。 """ diff --git a/fastNLP/io/loader/classification.py b/fastNLP/io/loader/classification.py index 4ebd58e1..9efcf5d2 100644 --- a/fastNLP/io/loader/classification.py +++ b/fastNLP/io/loader/classification.py @@ -24,8 +24,6 @@ from ...core.instance import Instance class YelpLoader(Loader): """ - 别名::class:`fastNLP.io.YelpLoader` :class:`fastNLP.io.loader.YelpLoader` - 原始数据中内容应该为, 每一行为一个sample,第一个逗号之前为target,第一个逗号之后为文本内容。 Example:: @@ -164,8 +162,6 @@ class YelpPolarityLoader(YelpLoader): class IMDBLoader(Loader): """ - 别名::class:`fastNLP.io.IMDBLoader` :class:`fastNLP.io.loader.IMDBLoader` - IMDBLoader读取后的数据将具有以下两列内容: raw_words: str, 
需要分类的文本; target: str, 文本的标签 DataSet具备以下的结构: @@ -244,8 +240,6 @@ class IMDBLoader(Loader): class SSTLoader(Loader): """ - 别名::class:`fastNLP.io.SSTLoader` :class:`fastNLP.io.loader.SSTLoader` - 读取之后的DataSet具有以下的结构 .. csv-table:: 下面是使用SSTLoader读取的DataSet所具备的field diff --git a/fastNLP/io/loader/conll.py b/fastNLP/io/loader/conll.py index 1bd1b448..f30b031f 100644 --- a/fastNLP/io/loader/conll.py +++ b/fastNLP/io/loader/conll.py @@ -27,8 +27,6 @@ from ...core.instance import Instance class ConllLoader(Loader): """ - 别名::class:`fastNLP.io.ConllLoader` :class:`fastNLP.io.loader.ConllLoader` - ConllLoader支持读取的数据格式: 以空行隔开两个sample,除了分割行,每一行用空格或者制表符隔开不同的元素。如下例所示: Example:: diff --git a/fastNLP/io/loader/csv.py b/fastNLP/io/loader/csv.py index 0d6e35fa..aaf38c00 100644 --- a/fastNLP/io/loader/csv.py +++ b/fastNLP/io/loader/csv.py @@ -12,8 +12,6 @@ from ...core.instance import Instance class CSVLoader(Loader): """ - 别名::class:`fastNLP.io.CSVLoader` :class:`fastNLP.io.loader.CSVLoader` - 读取CSV格式的数据集, 返回 ``DataSet`` 。 :param List[str] headers: CSV文件的文件头.定义每一列的属性名称,即返回的DataSet中`field`的名称 diff --git a/fastNLP/io/loader/json.py b/fastNLP/io/loader/json.py index 012dee5a..671769fe 100644 --- a/fastNLP/io/loader/json.py +++ b/fastNLP/io/loader/json.py @@ -12,8 +12,6 @@ from ...core.instance import Instance class JsonLoader(Loader): """ - 别名::class:`fastNLP.io.JsonLoader` :class:`fastNLP.io.loader.JsonLoader` - 读取json格式数据.数据必须按行存储,每行是一个包含各类属性的json对象 :param dict fields: 需要读入的json属性名称, 和读入后在DataSet中存储的field_name diff --git a/fastNLP/io/model_io.py b/fastNLP/io/model_io.py index a1899f51..9da921df 100644 --- a/fastNLP/io/model_io.py +++ b/fastNLP/io/model_io.py @@ -11,8 +11,6 @@ import torch class ModelLoader: """ - 别名::class:`fastNLP.io.ModelLoader` :class:`fastNLP.io.model_io.ModelLoader` - 用于读取模型 """ @@ -41,8 +39,6 @@ class ModelLoader: class ModelSaver(object): """ - 别名::class:`fastNLP.io.ModelSaver` :class:`fastNLP.io.model_io.ModelSaver` - 用于保存模型 Example:: diff --git a/fastNLP/io/pipe/classification.py b/fastNLP/io/pipe/classification.py index d1c7aa0e..3834a570 100644 --- a/fastNLP/io/pipe/classification.py +++ b/fastNLP/io/pipe/classification.py @@ -228,8 +228,6 @@ class YelpPolarityPipe(_CLSPipe): class SSTPipe(_CLSPipe): """ - 别名::class:`fastNLP.io.SSTPipe` :class:`fastNLP.io.pipe.SSTPipe` - 经过该Pipe之后,DataSet中具备的field如下所示 .. csv-table:: 下面是使用SSTPipe处理后的DataSet所具备的field diff --git a/fastNLP/io/pipe/pipe.py b/fastNLP/io/pipe/pipe.py index 12d9c1cb..db65ece6 100644 --- a/fastNLP/io/pipe/pipe.py +++ b/fastNLP/io/pipe/pipe.py @@ -9,7 +9,9 @@ from .. import DataBundle class Pipe: """ - 别名::class:`fastNLP.io.Pipe` :class:`fastNLP.io.pipe.Pipe` + .. todo:: + doc + """ def process(self, data_bundle: DataBundle) -> DataBundle: """ diff --git a/fastNLP/models/bert.py b/fastNLP/models/bert.py index 4a04bd6d..85c3af8c 100644 --- a/fastNLP/models/bert.py +++ b/fastNLP/models/bert.py @@ -44,9 +44,6 @@ from ..embeddings import BertEmbedding class BertForSequenceClassification(BaseModel): """ - 别名: :class:`fastNLP.models.BertForSequenceClassification` - :class:`fastNLP.models.bert.BertForSequenceClassification` - BERT model for classification. :param fastNLP.embeddings.BertEmbedding embed: 下游模型的编码器(encoder). @@ -90,9 +87,6 @@ class BertForSequenceClassification(BaseModel): class BertForSentenceMatching(BaseModel): """ - 别名: :class:`fastNLP.models.BertForSentenceMatching` - :class:`fastNLP.models.bert.BertForSentenceMatching` - BERT model for sentence matching. 
:param fastNLP.embeddings.BertEmbedding embed: 下游模型的编码器(encoder). @@ -135,9 +129,6 @@ class BertForSentenceMatching(BaseModel): class BertForMultipleChoice(BaseModel): """ - 别名: :class:`fastNLP.models.BertForMultipleChoice` - :class:`fastNLP.models.bert.BertForMultipleChoice` - BERT model for multiple choice. :param fastNLP.embeddings.BertEmbedding embed: 下游模型的编码器(encoder). @@ -185,9 +176,6 @@ class BertForMultipleChoice(BaseModel): class BertForTokenClassification(BaseModel): """ - 别名: :class:`fastNLP.models.BertForTokenClassification` - :class:`fastNLP.models.bert.BertForTokenClassification` - BERT model for token classification. :param fastNLP.embeddings.BertEmbedding embed: 下游模型的编码器(encoder). @@ -231,9 +219,6 @@ class BertForTokenClassification(BaseModel): class BertForQuestionAnswering(BaseModel): """ - 别名: :class:`fastNLP.models.BertForQuestionAnswering` - :class:`fastNLP.models.bert.BertForQuestionAnswering` - BERT model for classification. :param fastNLP.embeddings.BertEmbedding embed: 下游模型的编码器(encoder). diff --git a/fastNLP/models/biaffine_parser.py b/fastNLP/models/biaffine_parser.py index 455d27a7..5d094472 100644 --- a/fastNLP/models/biaffine_parser.py +++ b/fastNLP/models/biaffine_parser.py @@ -130,8 +130,6 @@ def _find_cycle(vertices, edges): class GraphParser(BaseModel): """ - 别名::class:`fastNLP.models.GraphParser` :class:`fastNLP.models.baffine_parser.GraphParser` - 基于图的parser base class, 支持贪婪解码和最大生成树解码 """ @@ -240,8 +238,6 @@ class LabelBilinear(nn.Module): class BiaffineParser(GraphParser): """ - 别名::class:`fastNLP.models.BiaffineParser` :class:`fastNLP.models.baffine_parser.BiaffineParser` - Biaffine Dependency Parser 实现. 论文参考 `Deep Biaffine Attention for Neural Dependency Parsing (Dozat and Manning, 2016) `_ . @@ -475,8 +471,6 @@ class BiaffineParser(GraphParser): class ParserLoss(LossFunc): """ - 别名::class:`fastNLP.models.ParserLoss` :class:`fastNLP.models.baffine_parser.ParserLoss` - 计算parser的loss :param pred1: [batch_size, seq_len, seq_len] 边预测logits @@ -500,8 +494,6 @@ class ParserLoss(LossFunc): class ParserMetric(MetricBase): """ - 别名::class:`fastNLP.models.ParserMetric` :class:`fastNLP.models.baffine_parser.ParserMetric` - 评估parser的性能 :param pred1: 边预测logits diff --git a/fastNLP/models/cnn_text_classification.py b/fastNLP/models/cnn_text_classification.py index 4bf9c4d1..65c20a55 100644 --- a/fastNLP/models/cnn_text_classification.py +++ b/fastNLP/models/cnn_text_classification.py @@ -18,8 +18,6 @@ from ..modules import encoder class CNNText(torch.nn.Module): """ - 别名::class:`fastNLP.models.CNNText` :class:`fastNLP.models.cnn_text_classification.CNNText` - 使用CNN进行文本分类的模型 'Yoon Kim. 2014. Convolution Neural Networks for Sentence Classification.' 
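The alias lines removed throughout this patch do not disappear from the generated documentation: the doc_process hook in fastNLP/doc_utils.py (registered in the package __init__ files earlier in this series, and reworked in the following patch) prepends the alias to each exported class's __doc__ at import time. A simplified, self-contained sketch of that idea follows; the helper name inject_alias_docs and its formatting details are illustrative rather than the exact fastNLP implementation::

    import inspect
    import sys

    def inject_alias_docs(package_module):
        """Prepend an alias note to every public class re-exported by a package.

        For each name in __all__, record both the short package-level path and
        the defining module's path, so hand-written alias lines in individual
        docstrings become unnecessary.
        """
        for name in getattr(package_module, '__all__', []):
            obj = getattr(package_module, name, None)
            if not inspect.isclass(obj) or obj.__doc__ is None:
                continue
            alias = ":class:`{}.{}`".format(package_module.__name__, name)
            canonical = ":class:`{}.{}`".format(obj.__module__, name)
            obj.__doc__ = "别名 {} {}\n{}".format(alias, canonical, obj.__doc__)

    # typical use at the bottom of a package __init__.py:
    # inject_alias_docs(sys.modules[__name__])

With such a hook in place, each class keeps a single canonical docstring, and the package-level alias is derived automatically instead of being maintained by hand in every file.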
diff --git a/fastNLP/models/sequence_labeling.py b/fastNLP/models/sequence_labeling.py index 6e839bea..d5bc250b 100644 --- a/fastNLP/models/sequence_labeling.py +++ b/fastNLP/models/sequence_labeling.py @@ -77,8 +77,6 @@ class BiLSTMCRF(BaseModel): class SeqLabeling(BaseModel): """ - 别名::class:`fastNLP.models.SeqLabeling` :class:`fastNLP.models.sequence_labeling.SeqLabeling` - 一个基础的Sequence labeling的模型。 用于做sequence labeling的基础类。结构包含一层Embedding,一层LSTM(单向,一层),一层FC,以及一层CRF。 @@ -156,8 +154,6 @@ class SeqLabeling(BaseModel): class AdvSeqLabel(nn.Module): """ - 别名::class:`fastNLP.models.AdvSeqLabel` :class:`fastNLP.models.sequence_labeling.AdvSeqLabel` - 更复杂的Sequence Labelling模型。结构为Embedding, LayerNorm, 双向LSTM(两层),FC,LayerNorm,DropOut,FC,CRF。 :param tuple(int,int),torch.FloatTensor,nn.Embedding,numpy.ndarray embed: Embedding的大小(传入tuple(int, int), diff --git a/fastNLP/models/snli.py b/fastNLP/models/snli.py index 97a14e9f..07303ddc 100644 --- a/fastNLP/models/snli.py +++ b/fastNLP/models/snli.py @@ -19,8 +19,6 @@ from ..embeddings.embedding import TokenEmbedding, Embedding class ESIM(BaseModel): """ - 别名::class:`fastNLP.models.ESIM` :class:`fastNLP.models.snli.ESIM` - ESIM model的一个PyTorch实现 论文参见: https://arxiv.org/pdf/1609.06038.pdf diff --git a/fastNLP/models/star_transformer.py b/fastNLP/models/star_transformer.py index 7fe0d343..e4d5af84 100644 --- a/fastNLP/models/star_transformer.py +++ b/fastNLP/models/star_transformer.py @@ -19,8 +19,6 @@ from ..core.const import Const class StarTransEnc(nn.Module): """ - 别名::class:`fastNLP.models.StarTransEnc` :class:`fastNLP.models.star_transformer.StarTransEnc` - 带word embedding的Star-Transformer Encoder :param embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 @@ -104,8 +102,6 @@ class _NLICls(nn.Module): class STSeqLabel(nn.Module): """ - 别名::class:`fastNLP.models.STSeqLabel` :class:`fastNLP.models.star_transformer.STSeqLabel` - 用于序列标注的Star-Transformer模型 :param embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 @@ -169,8 +165,6 @@ class STSeqLabel(nn.Module): class STSeqCls(nn.Module): """ - 别名::class:`fastNLP.models.STSeqCls` :class:`fastNLP.models.star_transformer.STSeqCls` - 用于分类任务的Star-Transformer :param embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 @@ -234,8 +228,6 @@ class STSeqCls(nn.Module): class STNLICls(nn.Module): """ - 别名::class:`fastNLP.models.STNLICls` :class:`fastNLP.models.star_transformer.STNLICls` - 用于自然语言推断(NLI)的Star-Transformer :param embed: 单词词典, 可以是 tuple, 包括(num_embedings, embedding_dim), 即 diff --git a/fastNLP/modules/decoder/crf.py b/fastNLP/modules/decoder/crf.py index e2a751f8..aeb73d76 100644 --- a/fastNLP/modules/decoder/crf.py +++ b/fastNLP/modules/decoder/crf.py @@ -166,10 +166,7 @@ def _is_transition_allowed(encoding_type, from_tag, from_label, to_tag, to_label class ConditionalRandomField(nn.Module): """ - 别名::class:`fastNLP.modules.ConditionalRandomField` :class:`fastNLP.modules.decoder.ConditionalRandomField` - - 条件随机场。 - 提供forward()以及viterbi_decode()两个方法,分别用于训练与inference。 + 条件随机场。提供forward()以及viterbi_decode()两个方法,分别用于训练与inference。 :param int num_tags: 标签的数量 :param bool include_start_end_trans: 是否考虑各个tag作为开始以及结尾的分数。 From a2e31584883abb68e4d7354ca0c95fc250e35605 Mon Sep 17 00:00:00 2001 From: ChenXin Date: Wed, 4 Sep 2019 15:50:01 +0800 Subject: [PATCH 34/50] update the auto alias tool --- fastNLP/doc_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fastNLP/doc_utils.py b/fastNLP/doc_utils.py index 924b7a6a..5801dd53 100644 --- a/fastNLP/doc_utils.py +++ 
b/fastNLP/doc_utils.py @@ -13,7 +13,8 @@ def doc_process(m): while 1: defined_m = sys.modules[module_name] if "undocumented" not in defined_m.__doc__ and name in defined_m.__all__: - obj.__doc__ = r"定义在 :class:`" + module_name + "." + name + "`\n" + obj.__doc__ + obj.__doc__ = r"别名 :class:`" + m.__name__ + "." + name + "`" \ + + " :class:`" + module_name + "." + name + "`\n" + obj.__doc__ break module_name = ".".join(module_name.split('.')[:-1]) if module_name == m.__name__: From b1fe5f5321a1953b41c544c92d074becde003194 Mon Sep 17 00:00:00 2001 From: ChenXin Date: Wed, 4 Sep 2019 16:53:31 +0800 Subject: [PATCH 35/50] split the class's doc & __init__'s doc (core part) --- fastNLP/core/batch.py | 36 ++++++------ fastNLP/core/callback.py | 115 +++++++++++++++++++++---------------- fastNLP/core/dataset.py | 21 +++---- fastNLP/core/field.py | 12 ++-- fastNLP/core/instance.py | 3 +- fastNLP/core/losses.py | 23 ++++---- fastNLP/core/metrics.py | 66 ++++++++++----------- fastNLP/core/optimizer.py | 56 +++++++++++------- fastNLP/core/predictor.py | 6 +- fastNLP/core/sampler.py | 12 ++-- fastNLP/core/tester.py | 49 ++++++++-------- fastNLP/core/trainer.py | 98 +++++++++++++++---------------- fastNLP/core/vocabulary.py | 28 ++++----- 13 files changed, 286 insertions(+), 239 deletions(-) diff --git a/fastNLP/core/batch.py b/fastNLP/core/batch.py index ad07341a..b14b21de 100644 --- a/fastNLP/core/batch.py +++ b/fastNLP/core/batch.py @@ -9,15 +9,16 @@ __all__ = [ ] import atexit +from numbers import Number import numpy as np import torch import torch.utils.data -from numbers import Number -from .sampler import SequentialSampler -from .dataset import DataSet from ._logger import logger +from .dataset import DataSet +from .sampler import SequentialSampler + _python_is_exit = False @@ -153,23 +154,26 @@ class DataSetIter(BatchIter): for batch_x, batch_y in batch: # do stuff ... - :param dataset: :class:`~fastNLP.DataSet` 对象, 数据集 - :param int batch_size: 取出的batch大小 - :param sampler: 规定使用的 :class:`~fastNLP.Sampler` 方式. 若为 ``None`` , 使用 :class:`~fastNLP.SequentialSampler`. - - Default: ``None`` - :param bool as_numpy: 若为 ``True`` , 输出batch为 numpy.array. 否则为 :class:`torch.Tensor`. - - Default: ``False`` - :param int num_workers: 使用多少个进程来预处理数据 - :param bool pin_memory: 是否将产生的tensor使用pin memory, 可能会加快速度。 - :param bool drop_last: 如果最后一个batch没有batch_size这么多sample,就扔掉最后一个 - :param timeout: - :param worker_init_fn: 在每个worker启动时调用该函数,会传入一个值,该值是worker的index。 """ def __init__(self, dataset, batch_size=1, sampler=None, as_numpy=False, num_workers=0, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None): + """ + + :param dataset: :class:`~fastNLP.DataSet` 对象, 数据集 + :param int batch_size: 取出的batch大小 + :param sampler: 规定使用的 :class:`~fastNLP.Sampler` 方式. 若为 ``None`` , 使用 :class:`~fastNLP.SequentialSampler`. + + Default: ``None`` + :param bool as_numpy: 若为 ``True`` , 输出batch为 numpy.array. 否则为 :class:`torch.Tensor`. 
+ + Default: ``False`` + :param int num_workers: 使用多少个进程来预处理数据 + :param bool pin_memory: 是否将产生的tensor使用pin memory, 可能会加快速度。 + :param bool drop_last: 如果最后一个batch没有batch_size这么多sample,就扔掉最后一个 + :param timeout: + :param worker_init_fn: 在每个worker启动时调用该函数,会传入一个值,该值是worker的index。 + """ super().__init__() assert isinstance(dataset, DataSet) if not isinstance(sampler, torch.utils.data.Sampler): diff --git a/fastNLP/core/callback.py b/fastNLP/core/callback.py index 3cdc0f8d..fe198acc 100644 --- a/fastNLP/core/callback.py +++ b/fastNLP/core/callback.py @@ -317,9 +317,11 @@ def _transfer(func): class CallbackManager(Callback): + """ + 内部使用的Callback管理类 + """ def __init__(self, env, callbacks=None): """ - 内部使用的Callback管理类 :param dict env: The key is the name of the Trainer attribute(str). The value is the attribute itself. :param List[Callback] callbacks: @@ -435,23 +437,23 @@ class DistCallbackManager(CallbackManager): class GradientClipCallback(Callback): """ 每次backward前,将parameter的gradient clip到某个范围。 - - :param None,torch.Tensor,List[torch.Tensor] parameters: 一般通过model.parameters()获得。 - 如果为None则默认对Trainer的model中所有参数进行clip - :param float clip_value: 将gradient 限制到[-clip_value, clip_value]。clip_value应该为正数 - :param str clip_type: 支持'norm', 'value' - 两种:: - - 1 'norm', 将gradient的norm rescale到[-clip_value, clip_value] - - 2 'value', 将gradient限制在[-clip_value, clip_value], - 小于-clip_value的gradient被赋值为-clip_value; - 大于clip_value的gradient被赋值为clip_value. - """ def __init__(self, parameters=None, clip_value=1, clip_type='norm'): + """ + :param None,torch.Tensor,List[torch.Tensor] parameters: 一般通过model.parameters()获得。 + 如果为None则默认对Trainer的model中所有参数进行clip + :param float clip_value: 将gradient 限制到[-clip_value, clip_value]。clip_value应该为正数 + :param str clip_type: 支持'norm', 'value' + 两种:: + + 1 'norm', 将gradient的norm rescale到[-clip_value, clip_value] + + 2 'value', 将gradient限制在[-clip_value, clip_value], + 小于-clip_value的gradient被赋值为-clip_value; + 大于clip_value的gradient被赋值为clip_value. 
+ """ super().__init__() from torch import nn @@ -477,12 +479,14 @@ class GradientClipCallback(Callback): class EarlyStopCallback(Callback): """ - 多少个epoch没有变好就停止训练,相关类 :class:`EarlyStopError` - - :param int patience: epoch的数量 + 多少个epoch没有变好就停止训练,相关类 :class:`~fastNLP.core.callback.EarlyStopError` """ def __init__(self, patience): + """ + + :param int patience: epoch的数量 + """ super(EarlyStopCallback, self).__init__() self.patience = patience self.wait = 0 @@ -510,17 +514,19 @@ class FitlogCallback(Callback): 一个(或多个)test数据集进行测试(只有在trainer具有dev时才能使用),每次在dev上evaluate之后会在这些数据集上验证一下。 并将验证结果写入到fitlog中。这些数据集的结果是根据dev上最好的结果报道的,即如果dev在第3个epoch取得了最佳,则 fitlog中记录的关于这些数据集的结果就是来自第三个epoch的结果。 - - :param ~fastNLP.DataSet,Dict[~fastNLP.DataSet] data: 传入DataSet对象,会使用多个Trainer中的metric对数据进行验证。如果需要 - 传入多个DataSet请通过dict的方式传入,dict的key将作为对应dataset的name传递给fitlog。data的结果的名称以'data'开头。 - :param ~fastNLP.Tester,Dict[~fastNLP.Tester] tester: Tester对象,将在on_valid_end时调用。tester的结果的名称以'tester'开头 - :param int log_loss_every: 多少个step记录一次loss(记录的是这几个batch的loss平均值),如果数据集较大建议将该值设置得 - 大一些,不然会导致log文件巨大。默认为0, 即不要记录loss。 - :param int verbose: 是否在终端打印evaluation的结果,0不打印。 - :param bool log_exception: fitlog是否记录发生的exception信息 """ def __init__(self, data=None, tester=None, log_loss_every=0, verbose=0, log_exception=False): + """ + + :param ~fastNLP.DataSet,Dict[~fastNLP.DataSet] data: 传入DataSet对象,会使用多个Trainer中的metric对数据进行验证。如果需要 + 传入多个DataSet请通过dict的方式传入,dict的key将作为对应dataset的name传递给fitlog。data的结果的名称以'data'开头。 + :param ~fastNLP.Tester,Dict[~fastNLP.Tester] tester: Tester对象,将在on_valid_end时调用。tester的结果的名称以'tester'开头 + :param int log_loss_every: 多少个step记录一次loss(记录的是这几个batch的loss平均值),如果数据集较大建议将该值设置得 + 大一些,不然会导致log文件巨大。默认为0, 即不要记录loss。 + :param int verbose: 是否在终端打印evaluation的结果,0不打印。 + :param bool log_exception: fitlog是否记录发生的exception信息 + """ super().__init__() self.datasets = {} self.testers = {} @@ -604,13 +610,14 @@ class FitlogCallback(Callback): class EvaluateCallback(Callback): """ 该callback用于扩展Trainer训练过程中只能对dev数据进行验证的问题。 - - :param ~fastNLP.DataSet,Dict[~fastNLP.DataSet] data: 传入DataSet对象,会使用多个Trainer中的metric对数据进行验证。如果需要传入多个 - DataSet请通过dict的方式传入。 - :param ~fastNLP.Tester,Dict[~fastNLP.DataSet] tester: Tester对象,将在on_valid_end时调用。 """ def __init__(self, data=None, tester=None): + """ + :param ~fastNLP.DataSet,Dict[~fastNLP.DataSet] data: 传入DataSet对象,会使用多个Trainer中的metric对数据进行验证。如果需要传入多个 + DataSet请通过dict的方式传入。 + :param ~fastNLP.Tester,Dict[~fastNLP.DataSet] tester: Tester对象,将在on_valid_end时调用。 + """ super().__init__() self.datasets = {} self.testers = {} @@ -664,12 +671,12 @@ class EvaluateCallback(Callback): class LRScheduler(Callback): """ 对PyTorch LR Scheduler的包装以使得其可以被Trainer所使用 - - :param torch.optim.lr_scheduler._LRScheduler lr_scheduler: PyTorch的lr_scheduler """ def __init__(self, lr_scheduler): - + """ + :param torch.optim.lr_scheduler._LRScheduler lr_scheduler: PyTorch的lr_scheduler + """ super(LRScheduler, self).__init__() import torch.optim if isinstance(lr_scheduler, torch.optim.lr_scheduler._LRScheduler): @@ -683,12 +690,13 @@ class LRScheduler(Callback): class ControlC(Callback): """ - - :param bool quit_all: 若为True,则检测到control+C 直接退出程序;否则只退出Trainer + 检测到 control+C 时的反馈 """ def __init__(self, quit_all): - + """ + :param bool quit_all: 若为True,则检测到control+C 直接退出程序;否则只退出Trainer + """ super(ControlC, self).__init__() if type(quit_all) != bool: raise ValueError("In KeyBoardInterrupt, quit_all arguemnt must be a bool.") @@ -720,13 +728,14 @@ class SmoothValue(object): class LRFinder(Callback): """ 用第一个 epoch 找最佳的学习率,从第二个epoch开始应用它 - 
- :param float start_lr: 学习率下界 - :param float end_lr: 学习率上界 """ def __init__(self, start_lr=1e-6, end_lr=10): + """ + :param float start_lr: 学习率下界 + :param float end_lr: 学习率上界 + """ super(LRFinder, self).__init__() self.start_lr, self.end_lr = start_lr, end_lr @@ -864,13 +873,15 @@ class TensorboardCallback(Callback): class WarmupCallback(Callback): """ 按一定的周期调节Learning rate的大小。 - - :param int,float warmup: 如果warmup为int,则在该step之前,learning rate根据schedule的策略变化; 如果warmup为float, - 如0.1, 则前10%的step是按照schedule策略调整learning rate。 - :param str schedule: 以哪种方式调整。linear: 前warmup的step上升到指定的learning rate(从Trainer中的optimizer处获取的), 后 - warmup的step下降到0; constant前warmup的step上升到指定learning rate,后面的step保持learning rate. """ def __init__(self, warmup=0.1, schedule='constant'): + """ + + :param int,float warmup: 如果warmup为int,则在该step之前,learning rate根据schedule的策略变化; 如果warmup为float, + 如0.1, 则前10%的step是按照schedule策略调整learning rate。 + :param str schedule: 以哪种方式调整。linear: 前warmup的step上升到指定的learning rate(从Trainer中的optimizer处获取的), 后 + warmup的step下降到0; constant前warmup的step上升到指定learning rate,后面的step保持learning rate. + """ super().__init__() self.warmup = max(warmup, 0.) @@ -920,13 +931,15 @@ class SaveModelCallback(Callback): -epoch:1_step:40_{metric_key}:{evaluate_performance}.pt -2019-07-03-15-10-00 -epoch:0_step:20_{metric_key}:{evaluate_performance}.pt # metric是给定的metric_key, evaluate_perfomance是性能 - - :param str save_dir: 将模型存放在哪个目录下,会在该目录下创建以时间戳命名的目录,并存放模型 - :param int top: 保存dev表现top多少模型。-1为保存所有模型。 - :param bool only_param: 是否只保存模型d饿权重。 - :param save_on_exception: 发生exception时,是否保存一份发生exception的模型。模型名称为epoch:x_step:x_Exception:{exception_name}. """ def __init__(self, save_dir, top=3, only_param=False, save_on_exception=False): + """ + + :param str save_dir: 将模型存放在哪个目录下,会在该目录下创建以时间戳命名的目录,并存放模型 + :param int top: 保存dev表现top多少模型。-1为保存所有模型。 + :param bool only_param: 是否只保存模型d饿权重。 + :param save_on_exception: 发生exception时,是否保存一份发生exception的模型。模型名称为epoch:x_step:x_Exception:{exception_name}. 
+ """ super().__init__() if not os.path.isdir(save_dir): @@ -992,11 +1005,13 @@ class SaveModelCallback(Callback): class CallbackException(BaseException): """ 当需要通过callback跳出训练的时候可以通过抛出CallbackException并在on_exception中捕获这个值。 - - :param str msg: Exception的信息。 """ def __init__(self, msg): + """ + + :param str msg: Exception的信息。 + """ super(CallbackException, self).__init__(msg) diff --git a/fastNLP/core/dataset.py b/fastNLP/core/dataset.py index 441f9907..ebdc780f 100644 --- a/fastNLP/core/dataset.py +++ b/fastNLP/core/dataset.py @@ -288,30 +288,31 @@ __all__ = [ ] import _pickle as pickle -import warnings +from copy import deepcopy import numpy as np -from copy import deepcopy +from ._logger import logger +from .const import Const +from .field import AppendToTargetOrInputException from .field import AutoPadder from .field import FieldArray +from .field import SetInputOrTargetException from .instance import Instance from .utils import _get_func_signature -from .field import AppendToTargetOrInputException -from .field import SetInputOrTargetException -from .const import Const -from ._logger import logger + class DataSet(object): """ fastNLP的数据容器,详细的使用方法见文档 :doc:`fastNLP.core.dataset` - - :param data: 如果为dict类型,则每个key的value应该为等长的list; 如果为list, - 每个元素应该为具有相同field的 :class:`~fastNLP.Instance` 。 - """ def __init__(self, data=None): + """ + + :param data: 如果为dict类型,则每个key的value应该为等长的list; 如果为list, + 每个元素应该为具有相同field的 :class:`~fastNLP.Instance` 。 + """ self.field_arrays = {} if data is not None: if isinstance(data, dict): diff --git a/fastNLP/core/field.py b/fastNLP/core/field.py index 468c248d..82fcc523 100644 --- a/fastNLP/core/field.py +++ b/fastNLP/core/field.py @@ -468,18 +468,18 @@ class Padder: 用于对batch进行padding操作。传入的element是inplace的,即直接修改element可能导致数据变化,建议inplace修改之前deepcopy一份。 .. 
py:function:: __call__(self, contents, field_name, field_ele_dtype): + + """ + + def __init__(self, pad_val=0, **kwargs): + """ - 传入的是List内容。假设有以下的DataSet。 - :param List[Any] contents: 传入的element是inplace的,即直接修改element可能导致数据变化,建议inplace修改之前 deepcopy一份。 :param str, field_name: field的名称。 :param np.int64,np.float64,np.str,None, field_ele_dtype: 该field的内层元素的类型。如果该field的ignore_type为True,该这个值为None。 :return: np.array([padded_element]) - - """ - - def __init__(self, pad_val=0, **kwargs): + """ self.pad_val = pad_val def set_pad_val(self, pad_val): diff --git a/fastNLP/core/instance.py b/fastNLP/core/instance.py index 2285e4a4..9460b5e4 100644 --- a/fastNLP/core/instance.py +++ b/fastNLP/core/instance.py @@ -37,7 +37,8 @@ class Instance(object): def items(self): """ 返回一个迭代器,迭代器返回两个内容,第一个内容是field_name, 第二个内容是field_value - :return: + + :return: 一个迭代器 """ return self.fields.items() diff --git a/fastNLP/core/losses.py b/fastNLP/core/losses.py index b2f5ce0a..9b32babb 100644 --- a/fastNLP/core/losses.py +++ b/fastNLP/core/losses.py @@ -20,7 +20,6 @@ from collections import defaultdict import torch import torch.nn.functional as F -from ..core.const import Const from .utils import _CheckError from .utils import _CheckRes from .utils import _build_args @@ -28,7 +27,7 @@ from .utils import _check_arg_dict_list from .utils import _check_function_or_method from .utils import _get_func_signature from .utils import seq_len_to_mask -import warnings +from ..core.const import Const class LossBase(object): @@ -284,15 +283,17 @@ class BCELoss(LossBase): class NLLLoss(LossBase): """ 负对数似然损失函数 - - :param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred` - :param target: 参数映射表中 `target` 的映射关系,None表示映射关系为 `target` -> `target` - :param ignore_idx: ignore的index,在计算loss时将忽略target中标号为ignore_idx的内容, 可以通过该值代替 - 传入seq_len. - :param str reduction: 支持 `mean` ,`sum` 和 `none` . """ def __init__(self, pred=None, target=None, ignore_idx=-100, reduction='mean'): + """ + + :param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred` + :param target: 参数映射表中 `target` 的映射关系,None表示映射关系为 `target` -> `target` + :param ignore_idx: ignore的index,在计算loss时将忽略target中标号为ignore_idx的内容, 可以通过该值代替 + 传入seq_len. + :param str reduction: 支持 `mean` ,`sum` 和 `none` . 
+ """ super(NLLLoss, self).__init__() self._init_param_map(pred=pred, target=target) assert reduction in ('mean', 'sum', 'none') @@ -306,11 +307,13 @@ class NLLLoss(LossBase): class LossInForward(LossBase): """ 从forward()函数返回结果中获取loss - - :param str loss_key: 在forward函数中loss的键名,默认为loss """ def __init__(self, loss_key=Const.LOSS): + """ + + :param str loss_key: 在forward函数中loss的键名,默认为loss + """ super().__init__() if not isinstance(loss_key, str): raise TypeError(f"Only str allowed for loss_key, got {type(loss_key)}.") diff --git a/fastNLP/core/metrics.py b/fastNLP/core/metrics.py index 2dc6d9d8..ec1a1864 100644 --- a/fastNLP/core/metrics.py +++ b/fastNLP/core/metrics.py @@ -10,7 +10,10 @@ __all__ = [ ] import inspect +import warnings +from abc import abstractmethod from collections import defaultdict +from typing import Union import numpy as np import torch @@ -22,9 +25,7 @@ from .utils import _check_arg_dict_list from .utils import _get_func_signature from .utils import seq_len_to_mask from .vocabulary import Vocabulary -from abc import abstractmethod -import warnings -from typing import Union + class MetricBase(object): """ @@ -295,13 +296,15 @@ class MetricBase(object): class AccuracyMetric(MetricBase): """ 准确率Metric(其它的Metric参见 :doc:`fastNLP.core.metrics` ) - - :param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred` - :param target: 参数映射表中 `target` 的映射关系,None表示映射关系为 `target` -> `target` - :param seq_len: 参数映射表中 `seq_len` 的映射关系,None表示映射关系为 `seq_len` -> `seq_len` """ def __init__(self, pred=None, target=None, seq_len=None): + """ + + :param pred: 参数映射表中 `pred` 的映射关系,None表示映射关系为 `pred` -> `pred` + :param target: 参数映射表中 `target` 的映射关系,None表示映射关系为 `target` -> `target` + :param seq_len: 参数映射表中 `seq_len` 的映射关系,None表示映射关系为 `seq_len` -> `seq_len` + """ super().__init__() @@ -584,25 +587,23 @@ class SpanFPreRecMetric(MetricBase): 'rec-label':xxx, ... } - - :param tag_vocab: 标签的 :class:`~fastNLP.Vocabulary` 。支持的标签为"B"(没有label);或"B-xxx"(xxx为某种label,比如POS中的NN), - 在解码时,会将相同xxx的认为是同一个label,比如['B-NN', 'E-NN']会被合并为一个'NN'. - :param str pred: 用该key在evaluate()时从传入dict中取出prediction数据。 为None,则使用 `pred` 取数据 - :param str target: 用该key在evaluate()时从传入dict中取出target数据。 为None,则使用 `target` 取数据 - :param str seq_len: 用该key在evaluate()时从传入dict中取出sequence length数据。为None,则使用 `seq_len` 取数据。 - :param str encoding_type: 目前支持bio, bmes, bmeso, bioes。默认为None,通过tag_vocab自动判断. - :param list ignore_labels: str 组成的list. 这个list中的class不会被用于计算。例如在POS tagging时传入['NN'],则不会计算'NN'这 - 个label - :param bool only_gross: 是否只计算总的f1, precision, recall的值;如果为False,不仅返回总的f1, pre, rec, 还会返回每个 - label的f1, pre, rec - :param str f_type: `micro` 或 `macro` . `micro` :通过先计算总体的TP,FN和FP的数量,再计算f, precision, recall; `macro` : - 分布计算每个类别的f, precision, recall,然后做平均(各类别f的权重相同) - :param float beta: f_beta分数, :math:`f_{beta} = \frac{(1 + {beta}^{2})*(pre*rec)}{({beta}^{2}*pre + rec)}` . - 常用为beta=0.5, 1, 2. 若为0.5则精确率的权重高于召回率;若为1,则两者平等;若为2,则召回率权重高于精确率。 """ def __init__(self, tag_vocab, pred=None, target=None, seq_len=None, encoding_type=None, ignore_labels=None, only_gross=True, f_type='micro', beta=1): + r""" + + :param tag_vocab: 标签的 :class:`~fastNLP.Vocabulary` 。支持的标签为"B"(没有label);或"B-xxx"(xxx为某种label,比如POS中的NN), + 在解码时,会将相同xxx的认为是同一个label,比如['B-NN', 'E-NN']会被合并为一个'NN'. 
+ :param str pred: 用该key在evaluate()时从传入dict中取出prediction数据。 为None,则使用 `pred` 取数据 + :param str target: 用该key在evaluate()时从传入dict中取出target数据。 为None,则使用 `target` 取数据 + :param str seq_len: 用该key在evaluate()时从传入dict中取出sequence length数据。为None,则使用 `seq_len` 取数据。 + :param str encoding_type: 目前支持bio, bmes, bmeso, bioes。默认为None,通过tag_vocab自动判断. + :param list ignore_labels: str 组成的list. 这个list中的class不会被用于计算。例如在POS tagging时传入['NN'],则不会计算'NN'个label + :param bool only_gross: 是否只计算总的f1, precision, recall的值;如果为False,不仅返回总的f1, pre, rec, 还会返回每个label的f1, pre, rec + :param str f_type: `micro` 或 `macro` . `micro` :通过先计算总体的TP,FN和FP的数量,再计算f, precision, recall; `macro` : 分布计算每个类别的f, precision, recall,然后做平均(各类别f的权重相同) + :param float beta: f_beta分数, :math:`f_{beta} = \frac{(1 + {beta}^{2})*(pre*rec)}{({beta}^{2}*pre + rec)}` . 常用为 `beta=0.5, 1, 2` 若为0.5则精确率的权重高于召回率;若为1,则两者平等;若为2,则召回率权重高于精确率。 + """ if not isinstance(tag_vocab, Vocabulary): raise TypeError("tag_vocab can only be fastNLP.Vocabulary, not {}.".format(type(tag_vocab))) @@ -829,20 +830,21 @@ class ExtractiveQAMetric(MetricBase): r""" 抽取式QA(如SQuAD)的metric. - :param pred1: 参数映射表中 `pred1` 的映射关系,None表示映射关系为 `pred1` -> `pred1` - :param pred2: 参数映射表中 `pred2` 的映射关系,None表示映射关系为 `pred2` -> `pred2` - :param target1: 参数映射表中 `target1` 的映射关系,None表示映射关系为 `target1` -> `target1` - :param target2: 参数映射表中 `target2` 的映射关系,None表示映射关系为 `target2` -> `target2` - :param float beta: f_beta分数, :math:`f_{beta} = \frac{(1 + {beta}^{2})*(pre*rec)}{({beta}^{2}*pre + rec)}` . - 常用为beta=0.5, 1, 2. 若为0.5则精确率的权重高于召回率;若为1,则两者平等;若为2,则召回率权重高于精确率。 - :param bool right_open: right_open为true表示start跟end指针指向一个左闭右开区间,为false表示指向一个左闭右闭区间。 - :param bool print_predict_stat: True则输出预测答案是否为空与正确答案是否为空的统计信息, False则不输出 - """ def __init__(self, pred1=None, pred2=None, target1=None, target2=None, beta=1, right_open=True, print_predict_stat=False): - + r""" + + :param pred1: 参数映射表中 `pred1` 的映射关系,None表示映射关系为 `pred1` -> `pred1` + :param pred2: 参数映射表中 `pred2` 的映射关系,None表示映射关系为 `pred2` -> `pred2` + :param target1: 参数映射表中 `target1` 的映射关系,None表示映射关系为 `target1` -> `target1` + :param target2: 参数映射表中 `target2` 的映射关系,None表示映射关系为 `target2` -> `target2` + :param float beta: f_beta分数, :math:`f_{beta} = \frac{(1 + {beta}^{2})*(pre*rec)}{({beta}^{2}*pre + rec)}` . + 常用为beta=0.5, 1, 2. 若为0.5则精确率的权重高于召回率;若为1,则两者平等;若为2,则召回率权重高于精确率。 + :param bool right_open: right_open为true表示start跟end指针指向一个左闭右开区间,为false表示指向一个左闭右闭区间。 + :param bool print_predict_stat: True则输出预测答案是否为空与正确答案是否为空的统计信息, False则不输出 + """ super(ExtractiveQAMetric, self).__init__() self._init_param_map(pred1=pred1, pred2=pred2, target1=target1, target2=target2) diff --git a/fastNLP/core/optimizer.py b/fastNLP/core/optimizer.py index c30c7e34..5e7c1cba 100644 --- a/fastNLP/core/optimizer.py +++ b/fastNLP/core/optimizer.py @@ -9,20 +9,23 @@ __all__ = [ "AdamW" ] -import torch import math + import torch from torch.optim.optimizer import Optimizer as TorchOptimizer class Optimizer(object): """ - - :param model_params: a generator. E.g. ``model.parameters()`` for PyTorch models. - :param kwargs: additional parameters. + Optimizer """ def __init__(self, model_params, **kwargs): + """ + + :param model_params: a generator. E.g. ``model.parameters()`` for PyTorch models. + :param kwargs: additional parameters. 
+ """ if model_params is not None and not hasattr(model_params, "__next__"): raise RuntimeError("model parameters should be a generator, rather than {}.".format(type(model_params))) self.model_params = model_params @@ -59,13 +62,15 @@ class NullOptimizer(Optimizer): class SGD(Optimizer): """ - - :param float lr: learning rate. Default: 0.01 - :param float momentum: momentum. Default: 0 - :param model_params: a generator. E.g. ``model.parameters()`` for PyTorch models. + SGD """ def __init__(self, lr=0.001, momentum=0, model_params=None): + """ + :param float lr: learning rate. Default: 0.01 + :param float momentum: momentum. Default: 0 + :param model_params: a generator. E.g. ``model.parameters()`` for PyTorch models. + """ if not isinstance(lr, float): raise TypeError("learning rate has to be float.") super(SGD, self).__init__(model_params, lr=lr, momentum=momentum) @@ -81,12 +86,17 @@ class SGD(Optimizer): class Adam(Optimizer): """ - :param float lr: learning rate - :param float weight_decay: - :param model_params: a generator. E.g. ``model.parameters()`` for PyTorch models. """ def __init__(self, lr=0.001, weight_decay=0, betas=(0.9, 0.999), eps=1e-8, amsgrad=False, model_params=None): + """ + + :param float lr: learning rate + :param float weight_decay: + :param eps: + :param amsgrad: + :param model_params: a generator. E.g. ``model.parameters()`` for PyTorch models. + """ if not isinstance(lr, float): raise TypeError("learning rate has to be float.") super(Adam, self).__init__(model_params, lr=lr, betas=betas, eps=eps, amsgrad=amsgrad, @@ -110,17 +120,6 @@ class AdamW(TorchOptimizer): The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_. The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_. - :param params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - :param lr (float, optional): learning rate (default: 1e-3) - :param betas (Tuple[float, float], optional): coefficients used for computing - running averages of gradient and its square (default: (0.9, 0.99)) - :param eps (float, optional): term added to the denominator to improve - numerical stability (default: 1e-8) - :param weight_decay (float, optional): weight decay coefficient (default: 1e-2) - algorithm from the paper `On the Convergence of Adam and Beyond`_ - (default: False) - .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. 
_Decoupled Weight Decay Regularization: @@ -131,6 +130,19 @@ class AdamW(TorchOptimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False): + """ + + :param params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + :param lr (float, optional): learning rate (default: 1e-3) + :param betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.99)) + :param eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + :param weight_decay (float, optional): weight decay coefficient (default: 1e-2) + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + """ if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: diff --git a/fastNLP/core/predictor.py b/fastNLP/core/predictor.py index c6b8fc90..e4112d5f 100644 --- a/fastNLP/core/predictor.py +++ b/fastNLP/core/predictor.py @@ -20,11 +20,13 @@ class Predictor(object): 与测试器(Tester)不同的是,predictor不关心模型性能的评价指标,只做inference。 这是一个fastNLP调用的高级模型包装器。它与Trainer、Tester不共享任何操作。 - - :param torch.nn.Module network: 用来完成预测任务的模型 """ def __init__(self, network): + """ + + :param torch.nn.Module network: 用来完成预测任务的模型 + """ if not isinstance(network, torch.nn.Module): raise ValueError( "Only fastNLP.models.BaseModel or torch.nn,Module is allowed, not {}".format(type(network))) diff --git a/fastNLP/core/sampler.py b/fastNLP/core/sampler.py index d0df9129..6e025688 100644 --- a/fastNLP/core/sampler.py +++ b/fastNLP/core/sampler.py @@ -51,14 +51,16 @@ class RandomSampler(Sampler): class BucketSampler(Sampler): """ 带Bucket的 `Random Sampler`. 可以随机地取出长度相似的元素 - - :param int num_buckets: bucket的数量 - :param int batch_size: batch的大小. 默认为None,Trainer在调用BucketSampler时,会将该值正确设置,如果是非Trainer场景使用,需 - 要显示传递该值 - :param str seq_len_field_name: 对应序列长度的 `field` 的名字 """ def __init__(self, num_buckets=10, batch_size=None, seq_len_field_name='seq_len'): + """ + + :param int num_buckets: bucket的数量 + :param int batch_size: batch的大小. 默认为None,Trainer在调用BucketSampler时,会将该值正确设置,如果是非Trainer场景使用,需 + 要显示传递该值 + :param str seq_len_field_name: 对应序列长度的 `field` 的名字 + """ self.num_buckets = num_buckets self.batch_size = batch_size self.seq_len_field_name = seq_len_field_name diff --git a/fastNLP/core/tester.py b/fastNLP/core/tester.py index 344e24a8..d1d5d41e 100644 --- a/fastNLP/core/tester.py +++ b/fastNLP/core/tester.py @@ -66,30 +66,32 @@ __all__ = [ class Tester(object): """ Tester是在提供数据,模型以及metric的情况下进行性能测试的类。需要传入模型,数据以及metric进行验证。 - - :param ~fastNLP.DataSet data: 需要测试的数据集 - :param torch.nn.module model: 使用的模型 - :param ~fastNLP.core.metrics.MetricBase,List[~fastNLP.core.metrics.MetricBase] metrics: 测试时使用的metrics - :param int batch_size: evaluation时使用的batch_size有多大。 - :param str,int,torch.device,list(int) device: 将模型load到哪个设备。默认为None,即Trainer不对模型 - 的计算位置进行管理。支持以下的输入: - - 1. str: ['cpu', 'cuda', 'cuda:0', 'cuda:1', ...] 依次为'cpu'中, 可见的第一个GPU中,可见的第一个GPU中,可见的第二个GPU中; - - 2. torch.device:将模型装载到torch.device上。 - - 3. int: 将使用device_id为该值的gpu进行训练 - - 4. list(int):如果多于1个device,将使用torch.nn.DataParallel包裹model, 并使用传入的device。 - - 5. None. 
为None则不对模型进行任何处理,如果传入的model为torch.nn.DataParallel该值必须为None。 - - 如果模型是通过predict()进行预测的话,那么将不能使用多卡(DataParallel)进行验证,只会使用第一张卡上的模型。 - :param int verbose: 如果为0不输出任何信息; 如果为1,打印出验证结果。 - :param bool use_tqdm: 是否使用tqdm来显示测试进度; 如果为False,则不会显示任何内容。 """ def __init__(self, data, model, metrics, batch_size=16, num_workers=0, device=None, verbose=1, use_tqdm=True): + """ + + :param ~fastNLP.DataSet data: 需要测试的数据集 + :param torch.nn.module model: 使用的模型 + :param ~fastNLP.core.metrics.MetricBase,List[~fastNLP.core.metrics.MetricBase] metrics: 测试时使用的metrics + :param int batch_size: evaluation时使用的batch_size有多大。 + :param str,int,torch.device,list(int) device: 将模型load到哪个设备。默认为None,即Trainer不对模型 + 的计算位置进行管理。支持以下的输入: + + 1. str: ['cpu', 'cuda', 'cuda:0', 'cuda:1', ...] 依次为'cpu'中, 可见的第一个GPU中,可见的第一个GPU中,可见的第二个GPU中; + + 2. torch.device:将模型装载到torch.device上。 + + 3. int: 将使用device_id为该值的gpu进行训练 + + 4. list(int):如果多于1个device,将使用torch.nn.DataParallel包裹model, 并使用传入的device。 + + 5. None. 为None则不对模型进行任何处理,如果传入的model为torch.nn.DataParallel该值必须为None。 + + 如果模型是通过predict()进行预测的话,那么将不能使用多卡(DataParallel)进行验证,只会使用第一张卡上的模型。 + :param int verbose: 如果为0不输出任何信息; 如果为1,打印出验证结果。 + :param bool use_tqdm: 是否使用tqdm来显示测试进度; 如果为False,则不会显示任何内容。 + """ super(Tester, self).__init__() if not isinstance(model, nn.Module): @@ -137,10 +139,9 @@ class Tester(object): self._predict_func_wrapper = self._model.forward def test(self): - """开始进行验证,并返回验证结果。 + r"""开始进行验证,并返回验证结果。 - :return Dict[Dict] : dict的二层嵌套结构,dict的第一层是metric的名称; 第二层是这个metric的指标。 - 一个AccuracyMetric的例子为{'AccuracyMetric': {'acc': 1.0}}。 + :return Dict[Dict]: dict的二层嵌套结构,dict的第一层是metric的名称; 第二层是这个metric的指标。一个AccuracyMetric的例子为{'AccuracyMetric': {'acc': 1.0}}。 """ # turn on the testing mode; clean up the history self._model_device = _get_model_device(self._model) diff --git a/fastNLP/core/trainer.py b/fastNLP/core/trainer.py index 9f262fb5..a2c3b1f7 100644 --- a/fastNLP/core/trainer.py +++ b/fastNLP/core/trainer.py @@ -365,54 +365,6 @@ class Trainer(object): (5) 保存获得更好验证性能的模型等。 详细的介绍参见 :doc:`fastNLP.core.trainer` - - :param train_data: 训练集, :class:`~fastNLP.DataSet` 类型。 - :param nn.modules model: 待训练的模型 - :param optimizer: `torch.optim.Optimizer` 优化器。如果为None,则Trainer使用默认的Adam(model.parameters(), lr=4e-3)这个优化器 - :param int batch_size: 训练和验证的时候的batch大小。 - :param loss: 使用的 :class:`~fastNLP.core.losses.LossBase` 对象。当为None时,默认使用 :class:`~fastNLP.LossInForward` - :param sampler: Batch数据生成的顺序, :class:`~fastNLP.Sampler` 类型。如果为None,默认使用 :class:`~fastNLP.RandomSampler` - :param drop_last: 如果最后一个batch没有正好为batch_size这么多数据,就扔掉最后一个batch - :param num_workers: int, 有多少个线程来进行数据pad处理。 - :param update_every: int, 多少步更新一次梯度。用于希望累计梯度的场景,比如需要128的batch_size, 但是直接设为128 - 会导致内存不足,通过设置batch_size=32, update_every=4达到目的。当optimizer为None时,该参数无效。 - :param int n_epochs: 需要优化迭代多少次。 - :param int print_every: 多少次反向传播更新tqdm显示的loss; 如果use_tqdm=False, 则多少次反向传播打印loss。 - :param dev_data: 用于做验证的DataSet, :class:`~fastNLP.DataSet` 类型。 - :param metrics: 验证的评估函数。可以只使用一个 :class:`Metric` , - 也可以使用多个 :class:`Metric` ,通过列表传入。 - 如验证时取得了更好的验证结果(如果有多个Metric,以列表中第一个Metric为准),且save_path不为None, - 则保存当前模型。Metric种类详见 :doc:`metrics模块 ` 。仅在传入dev_data时有效。 - :param str,None metric_key: :class:`Metric` 有时会有多个指标, - 比如 :class:`~fastNLP.core.metrics.SpanFPreRecMetric` 中包含了'f', 'pre', 'rec'。此时需 - 要指定以哪个指标为准。另外有些指标是越小效果越好,比如语言模型的困惑度,这种情况下,在key前面增加一个'-'来表 - 明验证时,值越小越好(比如: "-ppl")。仅在传入dev_data时有效。 - :param int validate_every: 多少个step在验证集上验证一次; 如果为-1,则每个epoch结束验证一次。仅在传入dev_data时有效。 - :param str,None save_path: 
将模型保存路径,如果路径不存在,将自动创建文件夹。如果为None,则不保存模型。如果dev_data为None,则保存 - 最后一次迭代的模型。保存的时候不仅保存了参数,还保存了模型结构。即便使用DataParallel,这里也只保存模型。 - :param bool use_tqdm: 是否使用tqdm来显示训练进度; 如果为False,则将loss打印在终端中。 - :param str,int,torch.device,list(int) device: 将模型load到哪个设备。默认为None,即Trainer不对模型 - 的计算位置进行管理。支持以下的输入: - - 1. str: ['cpu', 'cuda', 'cuda:0', 'cuda:1', ...] 依次为'cpu'中, 可见的第一个GPU中, 可见的第一个GPU中, - 可见的第二个GPU中; - - 2. torch.device:将模型装载到torch.device上。 - - 3. int: 将使用device_id为该值的gpu进行训练 - - 4. list(int):如果多于1个device,将使用torch.nn.DataParallel包裹model, 并使用传入的device。 - - 5. None. 为None则不对模型进行任何处理,如果传入的model为torch.nn.DataParallel该值必须为None。 - - 已知可能会出现的问题:Adagrad优化器可能无法正常使用这个参数,请手动管理模型位置。 - - :param list(callbacks) callbacks: 用于在train过程中起调节作用的回调函数。比如early stop,negative sampling等可以 - 通过callback机制实现。 可使用的callback参见 :doc:`callback模块 ` - :param int check_code_level: 模型检查等级. -1: 不进行检查; 0: 仅出现错误时停止; 1: 如果有field没有被使用, - 报告警告信息; 2: 有任何field没有被使用都报错. 检查的原理是通过使用很小的batch(默认2个sample)来运行代码,但是 - 这个过程理论上不会修改任何参数,只是会检查能否运行。但如果(1)模型中存在将batch_size写为某个固定值的情况; - (2)模型中存在累加前向计算次数的,可能会多计算1次。以上情况建议将check_code_level设置为-1。 """ def __init__(self, train_data, model, optimizer=None, loss=None, @@ -421,6 +373,56 @@ class Trainer(object): dev_data=None, metrics=None, metric_key=None, validate_every=-1, save_path=None, use_tqdm=True, device=None, callbacks=None, check_code_level=0, **kwargs): + """ + + :param train_data: 训练集, :class:`~fastNLP.DataSet` 类型。 + :param nn.modules model: 待训练的模型 + :param optimizer: `torch.optim.Optimizer` 优化器。如果为None,则Trainer使用默认的Adam(model.parameters(), lr=4e-3)这个优化器 + :param int batch_size: 训练和验证的时候的batch大小。 + :param loss: 使用的 :class:`~fastNLP.core.losses.LossBase` 对象。当为None时,默认使用 :class:`~fastNLP.LossInForward` + :param sampler: Batch数据生成的顺序, :class:`~fastNLP.Sampler` 类型。如果为None,默认使用 :class:`~fastNLP.RandomSampler` + :param drop_last: 如果最后一个batch没有正好为batch_size这么多数据,就扔掉最后一个batch + :param num_workers: int, 有多少个线程来进行数据pad处理。 + :param update_every: int, 多少步更新一次梯度。用于希望累计梯度的场景,比如需要128的batch_size, 但是直接设为128 + 会导致内存不足,通过设置batch_size=32, update_every=4达到目的。当optimizer为None时,该参数无效。 + :param int n_epochs: 需要优化迭代多少次。 + :param int print_every: 多少次反向传播更新tqdm显示的loss; 如果use_tqdm=False, 则多少次反向传播打印loss。 + :param dev_data: 用于做验证的DataSet, :class:`~fastNLP.DataSet` 类型。 + :param metrics: 验证的评估函数。可以只使用一个 :class:`Metric` , + 也可以使用多个 :class:`Metric` ,通过列表传入。 + 如验证时取得了更好的验证结果(如果有多个Metric,以列表中第一个Metric为准),且save_path不为None, + 则保存当前模型。Metric种类详见 :doc:`metrics模块 ` 。仅在传入dev_data时有效。 + :param str,None metric_key: :class:`Metric` 有时会有多个指标, + 比如 :class:`~fastNLP.core.metrics.SpanFPreRecMetric` 中包含了'f', 'pre', 'rec'。此时需 + 要指定以哪个指标为准。另外有些指标是越小效果越好,比如语言模型的困惑度,这种情况下,在key前面增加一个'-'来表 + 明验证时,值越小越好(比如: "-ppl")。仅在传入dev_data时有效。 + :param int validate_every: 多少个step在验证集上验证一次; 如果为-1,则每个epoch结束验证一次。仅在传入dev_data时有效。 + :param str,None save_path: 将模型保存路径,如果路径不存在,将自动创建文件夹。如果为None,则不保存模型。如果dev_data为None,则保存 + 最后一次迭代的模型。保存的时候不仅保存了参数,还保存了模型结构。即便使用DataParallel,这里也只保存模型。 + :param bool use_tqdm: 是否使用tqdm来显示训练进度; 如果为False,则将loss打印在终端中。 + :param str,int,torch.device,list(int) device: 将模型load到哪个设备。默认为None,即Trainer不对模型 + 的计算位置进行管理。支持以下的输入: + + 1. str: ['cpu', 'cuda', 'cuda:0', 'cuda:1', ...] 依次为'cpu'中, 可见的第一个GPU中, 可见的第一个GPU中, + 可见的第二个GPU中; + + 2. torch.device:将模型装载到torch.device上。 + + 3. int: 将使用device_id为该值的gpu进行训练 + + 4. list(int):如果多于1个device,将使用torch.nn.DataParallel包裹model, 并使用传入的device。 + + 5. None. 
为None则不对模型进行任何处理,如果传入的model为torch.nn.DataParallel该值必须为None。 + + 已知可能会出现的问题:Adagrad优化器可能无法正常使用这个参数,请手动管理模型位置。 + + :param list(callbacks) callbacks: 用于在train过程中起调节作用的回调函数。比如early stop,negative sampling等可以 + 通过callback机制实现。 可使用的callback参见 :doc:`callback模块 ` + :param int check_code_level: 模型检查等级. -1: 不进行检查; 0: 仅出现错误时停止; 1: 如果有field没有被使用, + 报告警告信息; 2: 有任何field没有被使用都报错. 检查的原理是通过使用很小的batch(默认2个sample)来运行代码,但是 + 这个过程理论上不会修改任何参数,只是会检查能否运行。但如果(1)模型中存在将batch_size写为某个固定值的情况; + (2)模型中存在累加前向计算次数的,可能会多计算1次。以上情况建议将check_code_level设置为-1。 + """ super(Trainer, self).__init__() if not isinstance(model, nn.Module): raise TypeError(f"The type of model must be torch.nn.Module, got {type(model)}.") diff --git a/fastNLP/core/vocabulary.py b/fastNLP/core/vocabulary.py index d4ff6077..6d530eb6 100644 --- a/fastNLP/core/vocabulary.py +++ b/fastNLP/core/vocabulary.py @@ -73,21 +73,23 @@ class Vocabulary(object): vocab.update(word_list) vocab["word"] # str to int vocab.to_word(5) # int to str - - :param int max_size: `Vocabulary` 的最大大小, 即能存储词的最大数量 - 若为 ``None`` , 则不限制大小. Default: ``None`` - :param int min_freq: 能被记录下的词在文本中的最小出现频率, 应大于或等于 1. - 若小于该频率, 词语将被视为 `unknown`. 若为 ``None`` , 所有文本中的词都被记录. Default: ``None`` - :param str optional padding: padding的字符. 如果设置为 ``None`` , - 则vocabulary中不考虑padding, 也不计入词表大小,为 ``None`` 的情况多在为label建立Vocabulary的情况. - Default: '' - :param str optional unknown: unknown的字符,所有未被记录的词在转为 `int` 时将被视为unknown. - 如果设置为 ``None`` ,则vocabulary中不考虑unknow, 也不计入词表大小. - 为 ``None`` 的情况多在为label建立Vocabulary的情况. - Default: '' """ def __init__(self, max_size=None, min_freq=None, padding='', unknown=''): + """ + + :param int max_size: `Vocabulary` 的最大大小, 即能存储词的最大数量 + 若为 ``None`` , 则不限制大小. Default: ``None`` + :param int min_freq: 能被记录下的词在文本中的最小出现频率, 应大于或等于 1. + 若小于该频率, 词语将被视为 `unknown`. 若为 ``None`` , 所有文本中的词都被记录. Default: ``None`` + :param str optional padding: padding的字符. 如果设置为 ``None`` , + 则vocabulary中不考虑padding, 也不计入词表大小,为 ``None`` 的情况多在为label建立Vocabulary的情况. + Default: '' + :param str optional unknown: unknown的字符,所有未被记录的词在转为 `int` 时将被视为unknown. + 如果设置为 ``None`` ,则vocabulary中不考虑unknow, 也不计入词表大小. + 为 ``None`` 的情况多在为label建立Vocabulary的情况. + Default: '' + """ self.max_size = max_size self.min_freq = min_freq self.word_count = Counter() @@ -402,7 +404,7 @@ class Vocabulary(object): def to_index(self, w): """ - 将词转为数字. 若词不再词典中被记录, 将视为 unknown, 若 ``unknown=None`` , 将抛出``ValueError``:: + 将词转为数字. 若词不再词典中被记录, 将视为 unknown, 若 ``unknown=None`` , 将抛出 ``ValueError`` :: index = vocab.to_index('abc') # equals to From 60a535db08be4621e8b2f52bb83caad81c693075 Mon Sep 17 00:00:00 2001 From: ChenXin Date: Wed, 4 Sep 2019 17:15:21 +0800 Subject: [PATCH 36/50] fix a little error in doc. TODO: fix the bug doc of the class which inherit the class from outer space --- fastNLP/core/metrics.py | 1 + fastNLP/core/optimizer.py | 11 +++++------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/fastNLP/core/metrics.py b/fastNLP/core/metrics.py index ec1a1864..72380fd6 100644 --- a/fastNLP/core/metrics.py +++ b/fastNLP/core/metrics.py @@ -152,6 +152,7 @@ class MetricBase(object): def get_metric_name(self): """ 返回metric的名称 + :return: """ return self._metric_name diff --git a/fastNLP/core/optimizer.py b/fastNLP/core/optimizer.py index 5e7c1cba..b782cfa6 100644 --- a/fastNLP/core/optimizer.py +++ b/fastNLP/core/optimizer.py @@ -120,12 +120,11 @@ class AdamW(TorchOptimizer): The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_. 
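For orientation only (this is not part of the patch or of the AdamW docstring), a minimal sketch of how this AdamW wrapper is typically driven; `model`, `loss_fn` and `data_iter` are placeholder assumptions:

    from fastNLP.core.optimizer import AdamW

    # assumed: `model` is any torch.nn.Module, `loss_fn` a loss callable,
    # and `data_iter` yields (batch_x, batch_y) pairs
    optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
    for batch_x, batch_y in data_iter:
        optimizer.zero_grad()
        loss = loss_fn(model(batch_x), batch_y)
        loss.backward()
        optimizer.step()  # weight decay is applied decoupled from the gradient update
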
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_. - .. _Adam\: A Method for Stochastic Optimization: - https://arxiv.org/abs/1412.6980 - .. _Decoupled Weight Decay Regularization: - https://arxiv.org/abs/1711.05101 - .. _On the Convergence of Adam and Beyond: - https://openreview.net/forum?id=ryQu7f-RZ + .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 + + .. _Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101 + + .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, From 14d048f3406fd05a79e9e61b8e05c410bf8882f0 Mon Sep 17 00:00:00 2001 From: yh Date: Thu, 5 Sep 2019 00:21:46 +0800 Subject: [PATCH 37/50] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dbert=20embedding?= =?UTF-8?q?=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/embeddings/bert_embedding.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fastNLP/embeddings/bert_embedding.py b/fastNLP/embeddings/bert_embedding.py index 17f6769d..05351cbd 100644 --- a/fastNLP/embeddings/bert_embedding.py +++ b/fastNLP/embeddings/bert_embedding.py @@ -420,11 +420,11 @@ class _WordBertModel(nn.Module): if self.pool_method == 'first': batch_word_pieces_cum_length = batch_word_pieces_cum_length[:, :seq_len.max()] batch_word_pieces_cum_length.masked_fill_(batch_word_pieces_cum_length.ge(word_piece_length), 0) - batch_indexes = batch_indexes[:, None].expand((batch_size, batch_word_pieces_cum_length.size(1))) + _batch_indexes = batch_indexes[:, None].expand((batch_size, batch_word_pieces_cum_length.size(1))) elif self.pool_method == 'last': batch_word_pieces_cum_length = batch_word_pieces_cum_length[:, 1:seq_len.max()+1] - 1 batch_word_pieces_cum_length.masked_fill_(batch_word_pieces_cum_length.ge(word_piece_length), 0) - batch_indexes = batch_indexes[:, None].expand((batch_size, batch_word_pieces_cum_length.size(1))) + _batch_indexes = batch_indexes[:, None].expand((batch_size, batch_word_pieces_cum_length.size(1))) for l_index, l in enumerate(self.layers): output_layer = bert_outputs[l] @@ -437,12 +437,12 @@ class _WordBertModel(nn.Module): # 从word_piece collapse到word的表示 truncate_output_layer = output_layer[:, 1:-1] # 删除[CLS]与[SEP] batch_size x len x hidden_size if self.pool_method == 'first': - tmp = truncate_output_layer[batch_indexes, batch_word_pieces_cum_length] + tmp = truncate_output_layer[_batch_indexes, batch_word_pieces_cum_length] tmp = tmp.masked_fill(word_mask[:, :batch_word_pieces_cum_length.size(1), None].eq(0), 0) outputs[l_index, :, s_shift:batch_word_pieces_cum_length.size(1)+s_shift] = tmp elif self.pool_method == 'last': - tmp = truncate_output_layer[batch_indexes, batch_word_pieces_cum_length] + tmp = truncate_output_layer[_batch_indexes, batch_word_pieces_cum_length] tmp = tmp.masked_fill(word_mask[:, :batch_word_pieces_cum_length.size(1), None].eq(0), 0) outputs[l_index, :, s_shift:batch_word_pieces_cum_length.size(1)+s_shift] = tmp elif self.pool_method == 'max': From 880e3ad96953bb2ac6ed19b1a54efc06835dfc04 Mon Sep 17 00:00:00 2001 From: Yige Xu Date: Thu, 5 Sep 2019 01:26:22 +0800 Subject: [PATCH 38/50] 1. add mini_elmo.pkl and test codes for testing ElmoEmbedding; 2. 
update bert testing codes --- fastNLP/embeddings/elmo_embedding.py | 5 +- fastNLP/models/bert.py | 2 +- .../embedding/small_elmo/char.dic | 229 ++++++++++++++++++ .../elmo_1x16_16_32cnn_1xhighway_options.json | 29 +++ .../small_elmo/elmo_mini_for_testing.pkl | Bin 0 -> 37695 bytes test/embeddings/test_bert_embedding.py | 7 +- test/embeddings/test_elmo_embedding.py | 15 ++ 7 files changed, 282 insertions(+), 5 deletions(-) create mode 100644 test/data_for_tests/embedding/small_elmo/char.dic create mode 100644 test/data_for_tests/embedding/small_elmo/elmo_1x16_16_32cnn_1xhighway_options.json create mode 100644 test/data_for_tests/embedding/small_elmo/elmo_mini_for_testing.pkl diff --git a/fastNLP/embeddings/elmo_embedding.py b/fastNLP/embeddings/elmo_embedding.py index 0ec0caa0..d19a3577 100644 --- a/fastNLP/embeddings/elmo_embedding.py +++ b/fastNLP/embeddings/elmo_embedding.py @@ -69,6 +69,7 @@ class ElmoEmbedding(ContextualEmbedding): else: raise ValueError(f"Cannot recognize {model_dir_or_name}.") self.model = _ElmoModel(model_dir, vocab, cache_word_reprs=cache_word_reprs) + num_layers = self.model.encoder.num_layers if layers == 'mix': self.layer_weights = nn.Parameter(torch.zeros(self.model.config['lstm']['n_layers'] + 1), @@ -78,9 +79,9 @@ class ElmoEmbedding(ContextualEmbedding): self._embed_size = self.model.config['lstm']['projection_dim'] * 2 else: layers = list(map(int, layers.split(','))) - assert len(layers) > 0, "Must choose one output" + assert len(layers) > 0, "Must choose at least one output, but got None." for layer in layers: - assert 0 <= layer <= 2, "Layer index should be in range [0, 2]." + assert 0 <= layer <= num_layers, f"Layer index should be in range [0, {num_layers}], but got {layer}." self.layers = layers self._get_outputs = self._get_layer_outputs self._embed_size = len(self.layers) * self.model.config['lstm']['projection_dim'] * 2 diff --git a/fastNLP/models/bert.py b/fastNLP/models/bert.py index 85c3af8c..30ed0cd8 100644 --- a/fastNLP/models/bert.py +++ b/fastNLP/models/bert.py @@ -241,7 +241,7 @@ class BertForQuestionAnswering(BaseModel): def forward(self, words): """ :param torch.LongTensor words: [batch_size, seq_len] - :return: 一个包含num_labels个logit的dict,每一个logit的形状都是[batch_size, seq_len] + :return: 一个包含num_labels个logit的dict,每一个logit的形状都是[batch_size, seq_len + 2] """ sequence_output = self.bert(words) logits = self.qa_outputs(sequence_output) # [batch_size, seq_len, num_labels] diff --git a/test/data_for_tests/embedding/small_elmo/char.dic b/test/data_for_tests/embedding/small_elmo/char.dic new file mode 100644 index 00000000..74285f34 --- /dev/null +++ b/test/data_for_tests/embedding/small_elmo/char.dic @@ -0,0 +1,229 @@ +! 33 +" 34 +# 35 +$ 36 +% 37 +& 38 +' 39 +( 40 +) 41 +* 42 ++ 43 +, 44 +- 45 +. 46 +/ 47 +0 48 +1 49 +2 50 +3 51 +4 52 +5 53 +6 54 +7 55 +8 56 +9 57 +: 58 +; 59 +< 60 += 61 +> 62 +? 
63 +@ 64 +A 65 +B 66 +C 67 +D 68 +E 69 +F 70 +G 71 +H 72 +I 73 +J 74 +K 75 +L 76 +M 77 +N 78 +O 79 +P 80 +Q 81 +R 82 +S 83 +T 84 +U 85 +V 86 +W 87 +X 88 +Y 89 +Z 90 +[ 91 +\ 92 +] 93 +^ 94 +_ 95 +` 96 +a 97 +b 98 +c 99 +d 100 +e 101 +f 102 +g 103 +h 104 +i 105 +j 106 +k 107 +l 108 +m 109 +n 110 +o 111 +p 112 +q 113 +r 114 +s 115 +t 116 +u 117 +v 118 +w 119 +x 120 +y 121 +z 122 +{ 123 +| 124 +} 125 +~ 126 + 127 +€ 128 + 129 +‚ 130 +ƒ 131 +„ 132 +† 134 +‡ 135 +ˆ 136 +‰ 137 +Š 138 +‹ 139 +Œ 140 + 141 +Ž 142 + 143 + 144 +‘ 145 +’ 146 +“ 147 +” 148 +• 149 +– 150 +— 151 +˜ 152 +™ 153 +š 154 +› 155 +œ 156 + 157 +ž 158 +Ÿ 159 +  160 +¡ 161 +¢ 162 +£ 163 +¤ 164 +¥ 165 +¦ 166 +§ 167 +¨ 168 +© 169 +ª 170 +« 171 +¬ 172 +­ 173 +® 174 +¯ 175 +° 176 +± 177 +² 178 +³ 179 +´ 180 +µ 181 +¶ 182 +· 183 +¸ 184 +¹ 185 +º 186 +» 187 +¼ 188 +½ 189 +¾ 190 +¿ 191 +À 192 +Á 193 + 194 +à 195 +Ä 196 +Å 197 +Æ 198 +Ç 199 +È 200 +É 201 +Ê 202 +Ë 203 +Ì 204 +Í 205 +Î 206 +Ï 207 +Ð 208 +Ñ 209 +Ò 210 +Ó 211 +Ô 212 +Õ 213 +Ö 214 +× 215 +Ø 216 +Ù 217 +Ú 218 +Û 219 +Ü 220 +Ý 221 +Þ 222 +ß 223 +à 224 +á 225 +â 226 +ã 227 +ä 228 +å 229 +æ 230 +ç 231 +è 232 +é 233 +ê 234 +ë 235 +ì 236 +í 237 +î 238 +ï 239 +ð 240 +ñ 241 +ò 242 +ó 243 +ô 244 +õ 245 +ö 246 +÷ 247 +ø 248 +ù 249 +ú 250 +û 251 +ü 252 +ý 253 +þ 254 +ÿ 255 + 256 + 257 + 258 + 259 + 260 + 1 + -1 diff --git a/test/data_for_tests/embedding/small_elmo/elmo_1x16_16_32cnn_1xhighway_options.json b/test/data_for_tests/embedding/small_elmo/elmo_1x16_16_32cnn_1xhighway_options.json new file mode 100644 index 00000000..9c02ef72 --- /dev/null +++ b/test/data_for_tests/embedding/small_elmo/elmo_1x16_16_32cnn_1xhighway_options.json @@ -0,0 +1,29 @@ +{ + "lstm": { + "use_skip_connections": true, + "projection_dim": 16, + "cell_clip": 3, + "proj_clip": 3, + "dim": 16, + "n_layers": 1 + }, + "char_cnn": { + "activation": "relu", + "filters": [ + [ + 1, + 16 + ], + [ + 2, + 16 + ] + ], + "n_highway": 1, + "embedding": { + "dim": 4 + }, + "n_characters": 262, + "max_characters_per_token": 50 + } +} diff --git a/test/data_for_tests/embedding/small_elmo/elmo_mini_for_testing.pkl b/test/data_for_tests/embedding/small_elmo/elmo_mini_for_testing.pkl new file mode 100644 index 0000000000000000000000000000000000000000..4c72f3d51b692e456b55db006d0f65ccf3a0e8a0 GIT binary patch literal 37695 zcmc$_d00P>E(4BBbek);^ii zpoCNsD)T&xgx?#_{d~U9{kfmt_5JUAu1iJ`?2qjN$O3InAh~a@7!Xh}rE&?8cy#ED-L~ul0ggk_P z@z@X&z!7y3ij4dV^EWj-Vm*i7kB1uU?-#)X4VQ2WTkRj_zj{WHUxWllK;NAssOK?> z$6(Edu#LW9t9^ofH~EM87#jtJghodA1P6uq`-TNYY%<#D9~2N6!4dL{*bwFyXyg+a z5fmIQ;S=V+Dl#Z|wNHe9Nce^@pD0raj&krsk$*);2cg78a&9mK?F)%km42oM zf5|Kz!I5$2$o`|?;Ss(O{{LHWxj>#=fg*u_$2GGt<%|jZ7oPmT@y7na8}|oKA%dgm z&QbE1_z#NuJKm}w-*ArdKcZQhm{^!{R073a?0JhD{~K&4N7aRYCuhPRAhigNx;sb1 zWAc9m;WomA0%+dO_GJYPI_AfBGUz*hUv$BbQ0CgicliWFze*>=a z^;`FU;{$sC1<=&knxp@hDh>XvO2a>ZQ~m%>jo=u$bEf?>;NNxmFQ+s9XFwBEb8C*t zUx23n1~mHvX#NM#B7$S-&av{)<*D?4t?=)Fzl~@8&u|vzR;C=Ae;LpAU&gchWjyK+ z5{lqpcMk2L_urBJxApA*8O+?+%#`Esm-VLq+j@?F);8l0;>-w+lRIZtWDIAvhd2+_ zXT5)f?`q!&Uyk$dIGLm8!K3nVTw*wLf5+rsxyRLoKior>H}to#=gf=YxCQ)hmYM{b#z?KY$zkFJYI(aF+j`9~r*NLyY%|U!ZT8k6%a#XT@(z z_~>~k{6+O&zv@q#{VRf3{-Y0A*jSoaa(sC{p!_QruHyNCs|de~P$I|AMd(*DT>Zxh z{3AGP+&KXr@_+I7+Yl1P%i#YCp1^-*Zf(U&MM3}a=CyzPi^nyIv+fu3U%v$ZVZT0t z6XMR<@OuyYv#3Ape}_uwKQp(n=A{A-kGULA@UY*^cXGmig-XO9rjZexD0j}rzsYR! 
zm(0=s$kx)>%-Wo@iN`jU$9D5yG7BVewz!D72>gmT+p5u9!Aob7*;*~I9#%(4H- z+``7j*qjshFPV4zOXm1rGAI0Dx-){4=*~&<7|+wQPatnM+~~V0+~|)b!;Su}@8o}E zZ)t39ZOlpe70_Zl;ZuL{w|CJ@*vZNMwSb&I%=boc_PKNR|5m!s{{-4!N$PF2EB!?~m4b zg6IDc{NNwn1reM>?wrHFHU59{{w;9ff8lJ+^Y5a6X}tJf8ZY@JaOoeWWf7cmcg_)B zdaDR@{vAd~^#Xah?AV{kl=;iy{~b`A<1w5Qe+DK08vGLtoXQx^$$uI#;hc)$RQ(zG zokIVzEa!9#=gc4an18Yc=WGn8`p@{-e;PM3=A4V+)cg|8KNkrV#iW?Uxe&)RF&w_jfTR^q=!Aeib^x|4I9u14RCl_B-kP ztw{PE&SHO?_bXh)|AY1?J|+G}`{nnN|4I8@21)&m_G>ej{!g0euZlwEZ?s=!jqHEY zes7j?|4IA3TaWQKvgaxF*YkQji$Z)Z0%y87j>?@#9`?(ULWLQ~ymAiW8&2Y8X6yjC zXM*473y^`0rc9;~my-WD4D;3YP;sVH$sL`sxYukP@#=Ym=J!UUV+LQ)E`@Mth_8b{ z*DuVJJsI$P&LHaPP{4h@eei6%4purSgng=ou>GO~c6@xX^#9X;0)O-%_Bfxq{iql2 zwl{+Koo0wVHv!pKcfpq&e-Pm>N7dt0;HAh$&@MfKR%jlDrvnkN%T)+YX?MX#VF%>0 zWD>69#!`C6kHL++5NIzbf-Q%Y@%OSeNZPEAQw<)$^!6tx_@_L`&C{gz+^vUAi?y)v z>t?1~y&mxm-D)^!Ktq=85E`{|M)YTW+$E?@6ejE9wueHvdAlcCUSNV}JNcvUpK@Vc zh#;1IW`aN0_n`Xl8=$GH1F>aKKy&P3bjs2Ix0%Xfhc70i?5iHW>N*P==uhavJ`XT@ z^98DB^AWktd_?e>2I(>z2QRi5W2*B8h`a9r0ofijWquBtv40h+EtkPge7g`;r9tGj z&Z8z>8bpQbmQtGqwu1h3GhF?w9o}WAVo5nQY^vRjY`vnP{nQvTF5??Se-VblL)pmG zuM`z}%!M=!Gu$k!N67m}$X>65>3s2->g;N!-u7-m!OfA#P;3XByq1U*Pc)!=JvWhr z^mlYd{WzMmeGkMI%z&k42IRDwFsVJRjU~O5@!PgCX6hw3=K??(04y zzV{Q*u(CQXtg@x(qX;WQU>(!HnwOrN|*H!@$L zS({|=tCL!oU-~`FSZoP(16xpa!5lDk{fTB9P9eoXmr#|+WRkW{4R6j(MT3+Yek3>v z=Qr__1s8R(BKin#$4aA?U>z)Go{h}im5_b?8j6_op@MV~Qc!aq4sO{8e4G0lRzCAZ zntI;Ms=@DwU)dPT897+e<>0#nzH!A@t2 zQQeY*z|GXfNyTD#ncNg2_j4?Dc*z}jQzn3u-}f-PKN=C;sr)$eYzd=s(}XNpkqbWx zZ=iW4GDJcy4mccR%!swYfho>l{L=t^n_UAt9z8{8R@9)aV%f;9#S}Nri05ivSjq@F zj3x8@o}&i4C&>Aj7J2IUiJBuEht@2(kH+OMfkLHIb_shVp?u>GnBgW$8ZKN#%0E+> zgf+DS+fY%-N3a+FK&|k6ij;M|k%g%QR%#K! z)}eirb9kggZ{A>i2lWCf`^9j=VG&ep{~8v5H^M&T4KylpAX9_OmC%N1}tHM0Nz~>nlX+K1_UD83;q!Yfn>0)=8LiE1+8@j0Y0E`Et5jWZp z?;kHm-ilX&X0;BqMYe!kDPc@YTTuKgo(~l2z(`s%YRp#W-uDq8YR{+P_w_#!-<%E` zfeniQAODGR>vi#z_)*Y%bqTu9?nJL=bfHGH!LI8nKPYI*Ph89xf3u2PtE1jO$5ZwjpR1wZ$twHJD5fbOs(SiV*W!_95q#&ymXs|2A@tL z&2nqdhYST$=lY7;`eZ3&6hB5jSrS-QNDp@jn-gWV3P@hl!`!u!!iBeExwC?$@tjNF z(WOa7;KCNeO9ek%ICa9jmr*F=>ubl!1H#rviBCZmu-aDc;L>6Rz{86TbOyf@4`_>Gzqw-kijugpBsfRbEQ*e0hFm$D;B99miG}0_d z&KH~E@(D>mCGe4zW2R6mI&Y)ZpA^X@KT%ZNbPOq4w<68Fdv=eTuA*&fqaZhs&Ags7 zfcUcHxQp&@LmMXX;_87Re!Em2Mey?0%9e?goP`l&MrpuowJw;(Q6l!<(RO+R-LT}) zIP#@s9q{=^HN5-w1@vN?U_h^tSu6AeU9F+fue0>8C%izHWWHL&1o8>M>^TPH7^Oor z&unD|o=qUnHtOKy4SML#V{shqc#oPr;}&E8MhCt9NHcN@bHQ|`7UBEVU*tWp*Q+3D zjxv51vJo49JB(DVHz0qz*=W~xH4^$x8uz?EhV})Bpc(BHN;tWTDLr3~dIPvf*QjVe2QbMCk|CyDRw!I-HYpeKC0}3xTH!nqZ`yMaCP#b1*)6 zzf{bfbQ@kePQ;=H7wz7T5yI;!SMu7&4g^>2fsw>4be_KjvD^3=8((cwEW3{6IGB^- z4?W5iL_^Dih{{kiRsLlQoV~4zl=AnX z2g8{lV#|jg>F{b2ksADZmkIh(Gzav;G^nlbD{+$aJF5E19A?#PL6XfJLIvmNK|qiZ z8sR$(iB^@UAa*(&6PSQhyL!3TYGR>&J8u=QE#ayjM)Ha7a3XOCE_fDlC7x-*Y^#l2 z$C6rjxJm>Ja+}bjmmis`j3@AY(gvh_RTQ5IO9N$VPg1$xiySX0pi*{CA~_?P%^*u1FItUNSWFZf2d1Tu>1D$Np!vPCFqpiF> z)O&X-ik7N;Z8QXry%c+Fd3+hnco`uNGI|->JD4`XPHsaKQVBEKG6dCM&%T!v3 zlPjS|QPIVvhRh5nk=!D1~#?cOtp;Jj78|L*7Tzn8mZ!;dTDiz}!Cp*&g!v z!5CtvxbiWiHf5ug%k0Qlvka(qL-^CD6V#mQ2X=EfA1Ph4Of>epKT|R89PnK^4Uw+# zgstC;N{i!&WI-D`@?|}mG$PL3q~u7wbN`9tj)hafAMCNs6a!)yV?_eC$0Dr?dor=p zhvD9r!B_MbfXs71-?(RxYX1OwKmH{MeEUJgo!f>M*C~@ZJ6s{ZGM?xfZ6%t4SLzSU zdCu$?RmI2LF9K6hK)qX1z^t?pB9^P?llnn7NDN;9fy`o@oTx-r_THq5?|(r`t*_x} zjv0{AA_y6*Lsl>3NX7DlRE_^W=2c2F+SaH5o0uD#CFn@nGZd^h4X565By(+K2s3pXO5JM+=}Qx!YmEt!mKvp``zHa3 zJr0Je_BYg~)^giVFDAqG*BGyW>8N#i4*p;jhgJ3Sn8!4=8YG` zDAkbgRh)!_juPaD;TX_eW4`rbfZ~U3qvEj^{ z%_S%-d_QV=-^8`rau%IUS7+W#%ZADDgbBVa2}ooGHn^BbHCTGX6lwqoO*je_E@f~j zGnrgm+kj$D4MAC`m7RTB4|>#JjczsNVy&ejWQBhbwat1zY_)HL@RW3>A#f?GWp|?; 
zQ?=2yyZzJ=3x1q6q<}B;Z1yua9=zr^Aou;>q4A9dx-H#+t~*V`<|kC}xah}lxpfx) zHk6G%cwC~K)+*wz+eJ*@I*bl(=!RwcZXvzsVo-1l#@2Az?&ZOGc#=UjLEH1;WJn2| zPH@F8yT@XmswY&zup}C{AsMV;JbD&pPintEU@Q(V!0Ll9VDGf6NXa9L3cCFj*{6;{ zPU|wjv2X^y(yB`y*xBQKtJHD*dv(f%x4-D7K1OHadm!)qAmv!F9N+FQhDVK-pl6ha zYS!wIp0BFp>%9=FX!!-^p+GN;GyjYd_@kgNcRe!wt_>YwHY8j`2Hq^Ggf*Tf*l_kG z(7e(L)T#5}5%3KiKH!D4mFJ-=Wiv?ATuUZm>pHx1nIo=rsKH#Wn^_Q99%LEI3v1mxkBJzs5iT|oGFaRC*O{t*TrbU~F$8pJ-T zL#tmlF=N7IxnmBifwb%%TzK;=$T!>KZ_zG{@!scPysVqMQ!pKRZDfi2xKQ%sdO4b7 z^NgD7x|0Z1$Kp57I&DK2o@TyHde3|}>V!!BJ^0nS*AR1PGq{gmg+BQeQNm{zLAA0L z*q)L`vx8nk;rIkXp528LlXsxb*M@jY@I7dH9*hs1nS*PZx>=* z0Y@Jpv@joIB{?C|7T<@|PhUshuk#V>d0aGkvH|p5F+ipF4WT7`4d|FnCre_IsHnJO zO#4QHat%}QkvdQE^{N6ss-uO*W;^m~*9^FR#}K+-OXJ9JMYPuR6qV2D;q2+V$wGY> zd@ued8WZ<|3ZJzDtTvZ2EoTByPv~6I{3@lBfLD+u= zX)L11=o1lie#Lp@AgDl;vzL(mH^S)B(INCM_aimNVHM~G>?7*+_Nax~Nj@v`k-<@U zGDAoNfBy6W*}4T`Z6uH(v4^z98KMyMQW?UZrm zij5LW?ZyNXDo1Z=-3)8_~ChaP*R{K#_;Pf{0i=9Jd{ddQaa6$GjYT*l=e9 zg?>=Cmrugk`}&!iF=gaRts5$HSO9(T46|NzBT|{$My4%YjiYK=P?aVgj(0<~YdX-I40$A*JdP}1(#lQTc7dT^oMfCj{cyjdHU8u^ zlN{B^WNcdGh}X#+=3;^_ez;&9@qFWhJor`7_dW9Xd(jj!xZ@6rPJ9FdSe%?$NyFk% zemw56E4fi8N<17FK&Adi1kG`1jQbq))_5H8=vYDC=}bfck27IW|72pKy^;8IisEUl z4wUKf9n|Qm$v7!{1jHh4qOWsv88x?TyA}~0C`;79KUYT){=rIwdU^e&v31~*d5*$9 zeAKke1!Uo}O{jTUGo{2=f}V*O6Wf`6+=%y2(cXm{(XGTZOgU(v-DoRvUQ>c>Us$2j z!LcB-xCL6Xg|P0aRw&_{V0X=Ul#xN2jDNvTP_hmoOI=stbB+d3IqNREH}C+S^3LOn z&w3C&6PC%2_J$kg320DHn9!eCsIgNfE_#nBYrZZBy>3h9cE5m|DMqBtY&nGMOkjFj z`S3315JpJMlni;+LE_44WZk|C_HNE*KJ1{#-b4Ik%aALqr&S1%v_yqs&d?U1O}@YW z#+a0eW4CSEyD#VuGvm1N34mKLJyj-umghkXhHR(Rn*?3 zWK#F|pq<>lI7W3*F>|AN2IKx?6`FNtB0kofi#rrM86k5AG~(h)W@d9i_j)AyT5uZL z*G_^IJw2r0vke>)l}WSuNoG-gBFGG%1i?02T(#;MiuJBRdx$T-7;ud#`8l7E)TNM{ zSBL~{3^7M1o@#sR4XTTT@qvp7zwk(;dPV^)OWO-0J3Z`#6JJoe3Z9Hb-z2PbZ8kMA z)`kp-sBtr%>yS)oNyLr(fr|OG@j<~L{L-Qcn#86NI^-F;XrWH2TYHgjHkNquQGW7X zycBIzZiM1cU+fzvjz2{VGkuvBkf@$cMna3A_}LheqW6V+Mm+-`yO#|?=i{+{vKDz> zyMrqEq=N75Ho$IK(quH+9W-Tq$cWBP>c%@8vPR(zrSyCuvLCaAd`!qgeCCmOs`6NT z>&0|3Q-TZWO1&U+eg z6)5baG|=^LkcV#}T)ACgH^(uJS{^P;cx`cN#vNg@@8nEUXC*{-I84R`Wo4*oqcPg6 za15qQo{br&P`uuADPl91lQ&XhLAvn+>d}^hQ;wzZ;pY`}A;lDhXq^WGy*{{TEl8Zg z&57J+YhqWR4Bl5tD4(I(j9vLOd|>-wTeaO+P}vVXazIxAe;TT%RwfJMPdDW;_lqN( zPyA+g#YrFQ?zYEXqMy;S^XVwdWGlYhq>2uIThXvrb31I$>VlQn1Pj+^Qa4`Eq^?;A zD_9DUw%ca-={H04Luv$`5_O!Htxe*tbaDUc188!q(eZ3S;xJZ-Tv;uH*U(L{qm9A= z=Hq~q8HNs@A>dTp&b%0ofQeSAFz?3~rhcL+Ru=E4HuL)LB_E!Ho$zaTKRp3La?Q!g z6I03V6B|jx9wq#uLK~{5^rJ;zA4AjmIoyR2I)q=(hy;#(&wMf*0(E~YvS{Z3`ozBv z%XzgyrJ^)G8#l9IbVwhUCG19v*BOJdbOKBc>_Fqb?uNi0&5UZ7Dz^4dMn#TFAM4r_*tG~ZmCb?Ob5)>m=WMb$;s`o?)(e+94#O-5NoL&-Es~Lc3q)-n zQgy}7_?6xus%=q1FS2)H!AuFPTRaAdd#2fqmr}$(d`eKsWeqA-M;E6y_aM`h>13I; zEgpA15Kn#CfqZf%V%Qmrr9EO$XY6CDqCyC(&DZB@7SDyzu5=_Pl7V%^%PG%!#dw8L zE>*PP4xIbA4BZYBAep0zl>WIubR_OO+;kX4t7t1qL30<>eo?B57OsuMVor&0mm`65yNb|(3#w#e-8RPeYyo3tymV3O8ZpguTdO zfeQ#;xrjD-nLywDUUEB252k3a+ia$RYb346mz!H!`j8 za)Aj{I4wfES}RbH-Z*6Jo{GdXm*Plko^G#akvB$zc79_oF=lInaV^K5h~{Xa8`L8t zy_11B&0M4vQ30n<2w+)faePdBBDR)M!(yZNm^&iNNOtrN>?J3Kr=7IMH|NjCBv*x4 z?u#cv442Dq@rK)ZW**Sn_klxc6{PrYhw^I)SZ`J*GumAP?{>d}K^HUfHbD$@Z=Zzq zvdby4C^_?2hgv z(<|RHW(yOc`pI0leYMlh_8#y1kmN7~M65*DR6R*PufNr9x0kG5vJIa*I~iTIP$h1I z0pQXvNaR&lgWv-fbcTBi-D&6bfNnfRQaP^RmM{awAJ>K<>4P{z_!>-l;zHad7vpP0 z506&YG4yg_q@!7nHd_L6UGN3=>urJ~!lHI+v@awymmxLdDde7xIF8FUz|Th~5&2i| z8M@a28`n(jdxq684{JZ|DEomgETu(UOUw-l2JAN!f>+cP1d zb}sH7J`J(K`qTzBX;Rl|h0h-mCdW3$Q)jJ2n6&#fX!_Dquxe{PQge6)^F^jI2F@Aq zKKdm!QTj5kE)XWl`G|ClJAqO+O5l*molqL@faTl7aH`ZHcv-`fDOZ;4ZW#l9Ul(%k z;%CfDNnso{AssF+X9=X5AQ630B9m7NuQZHpk9v5b<2P2JV66zo-Qxpw>Fqv9aCL&6 
z*;Np06-i41#vX_*iIt2CxVORix|u8 zCFr8=PpVRT0-o3*k5A1$&h$CVCPvSuV9gahFkZqMSKZQw^O^!U&B_MK0#eYyt0|zB zyOGScy@Gc1#4#TQ`SAPnQ*ggt5rVItf;+q(=YcM3();iU!)*D2jKu|j#Myepl& zYu?W&mQ>=)Mmx~L%0A#cIDwLn?xpNw4}qz~Fgji@iUnL+kocR6C5R{p3cLPH`r$&v*l!wgon6YEi+utyF%9Ihe{w;QU|_JX`M@6%aTUk3GDM z6qk$vlg>EY9QYngWdj(mbRjhOI-e1#^&~4Y5IOm1m9>FIF@Baam1(MPM~9%$ZjstS zR4D5Pw=T59`y?&cM<-H66E$%_>sMa?J`&s(m7|t(Ik3QECCTDWvzt^YL3#T3!47UR z$vQU`XO*o+0_tDUuH%MCs9l{{Cwrky2g8}rNr#xVg?o{FjV_T@@8_NvV3?NeMNHAW zF>otz4$S$!1-^_m!egI`Vhh@U9ExRNc;FDacVsppx0+!_VIJ^X%8*z2S5Z!%|Z?AXH}P@DUFtY4B2NpgJP_Z)LewhIBfrI0$9B|y9br18ZHVN%&)OIlS@ zA%CzDR8+*#W)|Rm+8NX#xDPQO^zccUd??&hZMW2Q3z9gdfepm=qZFU*%*JooL_>W5 zY~3Wt!ydUq6syGvr*lsY@(Ra1$p?gH+^<40ig zn{3=uE{HCAN1zRAW66%Wp_qvRv|vpdrEm5W@*^&S>x1$5-kUB6V^z>Q`G+vlkj=C-U(vXL3&ilIpQmcPyy4iXFx#>@Q^@!nQ7U5JEaICkhgXVa!Obd3X6Vsu@+~6C#z<3^MBxgM zK6Z+U+P@yfPFhKBNDJV{vua`8mNhU@MvSP`S|hy)>nVYs&tS`}$>`hI*?94u188~D zDqF3fT9hI%gIOX{&dj%*1ggD-s3B#D@u7^-Zjmd{@F9$_ULR3@>JT?7Sq-Wd7F~+17U<%vcTpg!)Crrzo0%)Mr?9|^0%WmOoZ6mS0ir)HGk(vMv8?%I zJmYjQbl>l_yQ4S?{6+Toc^5xEw|4{4UtUQ$EY2o5ns?9|T8s>I{6yY9BS5C7Li;Qy zeE8NUD3N)GnkR+eG~xuxM>NQwt|vL`*9pQ)_oMH}X_CDtgS*&77H+@G=Y3;8h(2}p zf$FO`;&^`zWAoetN(N%>20r-USsVd;`F;X2el-S845UNbjjPOUI0{==UW7dvdhky0 zAu_hR4=1wsgTvZloWEF$WM5#wUH=J+j~7G26UQT`M{f~xx81Jt(GD~+;|eoTei`>J z|6~-fst3L8%ApQr@WauCEb?A^5#=0{L)>l0(e&wI%-vlI*zXdrhjm3ADb|#LxMDK( zvdIjmtk=R*a{bV^uVD9GClJTo_=3Kz@dxRhCg2?V3VHL*z&q|wAl6G4;D93sU~qQ| z(mvS^9p;0ev(1sw&dr9}(<8`M;SP$`QMbLc;1sB4(`2Ba*X}c4Db(@K=65a%ke|D@ zll5-~kxs=lV)eKS?mw3xkrs*I|Hc5De7Z{si3pHeVdvoO%??H@LJY4bC*g6J1@RH} z#lv#q#QH@DBY!XpS_&o+{uB>N*jb1~NC$#pmN2Q#{|u!uAMN~#b|N+3?I8bcGF~Qn znpwI-l&BkB;hyM-BtOPT+im1Kjq>^CfST4)vTlkx^Uj9>&%4Qxd+{oC$FkTfjYdh; zs}Rfkj*50LldrBqTgo{S?qu3C}Wu1@6sj1O=0dXIz?(vY;=W%QObp$QLOA-t*|4lP~D z)hQojXl=e6T;ljBunQLu~Z;MqX=V;(8j^y2j>_o956b=(ie$I;?hrD(Q{ z1-a{V34&6@h$|Zca#Gis<|U;hWP&~Q?R_d7e7_VceU3xybS=Dr_+!T-kGP7vTkRb5 zg^7;tS-T336%v^J3f-BK1z%EP(H&E7bm~qrrF3mCiXV%P7P=_Ls$E+!B7q7;c} zxGUou;Ei8g^dk1ZZv*i0^-ga@v4&2#u-6@hZmWTjA1n#+xJ~TupJYDD@5SZ|&f&Kw zSP+veC0E~ClLNkKkb1WaYhNkHhk8ax$AmLv{ssl~SCg@dL_Ok{th2>%A?by z4&om=`NS{I8b3=DW4~K(#}iM<(-Imd@R6Httj@{~QiaR0NAwf+c4-?)$h?MiuC%lA zYmhzP)EqXzHjM0>pU>`G+Q%wL-J}D~s?w|C^662zY`VzMnr?Hhz{=10*yStN)8|kk z+sca5=fx@Zsk-K{7Ln3gQ+h;vC4Jo6i`7!7 zqa8Xqyt}Ob`8+&Y!G`LHU`IDYimBklKvT|LXXkj%syL{ z#6C}n#n({~*%9~+JDd~7JzXc+qMc7@p+jnH`g;lc2mNblv80Q%e!y2&@Z}!--DEyJ zUTXsVY|Dc^>ilE@!FcSRIgIQ9ZhNK`UKcZ^JfGMQqLmd#X>gjie_#+Aln>z?OE` zuq~V4lAC*VXveShI7_(!kEt?m9Nea8zxuf#+kSW!edy+9HYzio{0yH-Cg_XV&)D#q zZOfR?#yPE~r_;+=l~2m-=8Y`vx@#Hx;6OiJJGu{-t9Ovwde_*RhlNn9{)}!;>B2z) zM@_Wdz?Pv4o zX4V1MPJcw*W)m5!gCFsuT^01X8(6&6ll^)N(?bVV z(udmf+34wS*@nCes(tg|XN(PoH%U zdBx^Owvq9r&20DjN;37j4trPWG<$d2LiWs)Qo6Nd44bk31RnEw37xI4$WAj%A#UA5 z_IZx!wEsaJI%<6YYks~BJFnn<&*&0sJmtNLwQf5zf6xz za}4PhYHE%1&}&>>HHA&9nb5f0dK>-R>;d_5sE{_BTuJ*nu4jkDRax(OU)eDV^H`TB z9<0BID{XK(g5I>Zmt7GPL}YWTsUHuMsO5PfgtM)eywT?;?aATnv_qA2g32N6x>nHM zeM$rV`nUkk()Ob544T<;`$b@Evkr2W2-rVsieu|Gx6q9GMOxx*KCXRzhwgEX#exTR zvv;0nktBgncq7|{>3ij5+_u%s%7cpJjIRUzbEBaBVBTHyuF;s573l?~;}_U0YZ-cI zDWMZbt?7&d%h&{d1-jLKjQxjsU9@`BF}!M#VB?K%uK47m^F*3@L?+ctq6Ad-$g z-AZaz28sEChi z;3&~PtEE_GTLC+|XeNF=u!#=$Iz{wGgBZ!6mt$?51V$^k=0B@N=dEiL%{D zJOYzgy&z9oy26a!Gi18@G zG>-M7?$H??!`Lj~5l*lAMh8aMvmMtru_rqP$bRG1Y>ozhqsPx~HrdLYjq;VTKeqKI zR%c$&x=WIYAG?FhkDSRa(`sOE+T^gCaV9RkB*40mnQWC>39F&lz;d6}upU#Tu}DWQ zM*MH+zMMC(Q7e?~5r2g|qkGx7{G)W({p4wl!%;QO(Vt4Uv!`f1{dAf+ z#JgX}9%ZR@nXHn_1H3?;kInIUNo<^@@y0-Yx_y2K+wGT3kDR_m4l0$fKNlZnt2W!P z13JsFO20T;A9|eLxokQ8EPoz-NjRLfZCk~T5xfskvEJBYzZ7lKb%DO#UY#{rrbwqOlWjbd?n-0p9kiTZ7JYd2TbwZZl)Y15 
z$CmI(v$hM)(&^{C===LKXmPh`?5L`-MU$ z@W%chc#rrkw(mwW-5kAw7U(Xg-AotYRd=Kt&)%8EUij|Ler}eq-zTrf*6{r#BFFFH zmD5Y`b|ooxz=+R2Yfme!{$dI1_H>Zdojiy2>8@pf7u;P#PvY6A zWR3#a<8p=Q@4ic&7~W4?c6+i<4}4)0T8oJH`(Rubc^zju#o_VhgG?OXJ50tnV&e-( zU}~!xZSe3GX*S=^rn|`53!Gj=JKVOR$w%J4(?7nE-}Mr$rTmaJ%Z#TT?Z=SJ;8NB< zuaMk0=FB#g?O>N{3)-jNi^M|r3)pe;%J{X*QgSi5jkd0uN3^Q}SG_c0w<(sB&c{dC zn!|;-edf|f>-6ZuuYZusC+q3+uBYjZkB6}K>I7Q*h$3Bl?G$}> zTN3v4DaS<*zLChy8T4mIWBk0}96cgYLw~w^hMuq2{HyfR&J1?>!eS!wr~?(ee}PAz zn2=YWO6bc=^4RQ`3HU-~4-Rk}!LAn%&>9QO*ohM#k=?kOo|QVpuHKMBE2+HW^)jW| zk&Maqa_)8b?$9R^byrD*qg_~Nn?7=oo!sbC)<*V|o=4|A!>BtBe`-F<18jdC!-mFx9sHl3diE1yrlm*~KSNrxcw zpca4>xoG&wt+m}oW7+mu7eZm-v* zY1EosQf`1x_|}u2nNF;<-#I!uriHkFAI2ZPZzGPX33$Pna(3;3v5hC@9)}sL$J6b> zV_63YCpyF|0ejUrvgXe#p*<;to%vJAUhqI7>!1A-Un%^8ZZu2L(z}ft!)8ab-?uqq z-+LMKg#LS2Q1lr6CVD&0m&&9yzVgm=n`K#}2bJs#dM*9Bnq~Lo$+22D20&n(ex?s+4T1dAcd&9r7(4r?8qH29rynkvXy2XOLQZW^r!8;oAv3e8 z@%_`nboL5KdX~N(%_PmHJEa0>l;XyYdfTxJ>s9DW)vsx_gj?)|c74|2susKAg)#lU zJQmDLo}r$2;FAfNVarBmI~;jC>M3&EMR z!ekCBEVzvJ94%lazo)X^0o`)8R6Wj)c`MJxe7;IJ5~uJuvFCJmk}bQbdw@MO zr-$5*wIbuMEX1F-9>gWRtes59bb5x`JeDyThHCt*ugT2{h~e01{o%i&-%-80EeOXU5H}?g>uMFmpIx? z%+y|gRFq!hA4c<;Euf87D%%HedpdIICieynvbm`dd)lWs#iIrac?H+dYnW;_xPZXnkA%ADg*PqH>M`4 zSCL*jY5W(TfpfCVmc@s0&bBgA-64rDY4iFaGlbC>(NHwM>mJ%@EJ0o+NRqd@Jxs;q zGw7{~IjURq0}bFPCSw`KvHxlZ;OyCC<){6y@VhZt7*USH4pfqN4kz*MY2|oS_c~-g zYeI{J^PujTCuxlvi?=6Lk_X8NcxB^BQtuW=`qv3zb-CFjz_^IydN>gW`8YgboGh+0 z7Dw`z1MvJu&tax)KJH#)gLmyxBnK{jLbpOZ2pds`HyZK2P3J1$1AQwYWRDe2`IL;m zzRJV0BcdesFh642ipgP1Phv5Wga;nw!V?>Qvdiu&@Rb*G-S33q`1WjE)yKl6=@;P` zwHaUi`~zY|^theUCg{@na3W^Cm2B00fI>HpLZfOiI+iF!VuQ-KvYRqU{}Xc}Iwl?O z(l;j84OECr@B$*?W`U3Syaut^3(0{yN#vz*Js3&vB-Wj~(Cl4PQR84BdfWbiioR%y zG(sx~sGQ=}VHz;bx*oL(Na4Dh*2L<kN6}<8!lc*z`1l1mOFHYO75s8lRuus zkvDSjdyPnPYu{edY8j3ruf<`$C05*%zFV>FDQ6sIxe6Z|HNbN!G_m_j4g8kbhhC2C zCeo9<3GL}dOwN@4f2^JPH;4(t8{_nx)RS?m08o^{qa``P=mKkxVZwaH_{ zw-CJUDXtSTB=p5*5+xNyEet2(I!zuCj&LPHACHpwu0!NeP#OteWI?@7^+LaA4DnM? zpw-JfNbKyRH1rhL%j>z1y#EQm;2z8b8G1=wl8C95!Hz49*t>5EeHX4xj;nM1mrxV3 zyi1*&d&6Saa51>6IO4LFuWZ~>6Z+tp427jiB;(#8AhofW@?M)pztE%kDR02yA7L|N zKB2GMXP7XSfUyk|=;_}&Wcel?lD%yqlkBZe1Jm8v=)#A%)Nvx|WU5%Vy6fN>Vgo6@ zufew14rt&YRC7A|!42l*Tw^;-owEk_`iT(b*Rj}@&*@Q`@nxaRq_q9XZi5v)d;mIVft%cSwQSwVS5wu!P z;f7f{MELu4aO59{k-7~iUYCRk^ZqizvG+i?U5aE>TOu!Ol;Pbfv42viPKL!!Lc6sj z<;mOO_~H5JS*XHXZm(dx+GgN+9}%LXa}EAiLNIWWq083QGwvqGAXV0! 
zjvVO4>NoLlIkz08PZRh(Vg}ibEdsfUxh$_TpMT-F8A*D+gtb?0f;7oawrS)MbbObi z6J^w4FhG(?zbZ**FVv;SEah;`hH3PPyE^eZQiv066zJ%Y3gqQv3fyh=P^dti+|P-J z$vjbV)XSJKyT)NzzA74y6hjCvj3xFTfTxzvh=v};O<(otz?Bp{FRxARDX7q@=~@h& zG$wJfqab(hAmcfCB28pkaeJCE(Y!s5D2pD0z=^^jG7yAbn`}V0Lzp_A8pc^SbcwKB zC!`GR!OaI2V}Qv|c-CS_%TipRdJG|Qu@c=?cNkt|kE8k9L}`z&6gpZ*^Tm9kVIrq9 zocs6>o`5{HfBgWGZpXpxRVUE!mO16s#gv!mKVbZF`21fPq&eE+ z<${0g4yF4z+ABpZ7Q~|6-f^_*w+_p;sld+_vSecP9$Z)GhoKQ$;bHY}j9!(1Rn2?& zJ(aqcz4;wGXi~~d7`lX&_w=cpTnbES&}373~A!P}1(q|8#8DtG2pm4h>|Ab0#_ckt zat{P}(^HN-EXsg0i5ir*aVC3e!WK}wUkb&-b#P@pml4zbhn{VdSnHG5G0;#H0tJ^@ zy^`mkqw|~}{I7@cdSFH!_e*iP4J)$QRsn{D3SjWmBK*W&s;=%;}vfbFyfV%c0E>CD}^PSc~;-?6BWb*zYDziaK}09N8Sqi!h=?L3g=a z{2W}QszaPi?*p;8j&Gf%sPLu|m@@W_%b=Y_3(p~p<#Os(BhH|pe+EJ}gfrK-n9wpK zajd>`0_&pv@%g1TEVc4L`M27{A#O66e^8gqD^sEq-VLGZni5vhA`^<5l<=Ld2rcWG zgLUQ&7|fTXj_vEwOV|+~rx?>IzUkn2P?pAyC1U6&b5!?Kr&_az(B@VWW{)moeM`qt zN2y;-fZ8i~c}j~k*+s*Zn;oEf`VG$c@EC6FHzAAT%fP<)6C_w#f@)R`Q*KwziYc4Y zS@YxYbHOB)a-T>$ zKd)yZ#lExca(d*Aj|m-;HK36S3;C)Qp7^Wt9sJ;Qz=Z=*Skb3OL>`*M0m*MLsML)& zM5d730aIdj{XEvqm`F2@Nzu-?g{VDXLiB3;v3<1#Y`(V+opVEqI_mFWE=HRY_PjBc$t%(X zHig}jxf$h$4T;C6b13ZJ4Xz%?SU=ZEbgjHTnf|K)?S?O5q)(+iZ=V@sr6*5D9b1_T zVH(6!tDn(PD`4_o<)ERn4DDF!2fRx<^y_*#(sU#pR!UfqI|t>6#pn!(PRRgX;!=U( z@+$C4kfoohr7+3Q4VM^;(bn}sq->1{#KjbV#oHLBa@=`{?Kp}{Jno{V=mb*sWewD( z=i;TElgRT1LmKsUB8eOP0xzs9IFH3~Y*F-wG~0=Er>_jn`YuLzK7RHFGpA8@-AwrE zdlK_d1@`lei0wIV)X6Sq1J~(bw_ZK?y)c9ui#5ohOL(6cx;DnrUTb*vAJgy`7eN2t6k2j^#xoeQeoM7K4973HXIgHlHIEWOI;j4z(W8jpj zG`&QRnvH4zvvE6)yQxgqMYNz}_$0y3gOVhq`ydm!;xRu~c`AB&zd`%^>Xc{VYM1qRzpSI6-(S`P-I&vk&Nzq#duAo@6;H_i8CKt*Hii*DqA&-*1HEcT$|U zY7lxKa`VEy3N*X75Uy`X0^S^f-QXb}z2N&2C5^vBYGV!_YyS_c%)|r%I^{U*vIVa9 zeS|fJw(#l`_q((mfv1trAYN06BuG!An+hv%O`-*t+u(R27QuXlua}W`;FFBvLF0x2a#lvVGZTZvBYU?RHeo{qPoVw_V2Q!G2^G z@X`P1VSF6RWw2K~0H1AY)O?yYiTIYy1|}-ti(+favvCnr<=+%^hGYx0JpX}Sh8Hf5 zL+pIo#AZi67euCQXK##5B~5BmsN6eLhZ(SzS#pR`j*O*eZ6s~5Ul83zh zwGeb}4;r?Z(0s+4p!cT-qPaXmruR=6dd#i$fnfq)&d>B>sF2N>c@vg7#j)2m7?VTO zUgPk;)yT`R;-ir@Nt5nnhV33;?*v&ATd)PD#irrm_tqpyUKb<#n{bf*!G5hUA-sGS zMpCr`{^aY^o0$^iu;C|Ma#xvnF1g6=ihK?~s#U1+Pa)d6#)A5RG|h|UVUX5(^tRKc zog>yTDd0VR6dOlXB_z?!trGldUt;JXGrZ;XA8vo3LErrOiG#*Uz+5;1u06AG#-TcB zPSK@>en!N#H2_p!nSocJDMmi+WEQB)(O2;XRPsqW?C5=nzvOSDWKkg`GxCI2H=E7Y z4u{4G(}1UE!?&5HNC#_@SRIe$%&rF&(B>YGzBk9w;3w;#v)G6U-Za2Q9F>Mfg=F~c zcmUi2g{Z|bb*SoSVy;Wx0+mq>5;1p>Ev3L{DG1SqV^&n$MUQ++RfQ2B9$iv0nI@!O zN4ZLE;7#Ze>`YOn@w{sMGPjTOBYr^F(3_0m_B?pDOqP67SPvWK>Jr`l=~Q_14Ggo^ zoGmksgyl)`*`XU-TKUWEOGYaf8+oHfF=LQDWSYX_S0%(((O0Vzz1Oa#QK`Uwu za*PjQ&fX4?UAP53CvqA~ln5F1&SB%GO`{`Yg%I$d7@|g|kfc>nu=}|=oh^D7dupU{ z%eXp>{_hiha$gFWJAOcoRW$slS0M{Z>XFxDAQ;rCU>#r069~)Q#zjXh$UG@A@@(ZT z6!q68>E5lFxH1LqGoPS5!v>QTjmW<3zp*jO9D`rJVJ@GZNG*+W;PclET;XUyzBFCO ztGNOcX&Ay#9nSx})SSMYDND+WGVoV2S7VqP)9gQ1bp4hgJRN97(#2lmj17sC#F+Z>qX6EIm{obb*B!c+$_T6c}x8#`|R=3gM@bG(^5yYz_e-AN?m3Xh5Q z%Y-uferB2=YU@KYzBg<|PLTYcI!4d&} zk~c-SQ3+a^Do@*9mtbYsOK@~d=F4Wv(<3&|KwlwonsVI^d5yy&=9$EXRB@ zkS8yzCy+qdR)*It0ea6;kjHbe&!~KdZz{y-RPhP4qij4>e>)8!vqG79S2OUhZ!SDu zC4qsqJ%Vb-TF_M2B6*)o!AM1cUR@JvPaH;sges*-2- z6iN1kzgU%4$Sm&ahabL5bk+MuSY#$l4ffu{zdb$BU2zB8jg*jQ_k%w);~RK-w6V>d z!c^j>B5{nAf;R@^=*w19S_k8~+Vv^cHk;GNck7wv7%A%H*9v{hu7Jwvhgh@sHKsYK zLX?F%Imq#?Mz$BgnI+@N!C^6CXUw9nrWE}(+=|&&lUW~$PQ2NsLNiu`LC2s9eJlPI zJAY^Lv-fb<{;kFi6mVQ8c7UDItwjB=jwd@@vr+q@EDinT13AKXu+UPHyezRG=A+Yz zb+-=w*HMddZSxu4%CSne+YCkxN|^TQQY`ZOj4=tybZFl(*kAS(&vE(Olnrv!YJ)Cy zaMmL?rH-MR$syPyTL^m6zhTWob%@>_(i_<{B}1PQKfc;Dv_G)Ie(ZO#(J zwY`ktos+5JBMVw|wi`#Y^O-k;zd=~J4c`wsV_H}aEAD4bBkVu2C7&meaMcf3 zxJHz?Cai~|B@-baxEVSmML_LIDYV-dP-cu{g`H5N6IfFcDbZe8pYQ}Di&wHt)N5Sd 
zEJJk)ji|rWGn}=P^GYo|C(t`P0ta<|V|m_Au-z1nBkj3pwrdvSPWaxhkL5vF}LpYncM@Ng@oblxXDk*5yqed5xPR< zAu2yV4Z{8z@cDEv82s(WB}Td=WZZIeEziPFo1($}qc2!oT*@1tfj>?CCq~;U&vdxn8Wc^}Vq{4BPmjsDXu%^sw8LD@%5F0l;Gnc-K z(8%*q{NIc8VNPfk9BqAq%;IE7(zIqv^@G^+$|iJdnlWv>ew;}qy3~697pzIp2cMh9 z)JV;Ml)jOJp0Q*c(2^qy?oK5_idS&shBMG3)WPjlSPD5f zk@MoEJr#n%E3#BcQkMijTgP#p-hi)04-7w>izb|ZomV!!LM~#M)p>iLzmX}1Hp7Wz z>8r_ft*ZuosGkilr-;yNnN}qFILBQWb7q#B8Pk5wclx=Z72GQRu=P z2`Pkq5?q~IO4!xn+C<(wKDPo-aus-WP^_P! zNn+hJ(3T&|j-Rf9ms~!e`!|4wqZ(xY6f<(fONS0|xtA~DEm(Z(CU_=aXS&~AfyT-I zv5p&xaYckYkyA)uCDW~G@#iVj^HeKyljHhTJwD9n%1z&gl#~ZZu~8{ZypJ5~*zGqYgo*{A>0y6VGuPq6Mx251B7r<4Kax zPFC10ljCt%(ej{ZR>#SbsR`0Tx9|5cYnu#7ey2tcCdd*||541SnFO1+n?tg#C9#NC zp}bRj?A!{BX|28}si}E~KYxgjKi?EdLdimIED96bs0220um{&FC~X!J zqL;>9z`6`qnCfu}jy($mPrVsPX6jJA0tI^C=@xidSYWtiubyY*p1B#+$x9R+n+-x+cx8>SbHBd^->pkb*lQIof#ie}bCxw`@_R`)TFm3gRN z#W5fsaek(mkMYHsQg&s7C0U@WNlNxiCqF@fR$BBT8y?_sdn z666^O?8pC1U_87tAV>TS#Mzw1?Dug3xxfI-asC4{Jg&gqTt)Kx-81~V;vH=5&4=y} z|3SR@6dDm`17_RJXs4ALBrKK2%We)hsxggz{W+CX72HOTy2n7||3c)Kv&>U0ft)Az zk*p0v{fbk#mc57>;>%!7>L5lA5ay$b2npU+i^3QBA#HXME8~)l#mj3UqOh8=JmrTX z2h{1fKQg3ORh169tYL+s^09SuJvIzAV0Moz2IkI2y}#Aq>LgFI2ix)P(R{S^uV6Zc zM^Jf*9NpcnL|!l-+2_X;srR2Th_IF5a)@nMrMnH6eBl@v`P$@f;}q%@77EhmPhwx! z9|)0ZL6?&ka83Ua_+WS)^Na%E_1$q~XXPby4PFGDV|{FD*gbf#W+GXfRt=F$Gz3G5qT&tZ#7R;~3YgretW{Vm|PO%b{mdjGD8xeTN zO`h~O$k6DS3iReu9vOeR3xD_eK&GxHZQ7lISw$0wXTAjF#o1u+hrf*DHWLUq`wTYr zn9zR-mZZ`97JE6w9abhhMQs}{do-X!nbs5t9R1Ds>}kXEKcwk&vq_}MVi;WO`WY=Q ztLpyO87|LiW7G5&L3zP_Ca3Evn0Q`6Cg?aArbolMX}QRLm`=xY?`xWkQJhSm8SG_D z^=g2Uea57$?>H=%rkMTv5xdT*4K=DIX~s@TOew5|*>^tRkE_Nc&+HgRmds>NSBsIP zhEUcx@C@V2arfd67?H{DC0NgsAR7LAu&2)*dDasd@gOy-v1dQrb5JFfiNEl&HMB6XtD?7+Ek3#)o7 zS^LjdVD6bwoYY@{I-8={u(dZZWYmhPPL!a{z1PvQE(|TrHsOc$lc~Ms7YO}N3GJ_R zLe)`k#?f>!p$p_R)?AnEx;pYSEtZu=$1&ZWh*i~4z@(@Vx@P(>#ax{J50y^I0G_Ih{@M6_9 z%=*dIZ5t)1sz?^os3-)fR$MMT*b8hnaQm02dVy%9B^4{k1i7?_xUYISsDl|jx2+I1 zubl)RTVA4$@ex6v-yvLf=``cpSPFIX#?f&+Et1;B)sq@u!E=yBH&JzJ*Bg&KpS@70 z62s2DZAQtp0T}Qyru)Z)XynHy?B*RZu%SwsbUQ3VXJJ=(={kjoZ5%~Mnb-QXIejXL37tyj)crtlE(0A}E{5Y79)v`PFvk|` zMAyj!%z@K?kbSI*&R|I){%aVK9XZFZqgQvOn;ZQV|I0>ye0ZTew+cNPMhJNka8S zHdGdH@a{V1K+HWHYcV3jUn?+hKc8jxxq)}MGTjmJ0iLb8f-}~Kq4QJ)`Y%?QtoAV> zyDlBZZ=xEcr`HzQLzYxcI1Pqx>A{1l$;2W{iPf%v*Z^^i=x6crzbv zC7p%uT+e??q&4YNIth_4zgM=YSrVR8gWW$XDdN1|7+04V(XIveLE-9FE)Qu**Quz| zf;Llf+f9n-WNu+cj78`-B~2oIX*=#-R*1h(PJrx_NiaV4F6$Y7hn=(MCB7ChqJ{}g z@XPNPR-L-cT6;-=#kMsV5u;E0QesiTOO{sO{)0Q)Ea@_xKDM?@lUg5>1dWZeVAkp^ zJn(HgElS%9&vd7f`!c_|`}2q30=IwsCwms>4QrF3seAG922-En;1e!Rx1Oy+zNsj!lB!`3jvvCr^)F%B$Th}X^cyY>eF+xH=%U59OY%i*tsFPyvj9@J-ZYu2=5 zOfQ#NF4l~ME@Dg{`rd-b{}_IR&LHa;ZpzjTe_}bcfVxf-!KqJgLgSO|OjVp3%iFP* zzaY<$=*W!6R81wiZq_Jx95klGw?3lEr%5E%GaBw+*P>O1`ux(_u29!@iaE*gjsMP7 zqCNYTL7~DA)Ojb$aZA6$*g|#kOiGRL*3W`qS5f47|6(JfZ?L1UmO)itJwM$}nB?|r z(0b8}=)LDARM~a2wZ{zT!@deIek@NNH;U81mHXHY!pRu1KL8d~Dv%@J?|_K97LMge za{Z#|82PW=e%^p030W`29$54cy3Dx!+IlJGL7fx{Xuc16=1*aHk~OiI84sU4y)h{I zJG?IA_EzOrv2<$()2Wmxh+Lt8bG^h!M^hBXzR+d<^Z9|YlexaFw>8lk$b-$lMexP@ zi_Db#*RV5)(|SD?!s)wKR7{}&ro0YASwDm|k*9FBR2F)T+Cs%CJ>vMkTmb(+ULv`y z$^Xmq|HpUuf4Ii~$pi6%Z3Vr0au|GE3zmcou;Fdupqx07?lqqcwqsxL_52a$?4sLX zuyH@kp0QU@oMB8HLNWwa&jQH3hbvIz&^o5CsS#{hBM`NXZS zsegUQz-l3O?(bBVvDBv{_69V#*n^fW0vhW&na;V;&1_zoj)C2suvTggIdkCzs!d1* z?N4uUuWr784C^s%hwGq$M55`9C_Fjj47R0?)P8gkdAFhj>Rufus~Zw=zV&Z*QR_VX zec~wl@8AuHFe=4|lW*gaJS95tR}4b>U!mHZR;c>sOZ;qJqJxht;l?riozD>agn#hB zWHD8D$-&vfvoPzDHM~gM2{}RC?2U!fq3GczDxw{LI`8#ireh^O?M@=QGondllLJPm z-)0u;EMg-k_>;E_HiF16YpmK7gVS!Y*przE1G9F)p5nJ`)_7suJ~s(_U3p~Hl%3?K zo)hcb{fJfeoE5-9RMkd~ZgWpy?sgl} 
znRCN^k*usVkdeEoq|3UryIQHVU!}O^CESLzU zWPMRFPR>%I;ZE})qUJn)ynF+t>SoYHCl)rmOanWKeSFX7GIXYBB2hI`BzJGkq3OGX zsOIq?99WozGxq9I;f*RJJ|vnfxt@h=q&*dL65x@y)o}Y?0K^rHC$}?01S`l)ScbE3 z*QP!6QHu%eZgnI(=TO+r>6|CDGU3m1O*X$n29|GAp|{`Y(_@3(VB6_O)?C|)BXOVk zLT#_`ZdW&ipPfLOEpy?Zs4}yxV=stry-(g6X5#6s#pGLwIT?BM0rMBi!ML}<=o{}u zpPbo^8#6@VjDr|n4NRjut)5`#wh8o3+F3l1w;2nKG9lrTIIYbfxU;Ggo<;p(w|!ZR zQT2;SW1;|$Rfn;Lb8{i;j1?YoD`4eMMUf`aPQlqLJ?ubBK69w507AsBf_KDZ{ODK) zEoCM&eBU9`QYb@Kt*!x=ldjCMtB0v-U>(j((O?a2)o3K=S@6Gf2lA70VDiveVl?$B z$YhOz=koKoQ{c=r&NC#At3_!2!VqSek1Mpjc0$qfPAH}rM#I#!@Q<@Lo%Q=4D-kh; zD$o7QIQZw`Wy>GT=P3qY(f%DBgtNdWHW)*=n&s%eGt}s?BYc>gK+k8{q59T%yc2Il zc=J=Be!412G5yNE`T88jE~he{vt?=SuQTwBw85cWqiA2em-#cW4-%Y(h>qTUrZ2LA zj`!Why2QK%_uJCM?o$^0EPsp|dA6|li!My9{ed?7H85fNRy;EQ0#4Rw<?t)K$eBkho7I=Sl5i=IN8+Z4p(!3K*=yF4ZXmIt} z_rDTk&(TMCWzk3GlEPJzANL%plPVZd+d;@pRcGekV`$PJE(f$I3zXzOz=p=N@UUbV zzA%^#?mH?lm&>cp48M=xkH3XO2@Y^hGnPh8>4o&G7qMcZ1D=un59U1kjvD+p$d-*E zBVzi*Zu3HTxBDr>p7W)s=I`2V3p;+Al27J!@b~%@nls0S{>$ou|Gvi4a)oo`L%)FCB777y7M0?Lnr}?W zrBA4BWe!CVK>|1J@BD*G3`zGKW^US9(bz@*v8a!(!McmMc+^Ml+$ zOxBbZI937HF9C(Ld5|;@$eV!64Eu9)Wm4@LlB*L>8-`eDugy0{T4iX zImWyVzl$XnQq-JvB4?I3(TSYT@baN4cv7|)Vhqf2-4B*+_v85P701Y_6c0AxZ$D&u z5{|(-mxld|faPJQD&NmSNV{>AD7u(~$npp1CZSC7re45zLn-XfEmm-IumeKpn!lgJmGwE>2(4u)~tiIu2*2W@(osG%?mvKWfI%hR)f)w zxvcEQrvf4E)9Ca(6Ib#}n1R|H6m2cS$>wWukLz7V_~1Xdr4bH^>Sh$5UBp#E*YNl6 zVc6GkguE^?rZJ+@q;YpR{qry%4n29q?9D&Nb~$`xG+&s~riJHluS5-)v;sZZcO7q6 zXfwyJ`xANDCv34*CR}^6g7OZY!}te_NlnmZyj;5iKCZmPsE6+&{5kpHztoUfL`CVN zg$pXX0^Txn-YCJNq3>vw&oWAH))BDfI1)Px=t?V1(BfAz?}Se<{`-^Z0H?J~+Pw?t z8)0hot_@arX2B7T9h`T%9q)Jwkz4aiIIVvYV^-s1?=g2bI&{6n3&)O=E#Ad;9PJm5 zo)RUBS6-k&&n=K%eiZBb8u4~}8ffc|-R8PcZO(8CQUanZY2>TL87^|!d;%=i)qaR^!oL zgyVFqKZFjauE8m*+h{&11zi1y*-;l?x`98!7+>EAk01l<&YBRN>ZSCUiXG~5|9g+{ zY4Cbj49jnN&<{p$(JI1|nCZGv_ms~ll79-ipY~#qQ8eDXB}Mb@B*PzPfjk0^6%PTa8V?)I>wRmik`>GX}UR{!oI3lGPh<2hE-m}$6sdCN#^ruSzbHnwIwr~hw~xXONbiVd*So& z1*F5J8FRe^;8t%Czx4Lp30>LYs8%_ob0< z&6!zq7Qr=n$;u@47`pm`Ciu+Gf-$v59P4c4H)pOuA?tbwT$l*Qp0`1M_C4_Yxq&F= zT?9MgK=1H$aQl}ge0#YC|5=s5>ob!%j_`SIUR%!f0CHib?Rbbdae~oKd=KkmE|RkG zlJxewbGRfq4?W*_Gwj$hVrG2`4!0`7($^#K^|cUnQ5pt?=3?BwMV5-xR)YBkKQMkD z3CqfjX@h1ku5J{k2?bGD=7sFsqz;S}xj_F&e1oUW33MPJ9<1}9vMUd+C4mJSNcMj= zWN5!4xrH3Fso#NY9aq9UnX!>Q#y;WZWDn9^bO>bnH^Px+eYo<3CmWX=0}fJrI(17w zrlbql3o|d^eYs*bqdS1UdAy#?buDELB|q}VqE9h9#_tA+k8g2dP9KyW=)!-O4im@P zy*SR~2HZ&*fDcP#(RA^8d{eU;U!Gn^3qNq$_V^I0f9V=@L=>{uc;P4!=SUUL|Kl%p z$b>5|#i@6A6iW75LT&RF95mA-Mbq6eXI%m{Xj%*Zc6!nIE(Pc?{x#eRAH?EeZCG;T z6Mw|e8R+w5d=lk^r{1lB{O<E0_8`5*I_t_J6PM{)vbE*NDR#m8dcN)Hgw?ew~Y83t< zNgdK-QAM(VyckhIEZ)S({JOwIw4Py1=ZF*WultGk#Q+dXHUUp@FSaeclu<}NLMpZ7 z;onnvrb#{?JO*XRON9PfAp z37!^YhEEY=Bkw{lv1Vkj1W~L(0?p@TlAoLdyG}-(uCWo|l|&=BH<&^9*+)@L7e!pQ z#|O3ig@{qa88&Fb9oA4Qj`Y;lLD!WW=6atg{*K(jM6cB(Gq#A*6Ec!m;+=?7Io^E` zU4<9I=P>V%F?b|Z9dFH;hO1BVG4dLLSI_w*N8OZNdp@2_`<}znzrPqui5u|TBZB@A z+<@|xBao_mTwro1756ucpqsu272V5YRkn)}jgXaiCdQG>*xQ10rMeh{M+`b_a3Qe+ zy?_hPb8OXa$WG>wnv+owATyQLay{v{`rl!AMhx@k;%e&ZW59j38pc-qWHpMuV5j1L ztbg!0V((GI-t`nF=^-nj@!tkIaft}y@{K#sWbQxMdrJe*W*rITc=5qu<;?hEMcN-0 z2FdeE;l!UMSUyjMPBoUsmT!@``3lFpza>u^rcZ~g+YF6JTu1j+-e6nKo0%Y*SMJxPI0rA|*c5S27D%44i9a}O)j5O^LHj)cTJT(*Plc+E6s7| z{h8$ZacgXU8U`sM!gRv6<5>Oh7OZ;O0zrEO?5YC|g4FvmBxr2`3XPm2Im*JM#e@fi z?H%|zcqNSJVc7b85X!Ap`5m%>=&N%b z?zy^Rn+YOP{HYgfz!D{?YrrO>ybgx%6{Ad?r_KZ1`cv=R{2S)koL0nHZ-i=P!cnVVP z2vPDQA=;51jJx=LUrwr{UM(7zXXB2ER_r{V+wgBPVLp8>hQA#b zprWQ%5VY|uyIH`U6R?ZZpZ)ErlP?@)@NFbqI*d1l)TM zTJkkuO8-1!{ND>yo%a~`ty}^&XU&Pn#yyyGV+M(Gm`^MG&w&?SVyenC=tQ4~EZglt zpBbdVA$u9}$MhSj%&*~VoXKXJ1xLxhwVK$1-`L-ck|b*WbFhpYPaOPgKu<=y+}`yX 
zdtgRAl<9oK^0^zB5Enh_7EIYk$<>wTT94!Dqhn}U8VZXea==e<2AP*73%3U(m=h~< z$(oy-zI$^MW`DT_eVJ`oyeFHHR`Q2N^VjTk6=~wAIh}sm$Z>+dUSd8pZKbla0_gkT z+wi7q1Kq2pNO>v)@U&E$JoU?h1@p3St6wFndVe|XS7)g37iE5AL?7GTEkk}s<+1bD zon)1-=@6~D61ZUV9xu#`p)o3^sIk+Od|n{KZn`5v=e|8`|Dg5)Xub8tukL-|5SI=y zYtxZkxriLOUsiePm@a*??HRP~QzPr#!w^>XLcO#XrE24`E`>+-zR_bsCmIN*cNlZp zj5Bm@eh3c9-gqJXA+yqIDfxKzprHG79J5x%gE>C_GoJW0ht>Qro{9yg;)lyzz9%Gr zXg-ib>n0~UrDO?N{VJ3!T`xh+9fDXHj&prX*aR85Dt4UYSLTT!%ls9-0=2H%%-#_N zk{pdpX?-jNq+i33#%)|(mWdMDqV!cmJ+9ZCj`d?K9hkVz?7Lj9eEsp}ad;^Cl1p6%{L z&tr~6Zr=uad8$92lviM-@I9XVS`S5OoHkcqC$-G?VYgoVGsCh>PLPxp&vSC!1ufHaE<1$vj-AL zMPPGfnaOK5uOXe&fUD8LWFu9yRU!emL!rh)1V%UB#@?;ov@*V$ZR)%YV`(B}(5`^Y z&%cLnKD#rQXNqv1_{lW0^b1@6Q3C_E_d#EXB0K%*PI6#yCTWm~fV|#Rs#qgNc5cz9 zM^vssh3ZPCaZLhfKV3mSc%2~+ei-1LUPqjAS^$#5yFfhfJlcC)U}mpzr=KJw>8|YS zU@CioX|nr{CUqkV`c?auzndA6;vVfpo z2IT@jz}~G}a7+29ZPv~u=oC_i7C()M|1x9h6uAhl?9yV}RwC|DV(1+r%=O%q@#eE% zu&`f-T2<%qyFA}OQd%*#_4ut$Umdk1M`b_2j(3Plhtff;Y9e*`os8QTU%;PdSCA|>WAfmy2z~ZZk`(;a zr8RVjdD>wC<~2oZW}7rQCEbD-6jE^CG9KvE=+V$a_u-I)0!cHDCYEP(K|3Ilh>b{- z{c+-iJ>o#L%umyP*Indobsx7TM!})x@x(pg4jk8e%x-)lLT2yX$5O7Y-(PSO!h`p* zK1%WMujm`ooG{9K(;H6%3oZ!USL>0V_TnThx&oB=VXT(QS+tTjW6v2bBAWXq!(^KV z;6^{ql$weUtbXJBx@&k()BujOMuKU+E9p7352KEKXOdNxa(ka{SQxyPJZL_I#Tuu$ zoS6!3F`q;REFW`h$aie6+dE8;`hi=6!oW7mjb1-tLc3FJh@gBCZkd>kbGw zuAn(YPx1u{Za>Bx?#xH?C4RJ$ISfCIM2JzBHOxy%VUIqaJ#%056vV_<+*PB1vo)8H zeGw0srrpQD{GI__#c{pWmK|o_a&z>7Ux(oV%q4rzsl$`GrO>_F1%KJO&|e1MFmdT* z`tsOHdVg#keJi;UtFsTH$=^u)eDx~Y#0}w_7wd4)Kpy|oY{26dzd_CaIFZhcAs6G@ zG3KW_JN3dOZ2zN8Bm#Virr$7D+_fTdaRA@af|#>+w8=()DX!MGf@7cg5Ul)#ow4x< zbGCMr@l`B<-R;R(AJN9()lgWpRi19`lqPBienPl&HwjqY0F@^u(c-)_}O`vkPW%#Kgh9K$}z;)kS^SG413g8z`A2k z?2fq@LuTCpTzBa+y|+9bU72;6gsUMos0=E}44D_%fO#Ufg^ay{YuJJNn9&QHRh zK_4&HrE}gpkoaY_jN%spL;EX$_w6KGAZfrXJ?l$NMUIe7HBM+gO^GkMay5OhDF@}G zN?^S2WYB&r4Zpr@B8NAAh8nHO#ADhee3O&Sta@xi%ojSL^V8ihuyQedChtL?@3J7q z3J0O>g%b>XmE>P}{~Xd!)v${F5v+gZS8%x{U`9j#!N4{2KkIQc0Vlv-Gnwk zk=e}X&k#5)HJuio*P%^c<(ckl$1z;{3m#Z-j%i#W#BST|h1REqh~zna=J=31+-Z}g zM{S}Yb-`>w*qUs9?Vg`ZS^i7bX3I7F`{q2ztq8{aG6v#&6=CMlzu<6WBR%WGV(CRY zvfcg?qz*-qJ8Jh>t!`7&de4mD=NwE+|A@7mr*~`YT7k)`1kPjr4INwv*`Opw)kS(L zA0(ooPYZG&QkF1`2wZw53z8MIooXM0pV5!_-Oe=;wzC48Trl3 z2?Gl@#@2(Fw9J5rH#4dC@5^BMEE(pkkf-0~Y-BATF`%t@54MeMgV4%xWdGp`u;O@Y zZ{s;m^ZS4BqH7-Bt6G5T>Qy12|188zSB0J2_#QoZRN#NI733RNQ|*|Gc=>w_)fEvU zS1J)t#lM018>-pE4-;4`FID2F{RwqUir`_xORRmAgLim`*vmIxVqdN_Xd6ed((dQ5 zzo1c2;OfSXdwm1nD}R84?K_a~la0EXSJ~OtE@*uGB`nqWher%pNUj`5^Ri!|m+1l4 z&?*RJ!UHhmoDP2fy%8@R{f`;#oJ>`7AFvNX%y5!-3cGAo0{`MvF(MjKftgn{h|0@L zFtx~&s(+U!OF}I{sc19QOC{k@=w^EN)je2OXNJeC7Q(OScc}VNm+atl><obs9BF%He&l9X*c=A$)-=ZZA%U{m&A{q&1A*plYQb zc?CPI&ST*0f1D=v4sN)QCy|A^bbgd9=qHYWQo}8>{lrqB64!~|2-jqF@q)|u8<-o* zgy@}%s%-sS7Ge)L)6P31IB{+rKW)Qo@@iHw({p4dIhj_E53Wez)x-O77yFyN+dc!O zx4efxNAjTL+#%xpITJP=s=%J}`{??pC73c&jAH-IC0h?(z*e=>;G@)l1&0>GtxwPK zCGCQJ*W%ccbSWC&wFiSrZ}FcFi4&{;UN9pm@0s~(0pwfD572H?pe>_GXxKatnQd*% zx&a&7FXTWH8;!{0&wA7&VH~ZIpGh8R+{8~S<%v`KRZz6N&072}fLC12H-B>&7OF&% z^nzUU*!~|Ab3=nry;=x*8IQR&I$%#nx6Q3IS4Hd^%s5?d6NtBuP{&D3HE>Hxc6e5*S}{jb5!dnXg)tk z1aZyK^-~udKgY7`KlH;t8Kxq%b~~Idwx>`#g`8V-2#u~y0-yA7Hc#~*b{@IL*T)FP zj?=0p9ra=UW=|!xI#DFUZ6>?ofClq;E4G|Nn*~bzmnG?@) z@FAl@GIY4JCTB#^9N7^Zmuf{TCN@R$;n6-=TJ ztR}&qI|jHq>^C@kn9ojm;6coGMAPWk#IVa%PlrU41i& zhg399aWG;m?$2V{=RSn$8B$czE(RyLakX2vBD5RNBWuusH0P&Mp3wjH5`;z_2HWJN z;CJvo1m&%yPsc3ir+hgYxxf=jms^te-$t;!kHG|KZtuR%nhqQIkTgFHdcA8N4g9YH zYwmJq&3HS~bE6B1@*5s~6=6uryT-sG${zg6^k~xUS6oI_hQx;VV8{IP@Z!y3ESiXD z5ImWd8^6Jx9p-2(t_i9fr%q!5m$g3yBv?8L^70)?TFOjf(O%82+38Fj13J;-g9}9R 
z-?BO(IS~1AGwXNu4meBo!Y&tMvhnP0Qd-Jofvo4!_d)%@(^_1)Zdn+0T$;=BEK`{p z86cTwwvt$V6;k@QkNx}6j>t`MLWHlC@ ziNeeNr_e&`6 z`79h+1ELU&;vr_MfPfoVSBfhvvelIUp|Bz`Xpple5h>saX{aK6^LFJJqZBDLO>?mlSlR?YH+*P~h6YhZ7 zLo;G5YOMFAYTXMIv~irZh8BW$!*B7HpNXtF&CI3cO}O{;Uy#>12$%D6F?CH1j9r_9 z+WlwQ(7WE~5M6Py;gI0i{Y|Ike3)=-+=%AvG>WO%i;n7RAZ2YOJ@*QPw3mXVsU?uIS8MgWwt*JFqiZ$ zP~Wt%nejR(@83pU9Wqqr^1nX*wvqy-^QmqA8nSeUGwp)+q2<*`G}?bjKaLKw_9aEU z{+NJ1j|mnZlS`Vy1vERDL2d=Pcqu_7<#vly>dE~Su12({7D{7{QDkcAhizggPMt46 zoe&~eYPsim>k`r#QrUrDWz*D`NM2L3aO@#cqq-1=(+@FQl_$hUwn6K@6HK%1D3lMZ zq@Yti1SEfQN!M&q#a6VUHiffkb!_bTsY3PFL=SSXo-*D<{~4gcK-rq|Chy)gpA zpKQmD^rrjydSMq3W{0M2bIgWkmWZS_c^Pdwj zBsh`e#fVPtCDK2?TuYOFzAVr(2HhLvq?5Y~sh!cJ+Bgkwn3rP7Vv(+FE(O`*4h`yk(n0*Q9Gok0dm3C6Ed9YH)3~pAU5{sV17N{2a3)zt7jHgu1`c^T#YKfx40{t zJM+rtl3gl+_K9v#e8TKw$-hJ70hwMc*6~FDb~|v{+HeGqzK}o3!NTr zn`u`_Rh~%o-Ov;cR9q*hWAj=9VVW%WF`V znE_9)uP1NscvK!Z!ZO>MnS$RH=cgB9M}a4Kz2FRrs6HK8LmB z7&oNI>C8c>zZi+Vz00UQUr(2=dZ6Ve zXF+@=OwtM|%#;_y>YfEqPxDRCsLEkxg9&R@Cqeg(zm%5p5oi?4DQO@Yb+=}v9M4@; zd%TM!57|I@ArAY_z0TCG%gFMuOtQ1n5?SA&3i(yo6#aKW=J#QzY;B>S){O|>+MO8m x#U+rvc0EEpG$ZvT%}TAuswM3_&g$R!o+Z}}GVy1ih5y_2=P;80>HGg}{{}98;D!JI literal 0 HcmV?d00001 diff --git a/test/embeddings/test_bert_embedding.py b/test/embeddings/test_bert_embedding.py index 6a4a0ffa..71511458 100644 --- a/test/embeddings/test_bert_embedding.py +++ b/test/embeddings/test_bert_embedding.py @@ -29,8 +29,11 @@ class TestDownload(unittest.TestCase): class TestBertEmbedding(unittest.TestCase): def test_bert_embedding_1(self): - vocab = Vocabulary().add_word_lst("this is a test .".split()) - embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert') + vocab = Vocabulary().add_word_lst("this is a test . [SEP]".split()) + embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert', word_dropout=0.1) + requires_grad = embed.requires_grad + embed.requires_grad = not requires_grad + embed.train() words = torch.LongTensor([[2, 3, 4, 0]]) result = embed(words) self.assertEqual(result.size(), (1, 4, 16)) diff --git a/test/embeddings/test_elmo_embedding.py b/test/embeddings/test_elmo_embedding.py index a087f0a4..bfb31659 100644 --- a/test/embeddings/test_elmo_embedding.py +++ b/test/embeddings/test_elmo_embedding.py @@ -18,4 +18,19 @@ class TestDownload(unittest.TestCase): # 首先保证所有权重可以加载;上传权重;验证可以下载 +class TestRunElmo(unittest.TestCase): + def test_elmo_embedding(self): + vocab = Vocabulary().add_word_lst("This is a test .".split()) + elmo_embed = ElmoEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_elmo', layers='0,1') + words = torch.LongTensor([[0, 1, 2]]) + hidden = elmo_embed(words) + print(hidden.size()) + + def test_elmo_embedding_layer_assertion(self): + vocab = Vocabulary().add_word_lst("This is a test .".split()) + try: + elmo_embed = ElmoEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_elmo', + layers='0,1,2') + except AssertionError as e: + print(e) From b5a7db0b669f6956a98300799e060977f8a45a55 Mon Sep 17 00:00:00 2001 From: ChenXin Date: Thu, 5 Sep 2019 14:31:38 +0800 Subject: [PATCH 39/50] delete the output part in dot-utils --- fastNLP/doc_utils.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fastNLP/doc_utils.py b/fastNLP/doc_utils.py index 5801dd53..5f293d3f 100644 --- a/fastNLP/doc_utils.py +++ b/fastNLP/doc_utils.py @@ -1,3 +1,7 @@ +"""undocumented""" + +__all__ = [] + import inspect import sys @@ -7,7 +11,8 @@ def doc_process(m): if inspect.isclass(obj) or 
inspect.isfunction(obj): if obj.__module__ != m.__name__: if obj.__doc__ is None: - print(name, obj.__doc__) + # print(name, obj.__doc__) + pass else: module_name = obj.__module__ while 1: @@ -18,5 +23,5 @@ def doc_process(m): break module_name = ".".join(module_name.split('.')[:-1]) if module_name == m.__name__: - print(name, ": not found defined doc.") + # print(name, ": not found defined doc.") break From 5b7e9b6572ff980c9b536b3b8a8b5ea526bd2ad6 Mon Sep 17 00:00:00 2001 From: ChenXin Date: Thu, 5 Sep 2019 14:32:37 +0800 Subject: [PATCH 40/50] update the ChnSentiCorpPipe in docs --- docs/source/fastNLP.io.loader.rst | 2 +- docs/source/fastNLP.io.pipe.rst | 2 +- docs/source/fastNLP.io.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/fastNLP.io.loader.rst b/docs/source/fastNLP.io.loader.rst index 060b5450..c1af6c0c 100644 --- a/docs/source/fastNLP.io.loader.rst +++ b/docs/source/fastNLP.io.loader.rst @@ -2,6 +2,6 @@ fastNLP.io.loader ================= .. automodule:: fastNLP.io.loader - :members: Loader, YelpLoader, YelpFullLoader, YelpPolarityLoader, IMDBLoader, SSTLoader, SST2Loader, ConllLoader, Conll2003Loader, Conll2003NERLoader, OntoNotesNERLoader, CTBLoader, MsraNERLoader, PeopleDailyNERLoader, WeiboNERLoader, CSVLoader, JsonLoader, CWSLoader, MNLILoader, QuoraLoader, SNLILoader, QNLILoader, RTELoader + :members: Loader, YelpLoader, YelpFullLoader, YelpPolarityLoader, IMDBLoader, SSTLoader, SST2Loader, ChnSentiCorpLoader, ConllLoader, Conll2003Loader, Conll2003NERLoader, OntoNotesNERLoader, CTBLoader, MsraNERLoader, PeopleDailyNERLoader, WeiboNERLoader, CSVLoader, JsonLoader, CWSLoader, MNLILoader, QuoraLoader, SNLILoader, QNLILoader, RTELoader :inherited-members: diff --git a/docs/source/fastNLP.io.pipe.rst b/docs/source/fastNLP.io.pipe.rst index d35d2ddc..3ef9b5a8 100644 --- a/docs/source/fastNLP.io.pipe.rst +++ b/docs/source/fastNLP.io.pipe.rst @@ -2,6 +2,6 @@ fastNLP.io.pipe =============== .. automodule:: fastNLP.io.pipe - :members: Pipe, CWSPipe, YelpFullPipe, YelpPolarityPipe, SSTPipe, SST2Pipe, IMDBPipe, Conll2003NERPipe, OntoNotesNERPipe, MsraNERPipe, WeiboNERPipe, PeopleDailyPipe, Conll2003Pipe, MatchingBertPipe, RTEBertPipe, SNLIBertPipe, QuoraBertPipe, QNLIBertPipe, MNLIBertPipe, MatchingPipe, RTEPipe, SNLIPipe, QuoraPipe, QNLIPipe, MNLIPipe + :members: Pipe, CWSPipe, YelpFullPipe, YelpPolarityPipe, SSTPipe, SST2Pipe, IMDBPipe, ChnSentiCorpPipe, Conll2003NERPipe, OntoNotesNERPipe, MsraNERPipe, WeiboNERPipe, PeopleDailyPipe, Conll2003Pipe, MatchingBertPipe, RTEBertPipe, SNLIBertPipe, QuoraBertPipe, QNLIBertPipe, MNLIBertPipe, MatchingPipe, RTEPipe, SNLIPipe, QuoraPipe, QNLIPipe, MNLIPipe :inherited-members: diff --git a/docs/source/fastNLP.io.rst b/docs/source/fastNLP.io.rst index 96df9d6c..7118039d 100644 --- a/docs/source/fastNLP.io.rst +++ b/docs/source/fastNLP.io.rst @@ -2,7 +2,7 @@ fastNLP.io ========== .. 
automodule:: fastNLP.io - :members: DataBundle, EmbedLoader, Loader, YelpLoader, YelpFullLoader, YelpPolarityLoader, IMDBLoader, SSTLoader, SST2Loader, ConllLoader, Conll2003Loader, Conll2003NERLoader, OntoNotesNERLoader, CTBLoader, MsraNERLoader, WeiboNERLoader, PeopleDailyNERLoader, CSVLoader, JsonLoader, CWSLoader, MNLILoader, QuoraLoader, SNLILoader, QNLILoader, RTELoader, Pipe, YelpFullPipe, YelpPolarityPipe, SSTPipe, SST2Pipe, IMDBPipe, Conll2003Pipe, Conll2003NERPipe, OntoNotesNERPipe, MsraNERPipe, PeopleDailyPipe, WeiboNERPipe, CWSPipe, MatchingBertPipe, RTEBertPipe, SNLIBertPipe, QuoraBertPipe, QNLIBertPipe, MNLIBertPipe, MatchingPipe, RTEPipe, SNLIPipe, QuoraPipe, QNLIPipe, MNLIPipe, ModelLoader, ModelSaver + :members: DataBundle, EmbedLoader, Loader, YelpLoader, YelpFullLoader, YelpPolarityLoader, IMDBLoader, SSTLoader, SST2Loader, ChnSentiCorpLoader, ConllLoader, Conll2003Loader, Conll2003NERLoader, OntoNotesNERLoader, CTBLoader, MsraNERLoader, WeiboNERLoader, PeopleDailyNERLoader, CSVLoader, JsonLoader, CWSLoader, MNLILoader, QuoraLoader, SNLILoader, QNLILoader, RTELoader, Pipe, YelpFullPipe, YelpPolarityPipe, SSTPipe, SST2Pipe, IMDBPipe, ChnSentiCorpPipe, Conll2003Pipe, Conll2003NERPipe, OntoNotesNERPipe, MsraNERPipe, PeopleDailyPipe, WeiboNERPipe, CWSPipe, MatchingBertPipe, RTEBertPipe, SNLIBertPipe, QuoraBertPipe, QNLIBertPipe, MNLIBertPipe, MatchingPipe, RTEPipe, SNLIPipe, QuoraPipe, QNLIPipe, MNLIPipe, ModelLoader, ModelSaver :inherited-members: 子模块 From f004a070b4606fa509f6d55ea70a8ac9a82766af Mon Sep 17 00:00:00 2001 From: ChenXin Date: Thu, 5 Sep 2019 15:13:08 +0800 Subject: [PATCH 41/50] update the doc tool --- docs/count.py | 47 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 40 insertions(+), 7 deletions(-) diff --git a/docs/count.py b/docs/count.py index 6a5d256b..7118216a 100644 --- a/docs/count.py +++ b/docs/count.py @@ -23,6 +23,13 @@ def _colored_string(string: str, color: str or int) -> str: return "\033[%dm%s\033[0m" % (color, string) +def gr(string, flag): + if flag: + return _colored_string(string, "green") + else: + return _colored_string(string, "red") + + def find_all_modules(): modules = {} children = {} @@ -79,20 +86,46 @@ def create_rst_file(modules, name, children): def check_file(m, name): + names = name.split('.') + test_name = "test." 
+ ".".join(names[1:-1]) + ".test_" + names[-1] + try: + __import__(test_name) + tm = sys.modules[test_name] + except ModuleNotFoundError: + tm = None + tested = tm is not None + funcs = {} + classes = {} for item, obj in inspect.getmembers(m): - if inspect.isclass(obj) and obj.__module__ == name: - print(obj) - if inspect.isfunction(obj) and obj.__module__ == name: - print("FUNC", obj) + if inspect.isclass(obj) and obj.__module__ == name and not obj.__name__.startswith('_'): + this = (obj.__doc__ is not None, tested and obj.__name__ in dir(tm), {}) + for i in dir(obj): + func = getattr(obj, i) + if inspect.isfunction(func) and not i.startswith('_'): + this[2][i] = (func.__doc__ is not None, False) + classes[obj.__name__] = this + if inspect.isfunction(obj) and obj.__module__ == name and not obj.__name__.startswith('_'): + this = (obj.__doc__ is not None, tested and obj.__name__ in dir(tm)) # docs + funcs[obj.__name__] = this + return funcs, classes -def check_files(modules): +def check_files(modules, out=sys.stdout): for name in sorted(modules.keys()): - if name == 'fastNLP.core.utils': - check_file(modules[name], name) + print(name, file=out) + funcs, classes = check_file(modules[name], name) + for f in funcs: + print("%-30s \t %s \t %s" % (f, gr("文档", funcs[f][0]), gr("测试", funcs[f][1])), file=out) + for c in classes: + print("%-30s \t %s \t %s" % (c, gr("文档", classes[c][0]), gr("测试", classes[c][1])), file=out) + methods = classes[c][2] + for f in methods: + print(" %-28s \t %s" % (f, gr("文档", methods[f][0])), file=out) + print(file=out) def main(): + sys.path.append("..") print(_colored_string('Getting modules...', "Blue")) modules, to_doc, children = find_all_modules() print(_colored_string('Done!', "Green")) From 2fbc1d78518d6f75080da8bdab6ddaecd5d3cd87 Mon Sep 17 00:00:00 2001 From: unknown <793736331@qq.com> Date: Sat, 7 Sep 2019 15:22:43 +0800 Subject: [PATCH 42/50] change the print format for dataset and instance --- fastNLP/core/dataset.py | 101 ++++++++++++++-------------- fastNLP/core/instance.py | 20 +++--- fastNLP/core/utils.py | 138 +++++++++++++++++++++++++++------------ 3 files changed, 155 insertions(+), 104 deletions(-) diff --git a/fastNLP/core/dataset.py b/fastNLP/core/dataset.py index ebdc780f..36852b93 100644 --- a/fastNLP/core/dataset.py +++ b/fastNLP/core/dataset.py @@ -300,13 +300,14 @@ from .field import FieldArray from .field import SetInputOrTargetException from .instance import Instance from .utils import _get_func_signature +from .utils import pretty_table_printer class DataSet(object): """ fastNLP的数据容器,详细的使用方法见文档 :doc:`fastNLP.core.dataset` """ - + def __init__(self, data=None): """ @@ -326,26 +327,26 @@ class DataSet(object): for ins in data: assert isinstance(ins, Instance), "Must be Instance type, not {}.".format(type(ins)) self.append(ins) - + else: raise ValueError("data only be dict or list type.") - + def __contains__(self, item): return item in self.field_arrays - + def __iter__(self): def iter_func(): for idx in range(len(self)): yield self[idx] - + return iter_func() - + def _inner_iter(self): class Iter_ptr: def __init__(self, dataset, idx): self.dataset = dataset self.idx = idx - + def __getitem__(self, item): assert item in self.dataset.field_arrays, "no such field:{} in Instance {}".format(item, self.dataset[ self.idx]) @@ -358,13 +359,13 @@ class DataSet(object): def __repr__(self): return self.dataset[self.idx].__repr__() - + def inner_iter_func(): for idx in range(len(self)): yield Iter_ptr(self, idx) - + return inner_iter_func() - + def 
__getitem__(self, idx): """给定int的index,返回一个Instance; 给定slice,返回包含这个slice内容的新的DataSet。 @@ -397,20 +398,20 @@ class DataSet(object): return dataset else: raise KeyError("Unrecognized type {} for idx in __getitem__ method".format(type(idx))) - + def __getattr__(self, item): # Not tested. Don't use !! if item == "field_arrays": raise AttributeError if isinstance(item, str) and item in self.field_arrays: return self.field_arrays[item] - + def __setstate__(self, state): self.__dict__ = state - + def __getstate__(self): return self.__dict__ - + def __len__(self): """Fetch the length of the dataset. @@ -420,16 +421,10 @@ class DataSet(object): return 0 field = iter(self.field_arrays.values()).__next__() return len(field) - - def __inner_repr__(self): - if len(self) < 20: - return ",\n".join([ins.__repr__() for ins in self]) - else: - return self[:5].__inner_repr__() + "\n...\n" + self[-5:].__inner_repr__() - + def __repr__(self): - return "DataSet(" + self.__inner_repr__() + ")" - + return str(pretty_table_printer(self)) + def append(self, instance): """ 将一个instance对象append到DataSet后面。 @@ -454,7 +449,7 @@ class DataSet(object): except AppendToTargetOrInputException as e: logger.error(f"Cannot append to field:{name}.") raise e - + def add_fieldarray(self, field_name, fieldarray): """ 将fieldarray添加到DataSet中. @@ -469,7 +464,7 @@ class DataSet(object): raise RuntimeError(f"The field to add must have the same size as dataset. " f"Dataset size {len(self)} != field size {len(fieldarray)}") self.field_arrays[field_name] = fieldarray - + def add_field(self, field_name, fields, padder=AutoPadder(), is_input=False, is_target=False, ignore_type=False): """ 新增一个field @@ -481,14 +476,14 @@ class DataSet(object): :param bool is_target: 新加入的field是否是target :param bool ignore_type: 是否忽略对新加入的field的类型检查 """ - + if len(self.field_arrays) != 0: if len(self) != len(fields): raise RuntimeError(f"The field to add must have the same size as dataset. " f"Dataset size {len(self)} != field size {len(fields)}") self.field_arrays[field_name] = FieldArray(field_name, fields, is_target=is_target, is_input=is_input, padder=padder, ignore_type=ignore_type) - + def delete_instance(self, index): """ 删除第index个instance @@ -504,7 +499,7 @@ class DataSet(object): for field in self.field_arrays.values(): field.pop(index) return self - + def delete_field(self, field_name): """ 删除名为field_name的field @@ -538,7 +533,7 @@ class DataSet(object): if isinstance(field_name, str): return field_name in self.field_arrays return False - + def get_field(self, field_name): """ 获取field_name这个field @@ -549,7 +544,7 @@ class DataSet(object): if field_name not in self.field_arrays: raise KeyError("Field name {} not found in DataSet".format(field_name)) return self.field_arrays[field_name] - + def get_all_fields(self): """ 返回一个dict,key为field_name, value为对应的 :class:`~fastNLP.FieldArray` @@ -557,7 +552,7 @@ class DataSet(object): :return dict: 返回如上所述的字典 """ return self.field_arrays - + def get_field_names(self) -> list: """ 返回一个list,包含所有 field 的名字 @@ -565,7 +560,7 @@ class DataSet(object): :return list: 返回如上所述的列表 """ return sorted(self.field_arrays.keys()) - + def get_length(self): """ 获取DataSet的元素数量 @@ -573,7 +568,7 @@ class DataSet(object): :return: int: DataSet中Instance的个数。 """ return len(self) - + def rename_field(self, field_name, new_field_name): """ 将某个field重新命名. 
@@ -587,7 +582,7 @@ class DataSet(object): else: raise KeyError("DataSet has no field named {}.".format(field_name)) return self - + def set_target(self, *field_names, flag=True, use_1st_ins_infer_dim_type=True): """ 将field_names的field设置为target @@ -614,7 +609,7 @@ class DataSet(object): else: raise KeyError("{} is not a valid field name.".format(name)) return self - + def set_input(self, *field_names, flag=True, use_1st_ins_infer_dim_type=True): """ 将field_names的field设置为input:: @@ -638,7 +633,7 @@ class DataSet(object): else: raise KeyError("{} is not a valid field name.".format(name)) return self - + def set_ignore_type(self, *field_names, flag=True): """ 将field设置为忽略类型状态。当某个field被设置了ignore_type, 则在被设置为target或者input时将不进行类型检查, @@ -655,7 +650,7 @@ class DataSet(object): else: raise KeyError("{} is not a valid field name.".format(name)) return self - + def set_padder(self, field_name, padder): """ 为field_name设置padder:: @@ -671,7 +666,7 @@ class DataSet(object): raise KeyError("There is no field named {}.".format(field_name)) self.field_arrays[field_name].set_padder(padder) return self - + def set_pad_val(self, field_name, pad_val): """ 为某个field设置对应的pad_val. @@ -683,7 +678,7 @@ class DataSet(object): raise KeyError("There is no field named {}.".format(field_name)) self.field_arrays[field_name].set_pad_val(pad_val) return self - + def get_input_name(self): """ 返回所有is_input被设置为True的field名称 @@ -691,7 +686,7 @@ class DataSet(object): :return list: 里面的元素为被设置为input的field名称 """ return [name for name, field in self.field_arrays.items() if field.is_input] - + def get_target_name(self): """ 返回所有is_target被设置为True的field名称 @@ -699,7 +694,7 @@ class DataSet(object): :return list: 里面的元素为被设置为target的field名称 """ return [name for name, field in self.field_arrays.items() if field.is_target] - + def apply_field(self, func, field_name, new_field_name=None, **kwargs): """ 将DataSet中的每个instance中的名为 `field_name` 的field传给func,并获取它的返回值。 @@ -728,16 +723,16 @@ class DataSet(object): results.append(func(ins[field_name])) except Exception as e: if idx != -1: - logger.error("Exception happens at the `{}`th(from 1) instance.".format(idx+1)) + logger.error("Exception happens at the `{}`th(from 1) instance.".format(idx + 1)) raise e if not (new_field_name is None) and len(list(filter(lambda x: x is not None, results))) == 0: # all None raise ValueError("{} always return None.".format(_get_func_signature(func=func))) - + if new_field_name is not None: self._add_apply_field(results, new_field_name, kwargs) - + return results - + def _add_apply_field(self, results, new_field_name, kwargs): """ 将results作为加入到新的field中,field名称为new_field_name @@ -769,7 +764,7 @@ class DataSet(object): self.add_field(field_name=new_field_name, fields=results, is_input=extra_param.get("is_input", None), is_target=extra_param.get("is_target", None), ignore_type=extra_param.get("ignore_type", False)) - + def apply(self, func, new_field_name=None, **kwargs): """ 将DataSet中每个instance传入到func中,并获取它的返回值. 
@@ -801,13 +796,13 @@ class DataSet(object): # results = [func(ins) for ins in self._inner_iter()] if not (new_field_name is None) and len(list(filter(lambda x: x is not None, results))) == 0: # all None raise ValueError("{} always return None.".format(_get_func_signature(func=func))) - + if new_field_name is not None: self._add_apply_field(results, new_field_name, kwargs) - + return results - def add_seq_len(self, field_name:str, new_field_name=Const.INPUT_LEN): + def add_seq_len(self, field_name: str, new_field_name=Const.INPUT_LEN): """ 将使用len()直接对field_name中每个元素作用,将其结果作为seqence length, 并放入seq_len这个field。 @@ -844,7 +839,7 @@ class DataSet(object): return dataset else: return DataSet() - + def split(self, ratio, shuffle=True): """ 将DataSet按照ratio的比例拆分,返回两个DataSet @@ -870,9 +865,9 @@ class DataSet(object): for field_name in self.field_arrays: train_set.field_arrays[field_name].to(self.field_arrays[field_name]) dev_set.field_arrays[field_name].to(self.field_arrays[field_name]) - + return train_set, dev_set - + def save(self, path): """ 保存DataSet. @@ -881,7 +876,7 @@ class DataSet(object): """ with open(path, 'wb') as f: pickle.dump(self, f) - + @staticmethod def load(path): r""" diff --git a/fastNLP/core/instance.py b/fastNLP/core/instance.py index 9460b5e4..3cf7ab45 100644 --- a/fastNLP/core/instance.py +++ b/fastNLP/core/instance.py @@ -3,10 +3,13 @@ instance 模块实现了Instance 类在fastNLP中对应sample。一个sample可 便于理解的例子可以参考文档 :doc:`fastNLP.core.dataset` 中的表格 """ + __all__ = [ "Instance" ] +from .utils import pretty_table_printer + class Instance(object): """ @@ -20,11 +23,11 @@ class Instance(object): >>>ins.add_field("field_3", [3, 3, 3]) >>>ins = Instance(**{'x1': 1, 'x2':np.zeros((3, 4))}) """ - + def __init__(self, **fields): - + self.fields = fields - + def add_field(self, field_name, field): """ 向Instance中增加一个field @@ -41,18 +44,15 @@ class Instance(object): :return: 一个迭代器 """ return self.fields.items() - + def __getitem__(self, name): if name in self.fields: return self.fields[name] else: raise KeyError("{} not found".format(name)) - + def __setitem__(self, name, field): return self.add_field(name, field) - + def __repr__(self): - s = '\'' - return "{" + ",\n".join( - "\'" + field_name + "\': " + str(self.fields[field_name]) + \ - f" type={(str(type(self.fields[field_name]))).split(s)[1]}" for field_name in self.fields) + "}" + return str(pretty_table_printer(self)) diff --git a/fastNLP/core/utils.py b/fastNLP/core/utils.py index 814e0bd5..dd2afab7 100644 --- a/fastNLP/core/utils.py +++ b/fastNLP/core/utils.py @@ -1,6 +1,7 @@ """ utils模块实现了 fastNLP 内部和外部所需的很多工具。其中用户可以使用的是 :func:`cache_results` 修饰器。 """ + __all__ = [ "cache_results", "seq_len_to_mask", @@ -12,12 +13,12 @@ import inspect import os import warnings from collections import Counter, namedtuple - import numpy as np import torch import torch.nn as nn from typing import List from ._logger import logger +from prettytable import PrettyTable _CheckRes = namedtuple('_CheckRes', ['missing', 'unused', 'duplicated', 'required', 'all_needed', 'varargs']) @@ -25,27 +26,27 @@ _CheckRes = namedtuple('_CheckRes', ['missing', 'unused', 'duplicated', 'require class Option(dict): """a dict can treat keys as attributes""" - + def __getattr__(self, item): try: return self.__getitem__(item) except KeyError: raise AttributeError(item) - + def __setattr__(self, key, value): if key.startswith('__') and key.endswith('__'): raise AttributeError(key) self.__setitem__(key, value) - + def __delattr__(self, item): try: self.pop(item) except KeyError: raise 
AttributeError(item) - + def __getstate__(self): return self - + def __setstate__(self, state): self.update(state) @@ -112,13 +113,13 @@ def cache_results(_cache_fp, _refresh=False, _verbose=1): :param int _verbose: 是否打印cache的信息。 :return: """ - + def wrapper_(func): signature = inspect.signature(func) for key, _ in signature.parameters.items(): if key in ('_cache_fp', '_refresh', '_verbose'): raise RuntimeError("The function decorated by cache_results cannot have keyword `{}`.".format(key)) - + def wrapper(*args, **kwargs): if '_cache_fp' in kwargs: cache_filepath = kwargs.pop('_cache_fp') @@ -136,7 +137,7 @@ def cache_results(_cache_fp, _refresh=False, _verbose=1): else: verbose = _verbose refresh_flag = True - + if cache_filepath is not None and refresh is False: # load data if os.path.exists(cache_filepath): @@ -145,7 +146,7 @@ def cache_results(_cache_fp, _refresh=False, _verbose=1): if verbose == 1: logger.info("Read cache from {}.".format(cache_filepath)) refresh_flag = False - + if refresh_flag: results = func(*args, **kwargs) if cache_filepath is not None: @@ -155,11 +156,11 @@ def cache_results(_cache_fp, _refresh=False, _verbose=1): with open(cache_filepath, 'wb') as f: _pickle.dump(results, f) logger.info("Save cache to {}.".format(cache_filepath)) - + return results - + return wrapper - + return wrapper_ @@ -187,6 +188,7 @@ def _save_model(model, model_name, save_dir, only_param=False): torch.save(model, model_path) model.to(_model_device) + def _move_model_to_device(model, device): """ 将model移动到device @@ -211,7 +213,7 @@ def _move_model_to_device(model, device): """ # if isinstance(model, torch.nn.parallel.DistributedDataParallel): # raise RuntimeError("model of `torch.nn.parallel.DistributedDataParallel` is not supported right now.") - + if device is None: if isinstance(model, torch.nn.DataParallel): model.cuda() @@ -220,10 +222,10 @@ def _move_model_to_device(model, device): if not torch.cuda.is_available() and ( device != 'cpu' or (isinstance(device, torch.device) and device.type != 'cpu')): raise ValueError("There is no usable gpu. set `device` as `cpu` or `None`.") - + if isinstance(model, torch.nn.DataParallel): raise RuntimeError("When model is `torch.nn.DataParallel`, the device has to be `None`.") - + if isinstance(device, int): assert device > -1, "device can only be non-negative integer" assert torch.cuda.device_count() > device, "Only has {} gpus, cannot use device {}.".format( @@ -267,7 +269,7 @@ def _get_model_device(model): """ # TODO 这个函数存在一定的风险,因为同一个模型可能存在某些parameter不在显卡中,比如BertEmbedding. 或者跨显卡 assert isinstance(model, nn.Module) - + parameters = list(model.parameters()) if len(parameters) == 0: return None @@ -427,10 +429,10 @@ def _move_dict_value_to_device(*args, device: torch.device, non_blocking=False): """ if not torch.cuda.is_available(): return - + if not isinstance(device, torch.device): raise TypeError(f"device must be `torch.device`, got `{type(device)}`") - + for arg in args: if isinstance(arg, dict): for key, value in arg.items(): @@ -445,10 +447,10 @@ class _CheckError(Exception): _CheckError. Used in losses.LossBase, metrics.MetricBase. 
""" - + def __init__(self, check_res: _CheckRes, func_signature: str): errs = [f'Problems occurred when calling `{func_signature}`'] - + if check_res.varargs: errs.append(f"\tvarargs: {check_res.varargs}(Does not support pass positional arguments, please delete it)") if check_res.missing: @@ -457,9 +459,9 @@ class _CheckError(Exception): errs.append(f"\tduplicated param: {check_res.duplicated}") if check_res.unused: errs.append(f"\tunused param: {check_res.unused}") - + Exception.__init__(self, '\n'.join(errs)) - + self.check_res = check_res self.func_signature = func_signature @@ -479,7 +481,7 @@ def _check_loss_evaluate(prev_func_signature: str, func_signature: str, check_re # if check_res.varargs: # errs.append(f"\tvarargs: *{check_res.varargs}") # suggestions.append(f"Does not support pass positional arguments, please delete *{check_res.varargs}.") - + if check_res.unused: for _unused in check_res.unused: if _unused in target_dict: @@ -490,7 +492,7 @@ def _check_loss_evaluate(prev_func_signature: str, func_signature: str, check_re unuseds.append(f"\tunused field: {_unused_field}") if _unused_param: unuseds.append(f"\tunused param: {_unused_param}") # output from predict or forward - + module_name = func_signature.split('.')[0] if check_res.missing: errs.append(f"\tmissing param: {check_res.missing}") @@ -511,7 +513,7 @@ def _check_loss_evaluate(prev_func_signature: str, func_signature: str, check_re mapped_missing.append(_miss) else: unmapped_missing.append(_miss) - + for _miss in mapped_missing + unmapped_missing: if _miss in dataset: suggestions.append(f"Set `{_miss}` as target.") @@ -524,17 +526,17 @@ def _check_loss_evaluate(prev_func_signature: str, func_signature: str, check_re else: _tmp = f'Provide `{_miss}` in DataSet or output of {prev_func_signature}.' suggestions.append(_tmp) - + if check_res.duplicated: errs.append(f"\tduplicated param: {check_res.duplicated}.") suggestions.append(f"Delete {check_res.duplicated} in the output of " f"{prev_func_signature} or do not set {check_res.duplicated} as targets. ") - + if len(errs) > 0: errs.extend(unuseds) elif check_level == STRICT_CHECK_LEVEL: errs.extend(unuseds) - + if len(errs) > 0: errs.insert(0, f'Problems occurred when calling {func_signature}') sugg_str = "" @@ -561,11 +563,11 @@ def _check_loss_evaluate(prev_func_signature: str, func_signature: str, check_re def _check_forward_error(forward_func, batch_x, dataset, check_level): check_res = _check_arg_dict_list(forward_func, batch_x) func_signature = _get_func_signature(forward_func) - + errs = [] suggestions = [] _unused = [] - + # if check_res.varargs: # errs.append(f"\tvarargs: {check_res.varargs}") # suggestions.append(f"Does not support pass positional arguments, please delete *{check_res.varargs}.") @@ -586,14 +588,14 @@ def _check_forward_error(forward_func, batch_x, dataset, check_level): # _tmp += f"Or you might find it in `unused field:`, you can use DataSet.rename_field() to " \ # f"rename the field in `unused field:`." 
suggestions.append(_tmp) - + if check_res.unused: _unused = [f"\tunused field: {check_res.unused}"] if len(errs) > 0: errs.extend(_unused) elif check_level == STRICT_CHECK_LEVEL: errs.extend(_unused) - + if len(errs) > 0: errs.insert(0, f'Problems occurred when calling {func_signature}') sugg_str = "" @@ -641,7 +643,7 @@ def seq_len_to_mask(seq_len, max_len=None): max_len = int(max_len) if max_len else int(seq_len.max()) broad_cast_seq_len = np.tile(np.arange(max_len), (len(seq_len), 1)) mask = broad_cast_seq_len < seq_len.reshape(-1, 1) - + elif isinstance(seq_len, torch.Tensor): assert seq_len.dim() == 1, f"seq_len can only have one dimension, got {seq_len.dim() == 1}." batch_size = seq_len.size(0) @@ -650,7 +652,7 @@ def seq_len_to_mask(seq_len, max_len=None): mask = broad_cast_seq_len.lt(seq_len.unsqueeze(1)) else: raise TypeError("Only support 1-d numpy.ndarray or 1-d torch.Tensor.") - + return mask @@ -658,24 +660,25 @@ class _pseudo_tqdm: """ 当无法引入tqdm,或者Trainer中设置use_tqdm为false的时候,用该方法打印数据 """ + def __init__(self, **kwargs): self.logger = logger - + def write(self, info): self.logger.info(info) - + def set_postfix_str(self, info): self.logger.info(info) - + def __getattr__(self, item): def pass_func(*args, **kwargs): pass - + return pass_func - + def __enter__(self): return self - + def __exit__(self, exc_type, exc_val, exc_tb): del self @@ -749,3 +752,56 @@ def get_seq_len(words, pad_value=0): """ mask = words.ne(pad_value) return mask.sum(dim=-1) + + +def pretty_table_printer(dataset_or_ins) -> PrettyTable: + """ + :param dataset_or_ins: 传入一个dataSet或者instance + ins = Instance(field_1=[1, 1, 1], field_2=[2, 2, 2], field_3=["a", "b", "c"]) + +-----------+-----------+-----------------+ + | field_1 | field_2 | field_3 | + +-----------+-----------+-----------------+ + | [1, 1, 1] | [2, 2, 2] | ['a', 'b', 'c'] | + +-----------+-----------+-----------------+ + :return: 以 pretty table的形式返回根据terminal大小进行自动截断 + """ + x = PrettyTable() + try: + sz = os.get_terminal_size() + column = sz.columns + row = sz.lines + except OSError: + column = 144 + row = 11 + if type(dataset_or_ins).__name__ == "DataSet": + x.field_names = list(dataset_or_ins.field_arrays.keys()) + c_size = len(x.field_names) + for ins in dataset_or_ins: + x.add_row([sub_column(ins[k], column, c_size, k) for k in x.field_names]) + row -= 1 + if row < 0: + x.add_row(["..." for _ in range(c_size)]) + break + elif type(dataset_or_ins).__name__ == "Instance": + x.field_names = list(dataset_or_ins.fields.keys()) + c_size = len(x.field_names) + x.add_row([sub_column(dataset_or_ins[k], column, c_size, k) for k in x.field_names]) + + else: + raise Exception("only accept DataSet and Instance") + return x + + +def sub_column(string: str, c: int, c_size: int, title: str) -> str: + """ + :param string: 要被截断的字符串 + :param c: 命令行列数 + :param c_size: instance或dataset field数 + :param title: 列名 + :return: 对一个过长的列进行截断的结果 + """ + avg = max(int(c / c_size), len(title)) + string = str(string) + if len(string) > avg: + string = string[:(avg - 3)] + "..." 
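+    # Worked example (hypothetical values): sub_column("[1, 2, 3, 4, 5, 6, 7, 8]", c=40, c_size=4, title="x")
+    # gives avg = max(int(40 / 4), len("x")) = 10; the 24-character string is longer than avg,
+    # so it is truncated to string[:7] + "..." == "[1, 2, ...", which keeps every column inside the terminal width.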
+ return string From 04a54df226763fa4f7b3ddb88f779008206698d8 Mon Sep 17 00:00:00 2001 From: yhcc Date: Sat, 7 Sep 2019 16:06:35 +0800 Subject: [PATCH 43/50] Update requirements.txt --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index f71e2223..db0b89ac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,5 +2,6 @@ numpy>=1.14.2 torch>=1.0.0 tqdm>=4.28.1 nltk>=3.4.1 +prettytable>=0.7.2 requests spacy From 8c8e22cc9baa08a1c8ee9ba887717db41cce57b5 Mon Sep 17 00:00:00 2001 From: yh_cc Date: Sat, 7 Sep 2019 18:47:03 +0800 Subject: [PATCH 44/50] =?UTF-8?q?DataSet=E4=B8=AD=E5=A2=9E=E5=8A=A0print?= =?UTF-8?q?=5Ffield=5Fmeta=E6=96=B9=E6=B3=95=EF=BC=8C=E4=BD=BF=E5=BE=97?= =?UTF-8?q?=E5=85=B6=E5=8F=AF=E4=BB=A5=E8=8E=B7=E5=8F=96field=E7=9A=84inpu?= =?UTF-8?q?t=E5=92=8Ctarget=E4=BF=A1=E6=81=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/dataset.py | 57 +++++++++++++++++++++++++++++++++++++++ fastNLP/core/field.py | 7 +++-- requirements.txt | 1 + test/core/test_dataset.py | 15 ++++++++++- 4 files changed, 77 insertions(+), 3 deletions(-) diff --git a/fastNLP/core/dataset.py b/fastNLP/core/dataset.py index 36852b93..2b548f22 100644 --- a/fastNLP/core/dataset.py +++ b/fastNLP/core/dataset.py @@ -301,6 +301,7 @@ from .field import SetInputOrTargetException from .instance import Instance from .utils import _get_func_signature from .utils import pretty_table_printer +from prettytable import PrettyTable class DataSet(object): @@ -425,6 +426,62 @@ class DataSet(object): def __repr__(self): return str(pretty_table_printer(self)) + def print_field_meta(self): + """ + 输出当前field的meta信息, 形似下列的输出 + + +-------------+-------+-------+ + | field_names | x | y | + +-------------+-------+-------+ + | is_input | True | False | + | is_target | False | False | + | ignore_type | False | | + | pad_value | 0 | | + +-------------+-------+-------+ + + field_names: DataSet中field的名称 + is_input: field是否为input + is_target: field是否为target + ignore_type: 是否忽略该field的type, 一般仅在该field至少为input或target时才有意义 + pad_value: 该field的pad的值,仅在该field为input或target时有意义 + + :return: + """ + if len(self.field_arrays)>0: + field_names = ['field_names'] + is_inputs = ['is_input'] + is_targets = ['is_target'] + pad_values = ['pad_value'] + ignore_types = ['ignore_type'] + + for name, field_array in self.field_arrays.items(): + field_names.append(name) + if field_array.is_input: + is_inputs.append(True) + else: + is_inputs.append(False) + if field_array.is_target: + is_targets.append(True) + else: + is_targets.append(False) + + if (field_array.is_input or field_array.is_target) and field_array.padder is not None: + pad_values.append(field_array.padder.get_pad_val()) + else: + pad_values.append(' ') + + if field_array._ignore_type: + ignore_types.append(True) + elif field_array.is_input or field_array.is_target: + ignore_types.append(False) + else: + ignore_types.append(' ') + table = PrettyTable(field_names=field_names) + fields = [is_inputs, is_targets, ignore_types, pad_values] + for field in fields: + table.add_row(field) + logger.info(table) + def append(self, instance): """ 将一个instance对象append到DataSet后面。 diff --git a/fastNLP/core/field.py b/fastNLP/core/field.py index 82fcc523..1835bafa 100644 --- a/fastNLP/core/field.py +++ b/fastNLP/core/field.py @@ -53,7 +53,7 @@ class FieldArray: self.content = _content self._ignore_type = ignore_type # 根据input的情况设置input,target等 - self._cell_ndim = None # 多少维度 + self._cell_ndim = None # 
多少维度, 如果value是1, dim为0; 如果value是[1, 2], dim=2 self.dtype = None # 最内层的element都是什么类型的 self._use_1st_ins_infer_dim_type = bool(use_1st_ins_infer_dim_type) self._is_input = False @@ -484,7 +484,10 @@ class Padder: def set_pad_val(self, pad_val): self.pad_val = pad_val - + + def get_pad_val(self): + return self.pad_val + @abstractmethod def __call__(self, contents, field_name, field_ele_dtype, dim: int): """ diff --git a/requirements.txt b/requirements.txt index f71e2223..bdd4a9e1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,4 @@ tqdm>=4.28.1 nltk>=3.4.1 requests spacy +prettytable>=0.7.2 \ No newline at end of file diff --git a/test/core/test_dataset.py b/test/core/test_dataset.py index 059d52d2..9820eff6 100644 --- a/test/core/test_dataset.py +++ b/test/core/test_dataset.py @@ -229,4 +229,17 @@ class TestDataSetIter(unittest.TestCase): def test__repr__(self): ds = DataSet({"x": [[1, 2, 3, 4]] * 10, "y": [[5, 6]] * 10}) for iter in ds: - self.assertEqual(iter.__repr__(), "{'x': [1, 2, 3, 4] type=list,\n'y': [5, 6] type=list}") + self.assertEqual(iter.__repr__(), """+--------------+--------+ +| x | y | ++--------------+--------+ +| [1, 2, 3, 4] | [5, 6] | ++--------------+--------+""") + + +class TestDataSetFieldMeta(unittest.TestCase): + def test_print_field_meta(self): + ds = DataSet({"x": [[1, 2, 3, 4]] * 10, "y": [[5, 6]] * 10}) + ds.print_field_meta() + + ds.set_input('x') + ds.print_field_meta() From 53bcc0b26a9b4e5560946ef2a4b7134bc589a7e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=AD=A6=E4=B9=A0=E7=9A=84=E8=8F=9C=E9=B8=A1=E7=BA=A2?= =?UTF-8?q?=E7=91=9E?= Date: Sun, 8 Sep 2019 16:28:15 +0800 Subject: [PATCH 45/50] loader-pr --- fastNLP/io/file_reader.py | 40 +++++++++++++++++++-------------------- fastNLP/io/test.csv | 6 ++++++ 2 files changed, 26 insertions(+), 20 deletions(-) create mode 100644 fastNLP/io/test.csv diff --git a/fastNLP/io/file_reader.py b/fastNLP/io/file_reader.py index 0ae0a319..17a0a6ca 100644 --- a/fastNLP/io/file_reader.py +++ b/fastNLP/io/file_reader.py @@ -2,6 +2,7 @@ 此模块用于给其它模块提供读取文件的函数,没有为用户提供 API """ import json +import csv def _read_csv(path, encoding='utf-8', headers=None, sep=',', dropna=True): @@ -16,27 +17,26 @@ def _read_csv(path, encoding='utf-8', headers=None, sep=',', dropna=True): :if False, raise ValueError when reading invalid data. default: True :return: generator, every time yield (line number, csv item) """ - with open(path, 'r', encoding=encoding) as f: - start_idx = 0 - if headers is None: - headers = f.readline().rstrip('\r\n') - headers = headers.split(sep) - start_idx += 1 - elif not isinstance(headers, (list, tuple)): - raise TypeError("headers should be list or tuple, not {}." \ + f = csv.reader(open(path, encoding=encoding), delimiter=sep) + start_idx = 0 + if headers is None: + headers = next(f) + start_idx += 1 + elif not isinstance(headers, (list, tuple)): + raise TypeError("headers should be list or tuple, not {}." \ .format(type(headers))) - for line_idx, line in enumerate(f, start_idx): - contents = line.rstrip('\r\n').split(sep) - if len(contents) != len(headers): - if dropna: - continue - else: - raise ValueError("Line {} has {} parts, while header has {} parts." 
\ - .format(line_idx, len(contents), len(headers))) - _dict = {} - for header, content in zip(headers, contents): - _dict[header] = content - yield line_idx, _dict + for line_idx, line in enumerate(f, start_idx): + contents = line + if len(contents) != len(headers): + if dropna: + continue + else: + raise ValueError("Line {} has {} parts, while header has {} parts." \ + .format(line_idx, len(contents), len(headers))) + _dict = {} + for header, content in zip(headers, contents): + _dict[header] = content + yield line_idx, _dict def _read_json(path, encoding='utf-8', fields=None, dropna=True): diff --git a/fastNLP/io/test.csv b/fastNLP/io/test.csv new file mode 100644 index 00000000..88293b2f --- /dev/null +++ b/fastNLP/io/test.csv @@ -0,0 +1,6 @@ +a b +1 "Contrary to other reviews, I have zero complaints about the service or the prices. I have been getting tire service here for the past 5 years now, and compared to my experience with places like Pep Boys, these guys are experienced and know what they're doing. \nAlso, this is one place that I do not feel like I am being taken advantage of, just because of my gender. Other auto mechanics have been notorious for capitalizing on my ignorance of cars, and have sucked my bank account dry. But here, my service and road coverage has all been well explained - and let up to me to decide. \nAnd they just renovated the waiting room. It looks a lot better than it did in previous years." +2 "Last summer I had an appointment to get new tires and had to wait a super long time. I also went in this week for them to fix a minor problem with a tire they put on. They \""fixed\"" it for free, and the very next morning I had the same issue. I called to complain, and the \""manager\"" didn't even apologize!!! So frustrated. Never going back. They seem overpriced, too." +3 "Friendly staff, same starbucks fair you get anywhere else. Sometimes the lines can get long." +4 "The food is good. Unfortunately the service is very hit or miss. The main issue seems to be with the kitchen, the waiters and waitresses are often very apologetic for the long waits and it's pretty obvious that some of them avoid the tables after taking the initial order to avoid hearing complaints." +5 "Even when we didn't have a car Filene's Basement was worth the bus trip to the Waterfront. I always find something (usually I find 3-4 things and spend about $60) and better still, I am always still wearing the clothes and shoes 3 months later. \n\nI kind of suspect this is the best shopping in Pittsburgh; it's much better than the usual department stores, better than Marshall's and TJ Maxx and better than the Saks downtown, even when it has a sale. Selection, bargains AND quality.\n\nI like this Filene's better than Gabriel Brothers, which are harder to get to. Gabriel Brothers are a real discount shopper's challenge and I'm afraid I didn't live in Pittsburgh long enough to develop the necessary skills . . . Filene's was still up and running in June 2007 when I left town." 
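The rows above are the kind of input that motivates the csv-module rewrite of `_read_csv`: each review is one long quoted field, and with the previous `line.split(sep)` logic a quoted field containing the separator yields the wrong number of parts, so the row either raises a ValueError or, with `dropna=True`, is silently skipped. A minimal sketch of driving the new helper (illustrative only: it assumes this sample file is tab-separated and that the snippet runs from the repository root; `_read_csv` is an internal helper rather than a public API)::

    from fastNLP.io.file_reader import _read_csv

    # each yielded item is (line number, {header: cell}); quoting is handled by csv.reader
    for line_idx, row in _read_csv('fastNLP/io/test.csv', sep='\t'):
        print(line_idx, row['a'], row['b'][:40])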
\ No newline at end of file From 776840439f7f313a97900a52ce57c05cb72ca42f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=AD=A6=E4=B9=A0=E7=9A=84=E8=8F=9C=E9=B8=A1=E7=BA=A2?= =?UTF-8?q?=E7=91=9E?= Date: Sun, 8 Sep 2019 16:28:53 +0800 Subject: [PATCH 46/50] loader-pr --- fastNLP/io/test.csv | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 fastNLP/io/test.csv diff --git a/fastNLP/io/test.csv b/fastNLP/io/test.csv deleted file mode 100644 index 88293b2f..00000000 --- a/fastNLP/io/test.csv +++ /dev/null @@ -1,6 +0,0 @@ -a b -1 "Contrary to other reviews, I have zero complaints about the service or the prices. I have been getting tire service here for the past 5 years now, and compared to my experience with places like Pep Boys, these guys are experienced and know what they're doing. \nAlso, this is one place that I do not feel like I am being taken advantage of, just because of my gender. Other auto mechanics have been notorious for capitalizing on my ignorance of cars, and have sucked my bank account dry. But here, my service and road coverage has all been well explained - and let up to me to decide. \nAnd they just renovated the waiting room. It looks a lot better than it did in previous years." -2 "Last summer I had an appointment to get new tires and had to wait a super long time. I also went in this week for them to fix a minor problem with a tire they put on. They \""fixed\"" it for free, and the very next morning I had the same issue. I called to complain, and the \""manager\"" didn't even apologize!!! So frustrated. Never going back. They seem overpriced, too." -3 "Friendly staff, same starbucks fair you get anywhere else. Sometimes the lines can get long." -4 "The food is good. Unfortunately the service is very hit or miss. The main issue seems to be with the kitchen, the waiters and waitresses are often very apologetic for the long waits and it's pretty obvious that some of them avoid the tables after taking the initial order to avoid hearing complaints." -5 "Even when we didn't have a car Filene's Basement was worth the bus trip to the Waterfront. I always find something (usually I find 3-4 things and spend about $60) and better still, I am always still wearing the clothes and shoes 3 months later. \n\nI kind of suspect this is the best shopping in Pittsburgh; it's much better than the usual department stores, better than Marshall's and TJ Maxx and better than the Saks downtown, even when it has a sale. Selection, bargains AND quality.\n\nI like this Filene's better than Gabriel Brothers, which are harder to get to. Gabriel Brothers are a real discount shopper's challenge and I'm afraid I didn't live in Pittsburgh long enough to develop the necessary skills . . . Filene's was still up and running in June 2007 when I left town." 
\ No newline at end of file From 1caa83d0cafbb5df6470627fab8dea86b56df36a Mon Sep 17 00:00:00 2001 From: ZikaiGuo <634500098@qq.com> Date: Sun, 8 Sep 2019 14:54:31 +0200 Subject: [PATCH 47/50] Update transformer.py --- fastNLP/modules/encoder/transformer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fastNLP/modules/encoder/transformer.py b/fastNLP/modules/encoder/transformer.py index d29a10c3..3d97c306 100644 --- a/fastNLP/modules/encoder/transformer.py +++ b/fastNLP/modules/encoder/transformer.py @@ -40,6 +40,8 @@ class TransformerEncoder(nn.Module): :param seq_mask: [batch, seq_len] :return: [batch, seq_len, model_size] """ + if seq_mask is None: # 防止后续乘法时出错 + seq_mask = 1 input = self.norm1(input) attention = self.atte(input, input, input, atte_mask_out) input = input + self.dropout(attention) From 917cedf808d2c03d1a2be4099ba7d1ef894f47d9 Mon Sep 17 00:00:00 2001 From: yh Date: Tue, 10 Sep 2019 10:38:02 +0800 Subject: [PATCH 48/50] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=96=B0=E7=9A=84tutor?= =?UTF-8?q?ial;=20=E5=88=A0=E9=99=A4=E5=90=84embedding=E4=B8=ADrequires=5F?= =?UTF-8?q?grad=E7=9A=84=E8=AE=BE=E7=BD=AE=EF=BC=8C=E7=BB=9F=E4=B8=80?= =?UTF-8?q?=E5=88=B0=E5=9F=BA=E7=B1=BB=E5=AE=9E=E7=8E=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ..._callback.rst => tutorial_10_callback.rst} | 0 ...l_10_fitlog.rst => tutorial_11_fitlog.rst} | 0 .../tutorials/tutorial_1_data_preprocess.rst | 74 ++- .../tutorials/tutorial_2_load_dataset.rst | 150 ------ .../tutorials/tutorial_2_vocabulary.rst | 131 ++++++ .../source/tutorials/tutorial_3_embedding.rst | 437 +++++++++++++++--- .../tutorials/tutorial_4_load_dataset.rst | 219 +++++++++ ...izer.rst => tutorial_6_loss_optimizer.rst} | 0 ...l_8_metrics.rst => tutorial_7_metrics.rst} | 0 ...dels.rst => tutorial_8_modules_models.rst} | 0 ...beling.rst => tutorial_9_seq_labeling.rst} | 0 docs/source/user/tutorials.rst | 15 +- fastNLP/embeddings/bert_embedding.py | 38 -- fastNLP/embeddings/char_embedding.py | 58 --- fastNLP/embeddings/elmo_embedding.py | 23 +- fastNLP/embeddings/embedding.py | 4 + fastNLP/embeddings/stack_embedding.py | 26 +- fastNLP/embeddings/static_embedding.py | 26 +- fastNLP/io/loader/classification.py | 1 - fastNLP/io/loader/loader.py | 26 +- 20 files changed, 804 insertions(+), 424 deletions(-) rename docs/source/tutorials/{tutorial_9_callback.rst => tutorial_10_callback.rst} (100%) rename docs/source/tutorials/{tutorial_10_fitlog.rst => tutorial_11_fitlog.rst} (100%) delete mode 100644 docs/source/tutorials/tutorial_2_load_dataset.rst create mode 100644 docs/source/tutorials/tutorial_2_vocabulary.rst create mode 100644 docs/source/tutorials/tutorial_4_load_dataset.rst rename docs/source/tutorials/{tutorial_4_loss_optimizer.rst => tutorial_6_loss_optimizer.rst} (100%) rename docs/source/tutorials/{tutorial_8_metrics.rst => tutorial_7_metrics.rst} (100%) rename docs/source/tutorials/{tutorial_7_modules_models.rst => tutorial_8_modules_models.rst} (100%) rename docs/source/tutorials/{tutorial_6_seq_labeling.rst => tutorial_9_seq_labeling.rst} (100%) diff --git a/docs/source/tutorials/tutorial_9_callback.rst b/docs/source/tutorials/tutorial_10_callback.rst similarity index 100% rename from docs/source/tutorials/tutorial_9_callback.rst rename to docs/source/tutorials/tutorial_10_callback.rst diff --git a/docs/source/tutorials/tutorial_10_fitlog.rst b/docs/source/tutorials/tutorial_11_fitlog.rst similarity index 100% rename from docs/source/tutorials/tutorial_10_fitlog.rst rename to 
docs/source/tutorials/tutorial_11_fitlog.rst diff --git a/docs/source/tutorials/tutorial_1_data_preprocess.rst b/docs/source/tutorials/tutorial_1_data_preprocess.rst index 0ec63f87..dfc3bbbe 100644 --- a/docs/source/tutorials/tutorial_1_data_preprocess.rst +++ b/docs/source/tutorials/tutorial_1_data_preprocess.rst @@ -1,21 +1,20 @@ ============================== -使用DataSet预处理文本 +DataSet ============================== -:class:`~fastNLP.DataSet` 是fastNLP中用于承载数据的容器。可以将DataSet看做是一个表格, -每一行是一个sample (在fastNLP中被称为 :mod:`~fastNLP.core.instance` ), -每一列是一个feature (在fastNLP中称为 :mod:`~fastNLP.core.field` )。 +:class:`~fastNLP.DataSet` 是fastNLP用于承载数据的类,一般训练集、验证集和测试集会被加载为三个单独的:class:`~fastNLP.DataSet`对象。 + +:class:`~fastNLP.DataSet`中的数据组织形式类似一个表格,比如下面 :class:`~fastNLP.DataSet` 一共有3列,列在fastNLP中被称为field。 .. csv-table:: - :header: "sentence", "words", "seq_len" + :header: "raw_chars", "chars", "seq_len" - "This is the first instance .", "[This, is, the, first, instance, .]", 6 - "Second instance .", "[Second, instance, .]", 3 + "历任公司副总经理、总工程师,", "[历 任 公 司 副 总 经 理 、 总 工 程 师 ,]", 6 "Third instance .", "[Third, instance, .]", 3 "...", "[...]", "..." -上面是一个样例数据中 DataSet 的存储结构。其中它的每一行是一个 :class:`~fastNLP.Instance` 对象; 每一列是一个 :class:`~fastNLP.FieldArray` 对象。 - +每一行是一个instance (在fastNLP中被称为 :mod:`~fastNLP.core.Instance` ), +每一列是一个field (在fastNLP中称为 :mod:`~fastNLP.core.FieldArray` )。 ----------------------------- 数据集构建和删除 @@ -26,11 +25,23 @@ .. code-block:: python from fastNLP import DataSet - data = {'sentence':["This is the first instance .", "Second instance .", "Third instance ."], + data = {'raw_words':["This is the first instance .", "Second instance .", "Third instance ."], 'words': [['this', 'is', 'the', 'first', 'instance', '.'], ['Second', 'instance', '.'], ['Third', 'instance', '.']], 'seq_len': [6, 3, 3]} dataset = DataSet(data) # 传入的dict的每个key的value应该为具有相同长度的list + print(dataset) + +输出为:: + + +------------------------------+------------------------------------------------+---------+ + | raw_words | words | seq_len | + +------------------------------+------------------------------------------------+---------+ + | This is the first instance . | ['this', 'is', 'the', 'first', 'instance', ... | 6 | + | Second instance . | ['Second', 'instance', '.'] | 3 | + | Third instance . | ['Third', 'instance', '.'] | 3 | + +------------------------------+------------------------------------------------+---------+ + 我们还可以使用 :func:`~fastNLP.DataSet.append` 方法向数据集内增加数据 @@ -39,7 +50,7 @@ from fastNLP import DataSet from fastNLP import Instance dataset = DataSet() - instance = Instance(sentence="This is the first instance", + instance = Instance(raw_words="This is the first instance", words=['this', 'is', 'the', 'first', 'instance', '.'], seq_len=6) dataset.append(instance) @@ -52,10 +63,10 @@ from fastNLP import DataSet from fastNLP import Instance dataset = DataSet([ - Instance(sentence="This is the first instance", + Instance(raw_words="This is the first instance", words=['this', 'is', 'the', 'first', 'instance', '.'], seq_len=6), - Instance(sentence="Second instance .", + Instance(raw_words="Second instance .", words=['Second', 'instance', '.'], seq_len=3) ]) @@ -106,24 +117,49 @@ FastNLP 同样提供了多种删除数据的方法 :func:`~fastNLP.DataSet.drop` .. 
code-block:: python from fastNLP import DataSet - data = {'sentence':["This is the first instance .", "Second instance .", "Third instance ."]} + data = {'raw_words':["This is the first instance .", "Second instance .", "Third instance ."]} dataset = DataSet(data) # 将句子分成单词形式, 详见DataSet.apply()方法 - dataset.apply(lambda ins: ins['sentence'].split(), new_field_name='words') + dataset.apply(lambda ins: ins['raw_words'].split(), new_field_name='words') # 或使用DataSet.apply_field() - dataset.apply_field(lambda sent:sent.split(), field_name='sentence', new_field_name='words') + dataset.apply_field(lambda sent:sent.split(), field_name='raw_words', new_field_name='words') # 除了匿名函数,也可以定义函数传递进去 def get_words(instance): - sentence = instance['sentence'] + sentence = instance['raw_words'] words = sentence.split() return words dataset.apply(get_words, new_field_name='words') -除了手动处理数据集之外,你还可以使用 fastNLP 提供的各种 :class:`~fastNLP.io.base_loader.DataSetLoader` 来进行数据处理。 -详细请参考这篇教程 :doc:`使用DataSetLoader加载数据集 ` 。 +除了手动处理数据集之外,你还可以使用 fastNLP 提供的各种 :class:`~fastNLP.io.Loader`和:class:`~fastNLP.io.Pipe` 来进行数据处理。 +详细请参考这篇教程 :doc:`使用Loader和Pipe处理数据 ` 。 + +----------------------------- +fastNLP中field的命名习惯 +----------------------------- + +在英文任务中,fastNLP常用的field名称有: + + - raw_words: 表示的是原始的str。例如"This is a demo sentence ."。存在多个raw_words的情况,例如matching任务,它们会被定义为 + raw_words0, raw_words1。但在conll格式下,raw_words列也可能为["This", "is", "a", "demo", "sentence", "."]的形式。 + - words: 表示的是已经tokenize后的词语。例如["This", "is", "a", "demo", "sentence"], 但由于str并不能直接被神经网络所使用, + 所以words中的内容往往被转换为int,如[3, 10, 4, 2, 7, ...]等。多列words的情况,会被命名为words0, words1 + - target: 表示目标值。分类场景下,只有一个值;序列标注场景下是一个序列。 + - seq_len: 一般用于表示words列的长度 + +在中文任务中,fastNLP常用的field名称有: + + - raw_chars: 表示的是原始的连续汉字序列。例如"这是一个示例。" + - chars: 表示已经切分为单独的汉字的序列。例如["这", "是", "一", "个", "示", "例", "。"]。但由于神经网络不能识别汉字,所以一般 + 该列会被转为int形式,如[3, 4, 5, 6, ...]。 + - raw_words: 如果原始汉字序列中已经包含了词语的边界,则该列称为raw_words。如"上海 浦东 开发 与 法制 建设 同步"。 + - words: 表示单独的汉字词语序列。例如["上海", "", "浦东", "开发", "与", "法制", "建设", ...]或[2, 3, 4, ...] 
+ - target: 表示目标值。分类场景下,只有一个值;序列标注场景下是一个序列。 + - seq_len: 表示输入序列的长度 + +# TODO 这一段移动到datasetiter那里 ----------------------------- DataSet与pad diff --git a/docs/source/tutorials/tutorial_2_load_dataset.rst b/docs/source/tutorials/tutorial_2_load_dataset.rst deleted file mode 100644 index 17ad6baf..00000000 --- a/docs/source/tutorials/tutorial_2_load_dataset.rst +++ /dev/null @@ -1,150 +0,0 @@ -======================================= -使用Loader和Pipe加载并处理数据集 -======================================= - -这一部分是一个关于如何加载数据集的教程 - -教程目录: - - - `Part I: 数据集容器DataBundle`_ - - `Part II: 加载数据集的基类Loader`_ - - `Part III: 不同格式类型的基础Loader`_ - - `Part IV: 使用Pipe对数据集进行预处理`_ - - `Part V: fastNLP封装好的Loader和Pipe`_ - - ------------------------------------- -Part I: 数据集容器DataBundle ------------------------------------- - -在fastNLP中,我们使用 :class:`~fastNLP.io.data_bundle.DataBundle` 来存储数据集信息。 -:class:`~fastNLP.io.data_bundle.DataBundle` 类包含了两个重要内容: `datasets` 和 `vocabs` 。 - -`datasets` 是一个 `key` 为数据集名称(如 `train` , `dev` ,和 `test` 等), `value` 为 :class:`~fastNLP.DataSet` 的字典。 - -`vocabs` 是一个 `key` 为词表名称(如 :attr:`fastNLP.Const.INPUT` 表示输入文本的词表名称, :attr:`fastNLP.Const.TARGET` 表示目标 -的真实标签词表的名称,等等), `value` 为词表内容( :class:`~fastNLP.Vocabulary` )的字典。 - -------------------------------------- -Part II: 加载数据集的基类Loader -------------------------------------- - -在fastNLP中,我们采用 :class:`~fastNLP.io.loader.Loader` 来作为加载数据集的基类。 -:class:`~fastNLP.io.loader.Loader` 定义了各种Loader所需的API接口,开发者应该继承它实现各种的Loader。 -在各种数据集的Loader当中,至少应该编写如下内容: - - - _load 函数:从一个数据文件中读取数据,返回一个 :class:`~fastNLP.DataSet` - - load 函数:从文件或者文件夹中读取数据并组装成 :class:`~fastNLP.io.data_bundle.DataBundle` - -Loader的load函数返回的 :class:`~fastNLP.io.data_bundle.DataBundle` 里面包含了数据集的原始数据。 - --------------------------------------------------------- -Part III: 不同格式类型的基础Loader --------------------------------------------------------- - -:class:`~fastNLP.io.loader.CSVLoader` - 读取CSV类型的数据集文件。例子如下: - - .. code-block:: python - - from fastNLP.io.loader import CSVLoader - data_set_loader = CSVLoader( - headers=('words', 'target'), sep='\t' - ) - # 表示将CSV文件中每一行的第一项填入'words' field,第二项填入'target' field。 - # 其中每两项之间由'\t'分割开来 - - data_set = data_set_loader._load('path/to/your/file') - - 数据集内容样例如下 :: - - But it does not leave you with much . 1 - You could hate it for the same reason . 1 - The performances are an absolute joy . 4 - - -:class:`~fastNLP.io.loader.JsonLoader` - 读取Json类型的数据集文件,数据必须按行存储,每行是一个包含各类属性的Json对象。例子如下: - - .. code-block:: python - - from fastNLP.io.loader import JsonLoader - oader = JsonLoader( - fields={'sentence1': 'words1', 'sentence2': 'words2', 'gold_label': 'target'} - ) - # 表示将Json对象中'sentence1'、'sentence2'和'gold_label'对应的值赋给'words1'、'words2'、'target'这三个fields - - data_set = loader._load('path/to/your/file') - - 数据集内容样例如下 :: - - {"annotator_labels": ["neutral"], "captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . 
) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"} - {"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"} - {"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"} - ------------------------------------------- -Part IV: 使用Pipe对数据集进行预处理 ------------------------------------------- - -在fastNLP中,我们采用 :class:`~fastNLP.io.pipe.Pipe` 来作为加载数据集的基类。 -:class:`~fastNLP.io.pipe.Pipe` 定义了各种Pipe所需的API接口,开发者应该继承它实现各种的Pipe。 -在各种数据集的Pipe当中,至少应该编写如下内容: - - - process 函数:对输入的 :class:`~fastNLP.io.data_bundle.DataBundle` 进行处理(如构建词表、 - 将dataset的文本内容转成index等等),然后返回该 :class:`~fastNLP.io.data_bundle.DataBundle` - - process_from_file 函数:输入数据集所在文件夹,读取内容并组装成 :class:`~fastNLP.io.data_bundle.DataBundle` , - 然后调用相对应的process函数对数据进行预处理 - -以SNLI数据集为例,写一个自定义Pipe的例子如下: - -.. code-block:: python - - from fastNLP.io.loader import SNLILoader - from fastNLP.io.pipe import MatchingPipe - - class MySNLIPipe(MatchingPipe): - - def process(self, data_bundle): - data_bundle = super(MySNLIPipe, self).process(data_bundle) - # MatchingPipe类里封装了一个关于matching任务的process函数,可以直接继承使用 - # 如果有需要进行额外的预处理操作可以在这里加入您的代码 - return data_bundle - - def process_from_file(self, paths=None): - data_bundle = SNLILoader().load(paths) # 使用SNLILoader读取原始数据集 - # SNLILoader的load函数中,paths如果为None则会自动下载 - return self.process(data_bundle) # 调用相对应的process函数对data_bundle进行处理 - -调用Pipe示例: - -.. code-block:: python - - from fastNLP.io.pipe import SNLIBertPipe - data_bundle = SNLIBertPipe(lower=True, tokenizer=arg.tokenizer).process_from_file() - print(data_bundle) - -输出的内容是:: - - In total 3 datasets: - train has 549367 instances. - dev has 9842 instances. - test has 9824 instances. - In total 2 vocabs: - words has 34184 entries. - target has 3 entries. 
- -这里表示一共有3个数据集和2个词表。其中: - - - 3个数据集分别为train、dev、test数据集,分别有549367、9842、9824个instance - - 2个词表分别为words词表与target词表。其中words词表为句子文本所构建的词表,一共有34184个单词; - target词表为目标标签所构建的词表,一共有3种标签。(注:如果有多个输入,则句子文本所构建的词表将 - 会被命名为words1以对应相对应的列名) - ------------------------------------------- -Part V: fastNLP封装好的Loader和Pipe ------------------------------------------- - -fastNLP封装了多种任务/数据集的Loader和Pipe并提供自动下载功能,具体参见文档 - -`fastNLP可加载的embedding与数据集 `_ - diff --git a/docs/source/tutorials/tutorial_2_vocabulary.rst b/docs/source/tutorials/tutorial_2_vocabulary.rst new file mode 100644 index 00000000..9656e4ec --- /dev/null +++ b/docs/source/tutorials/tutorial_2_vocabulary.rst @@ -0,0 +1,131 @@ + +============================== +Vocabulary +============================== + + :class:`~fastNLP.Vocabulary`是包含字或词与index关系的类,用于将文本转换为index。 + +----------------------------- +构建Vocabulary +----------------------------- + +.. code-block:: python + + from fastNLP import Vocabulary + + vocab = Vocabulary() + vocab.add_word_lst(['复', '旦', '大', '学']) # 加入新的字 + vocab.add_word('上海') # `上海`会作为一个整体 + vocab.to_index('复') # 应该会为3 + vocab.to_index('我') # 会输出1,Vocabulary中默认pad的index为0, unk(没有找到的词)的index为1 + + # 在构建target的Vocabulary时,词表中应该用不上pad和unk,可以通过以下的初始化 + vocab = Vocabulary(unknown=None, pad=None) + vocab.add_word_lst(['positive', 'negative']) + vocab.to_index('positive') # 输出0 + vocab.to_index('neutral') # 会报错 + +除了通过以上的方式建立词表,Vocabulary还可以通过使用下面的函数直从 :class:`~fastNLP.DataSet` 中的某一列建立词表以及将该列转换为index + +.. code-block:: python + + from fastNLP import Vocabulary + from fastNLP import DataSet + + dataset = DataSet({'chars': [ + ['今', '天', '天', '气', '很', '好', '。'], + ['被', '这', '部', '电', '影', '浪', '费', '了', '两', '个', '小', '时', '。'] + ], + 'target': ['neutral', 'negative'] + }) + + vocab = Vocabulary() + vocab.from_dataset(dataset, field_name='chars') + vocab.index_dataset(dataset, field_name='chars') + + target_vocab = Vocabulary(padding=None, unknown=None) + target_vocab.from_dataset(dataset, field_name='target') + target_vocab.index_dataset(dataset, field_name='target') + print(dataset) + +输出内容为:: + + +---------------------------------------------------+--------+ + | chars | target | + +---------------------------------------------------+--------+ + | [4, 2, 2, 5, 6, 7, 3] | 0 | + | [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 3] | 1 | + +---------------------------------------------------+--------+ + + +----------------------------- +一些使用tips +----------------------------- + +在通过使用from_dataset()函数在DataSet上建立词表时,将测试集和验证集放入参数no_create_entry_dataset中,如下所示 + +.. 
code-block:: python + + from fastNLP import Vocabulary + from fastNLP import DataSet + + tr_data = DataSet({'chars': [ + ['今', '天', '心', '情', '很', '好', '。'], + ['被', '这', '部', '电', '影', '浪', '费', '了', '两', '个', '小', '时', '。'] + ], + 'target': ['positive', 'negative'] + }) + dev_data = DataSet({'chars': [ + ['住', '宿', '条', '件', '还', '不', '错'], + ['糟', '糕', '的', '天', '气', ',', '无', '法', '出', '行', '。'] + ], + 'target': ['positive', 'negative'] + }) + + vocab = Vocabulary() + # 将验证集或者测试集在建立词表是放入no_create_entry_dataset这个参数中。 + vocab.from_dataset(tr_data, field_name='chars', no_create_entry_dataset=[dev_data]) + + +:class:`~fastNLP.Vocabulary` 中的`no_create_entry`, 建议在添加来自于测试集和验证集的词的时候将该参数置为True, 或将验证集和测试集 +传入`no_create_entry_dataset`参数。它们的意义是在接下来的模型会使用pretrain的embedding(包括glove, word2vec, elmo与bert)且会finetune的 +情况下,如果仅使用来自于train的数据建立vocabulary,会导致只出现在test与dev中的词语无法充分利用到来自于预训练embedding的信息(因为他们 +会被认为是unk),所以在建立词表的时候将test与dev考虑进来会使得最终的结果更好。通过与fastNLP中的各种Embedding配合使用,会有如下的效果, +如果一个词出现在了train中,但是没在预训练模型中,embedding会为随机初始化,且它单独的一个vector,如果finetune embedding的话, +这个词在更新之后可能会有更好的表示; 而如果这个词仅出现在了dev或test中,那么就不能为它们单独建立vector,而应该让它指向unk这个vector的 +值(当unk的值更新时,这个词也使用的是更新之后的vector)。所以被认为是no_create_entry的token,将首先从预训练的词表中寻找它的表示,如 +果找到了,就使用该表示; 如果没有找到,则认为该词的表示应该为unk的表示。 + +下面我们结合部分:code:`~fastNLP.embeddings.StaticEmbedding`的例子来说明下该值造成的影响,如果您对 +:code:`~fastNLP.embeddings.StaticEmbedding`不太了解,您可以先参考\{Embedding教程的引用}部分再来阅读该部分 + +.. code-block:: python + + import torch + from fastNLP.embeddings import StaticEmbedding + from fastNLP import Vocabulary + + vocab = Vocabulary() + vocab.add_word('train') + vocab.add_word('only_in_train') # 仅在train出现,但肯定在预训练词表中不存在 + vocab.add_word('test', no_create_entry=True) # 该词只在dev或test中出现 + vocab.add_word('only_in_test', no_create_entry=True) # 这个词肯定在预训练中找不到 + + embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-6b-50d') + print(embed(torch.LongTensor([vocab.to_index('train')]))) + print(embed(torch.LongTensor([vocab.to_index('only_in_train')]))) + print(embed(torch.LongTensor([vocab.to_index('test')]))) + print(embed(torch.LongTensor([vocab.to_index('only_in_test')]))) + print(embed(torch.LongTensor([vocab.unknown_idx]))) + +输出结果(只截取了部分vector):: + + tensor([[ 0.9497, 0.3433, 0.8450, -0.8852, ...]], grad_fn=) # train + tensor([[ 0.0540, -0.0557, -0.0514, -0.1688, ...]], grad_fn=) # only_in_train + tensor([[ 0.1318, -0.2552, -0.0679, 0.2619, ...]], grad_fn=) # test + tensor([[0., 0., 0., 0., 0., ...]], grad_fn=) # only_in_test + tensor([[0., 0., 0., 0., 0., ...]], grad_fn=) # unk + +首先train和test都能够从预训练中找到对应的vector,所以它们是各自的vector表示; only_in_train在预训练中找不到,StaticEmbedding为它 +新建了一个entry,所以它有一个单独的vector; 而only_in_dev在预训练中找不到被指向了unk的值(fastNLP用零向量初始化unk),与最后一行unk的 +表示相同。 \ No newline at end of file diff --git a/docs/source/tutorials/tutorial_3_embedding.rst b/docs/source/tutorials/tutorial_3_embedding.rst index 07dc30bc..4e29efed 100644 --- a/docs/source/tutorials/tutorial_3_embedding.rst +++ b/docs/source/tutorials/tutorial_3_embedding.rst @@ -7,161 +7,446 @@ 教程目录: - `Part I: embedding介绍`_ - - `Part II: 使用随机初始化的embedding`_ - - `Part III: 使用预训练的静态embedding`_ - - `Part IV: 使用预训练的Contextual Embedding(ELMo & BERT)`_ - - `Part V: 使用character-level的embedding`_ - - `Part VI: 叠加使用多个embedding`_ - - `Part VII: fastNLP支持的预训练Embedding`_ - - + - `Part II: 使用预训练的静态embedding`_ + - `Part III: 使用随机初始化的embedding`_ + - `Part IV: ELMo Embedding`_ + - `Part V: Bert Embedding`_ + - `Part VI: 使用character-level的embedding`_ + - `Part VII: 叠加使用多个embedding`_ + - `Part VIII: Embedding的其它说明`_ + - `Part IX: 
StaticEmbedding的使用建议`_ --------------------------------------- Part I: embedding介绍 --------------------------------------- -与torch.nn.Embedding类似,fastNLP的embedding接受的输入是一个被index好的序列,输出的内容是这个序列的embedding结果。 - -fastNLP的embedding包括了预训练embedding和随机初始化embedding。 +Embedding是一种词嵌入技术,可以将字或者词转换为实向量。目前使用较多的预训练词嵌入有word2vec, fasttext, glove, character embedding, +elmo以及bert。 +但使用这些词嵌入方式的时候都需要做一些加载上的处理,比如预训练的word2vec, fasttext以及glove都有着超过几十万个词语的表示,但一般任务大概 +只会用到其中几万个词,如果直接加载所有的词汇,会导致内存占用变大以及运行速度变慢,需要从预训练文件中抽取本次实验的用到的词汇;而对于英文的 +elmo和character embedding, 需要将word拆分成character才能使用;Bert的使用更是涉及到了Byte pair encoding(BPE)相关的内容。为了方便 +大家的使用,fastNLP通过:class:`~fastNLP.Vocabulary`统一了不同embedding的使用。下面我们将讲述一些例子来说明一下 --------------------------------------- -Part II: 使用随机初始化的embedding +Part II: 使用预训练的静态embedding --------------------------------------- -使用随机初始化的embedding参见 :class:`~fastNLP.embeddings.embedding.Embedding` 。 - -可以传入词表大小和embedding维度: +在fastNLP中,加载预训练的word2vec, glove以及fasttext都使用的是 :class:`~fastNLP.embeddings.StaticEmbedding`。另外,为了方便大家的 +使用,fastNLP提供了多种静态词向量的自动下载并缓存(默认缓存到~/.fastNLP/embeddings文件夹下)的功能,支持自动下载的预训练向量可以在 +``_ +查看。 .. code-block:: python - from fastNLP import Embedding - embed = Embedding(10000, 50) + import torch + from fastNLP.embeddings import StaticEmbedding + from fastNLP import Vocabulary -也可以传入一个初始化的参数矩阵: + vocab = Vocabulary() + vocab.add_word_lst("this is a demo .".split()) -.. code-block:: python + embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-6b-50d', requires_grad=True) + + words = torch.LongTensor([[vocab.to_index(word) for word in "this is a demo .".split()]]) + print(embed(words).size()) - from fastNLP import Embedding - embed = Embedding(init_embed) +输出为:: -其中的init_embed可以是torch.FloatTensor、torch.nn.Embedding或者numpy.ndarray。 + torch.Size([1, 5, 50]) +fastNLP的StaticEmbedding在初始化之后,就和pytorch中的Embedding是类似的了。:class:`~fastNLP.embeddings.StaticEmbedding`的初始化 +主要是从model_dir_or_name提供的词向量中抽取出:class:`~fastNLP.Vocabulary`中词语的vector。 + +除了可以通过使用预先提供的Embedding,:class:`~fastNLP.embeddings.StaticEmbedding`也支持加载本地的预训练词向量,glove, word2vec以及 +fasttext格式的。通过将model_dir_or_name修改为本地的embedding文件路径,即可使用本地的embedding。 --------------------------------------- -Part III: 使用预训练的静态embedding +Part III: 使用随机初始化的embedding --------------------------------------- -在使用预训练的embedding之前,需要根据数据集的内容构建一个词表 :class:`~fastNLP.core.vocabulary.Vocabulary` ,在 -预训练embedding类初始化的时候需要将这个词表作为参数传入。 - -在fastNLP中,我们提供了 :class:`~fastNLP.embeddings.StaticEmbedding` 这一个类。 -通过 :class:`~fastNLP.embeddings.StaticEmbedding` 可以加载预训练好的静态 -Embedding,例子如下: +有时候需要使用随机初始化的Embedding,也可以通过使用 :class:`~fastNLP.embeddings.StaticEmbedding`获得。只需要将model_dir_or_name +置为None,且传入embedding_dim,如下例所示 .. 
code-block:: python - from fastNLP import StaticEmbedding - embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-6b-50', requires_grad=True) + from fastNLP.embeddings import StaticEmbedding + from fastNLP import Vocabulary -vocab为根据数据集构建的词表,model_dir_or_name可以是一个路径,也可以是embedding模型的名称: + vocab = Vocabulary() + vocab.add_word_lst("this is a demo .".split()) - 1 如果传入的是路径,那么fastNLP将会根据该路径来读取预训练的权重文件并将embedding加载进来(glove - 和word2vec类型的权重文件都支持) + embed = StaticEmbedding(vocab, model_dir_or_name=None, embedding_dim=30) - 2 如果传入的是模型名称,那么fastNLP将会根据名称查找embedding模型,如果在cache目录下找到模型则会 - 自动加载;如果找不到则会自动下载到cache目录。默认的cache目录为 `~/.fastNLP` 文件夹。可以通过环境 - 变量 ``FASTNLP_CACHE_DIR`` 来自定义cache目录,如:: + words = torch.LongTensor([[vocab.to_index(word) for word in "this is a demo .".split()]]) + print(embed(words).size()) - $ FASTNLP_CACHE_DIR=~/fastnlp_cache_dir python your_python_file.py +输出为:: + + torch.Size([1, 5, 30]) -这个命令表示fastNLP将会在 `~/fastnlp_cache_dir` 这个目录下寻找模型,找不到则会自动将模型下载到这个目录 ----------------------------------------------------------- -Part IV: 使用预训练的Contextual Embedding(ELMo & BERT) +Part IV: ELMo Embedding ----------------------------------------------------------- 在fastNLP中,我们提供了ELMo和BERT的embedding: :class:`~fastNLP.embeddings.ElmoEmbedding` -和 :class:`~fastNLP.embeddings.BertEmbedding` 。 +和 :class:`~fastNLP.embeddings.BertEmbedding` 。可自动下载的ElmoEmbedding可以 +从``_找到。 与静态embedding类似,ELMo的使用方法如下: .. code-block:: python - from fastNLP import ElmoEmbedding - embed = ElmoEmbedding(vocab, model_dir_or_name='small', requires_grad=False) + from fastNLP.embeddings import ElmoEmbedding + from fastNLP import Vocabulary + + vocab = Vocabulary() + vocab.add_word_lst("this is a demo .".split()) + + embed = ElmoEmbedding(vocab, model_dir_or_name='en-small', requires_grad=False) + words = torch.LongTensor([[vocab.to_index(word) for word in "this is a demo .".split()]]) + print(embed(words).size()) + +输出为:: + + torch.Size([1, 5, 256]) + +也可以输出多层的ELMo结果,fastNLP将在不同层的结果在最后一维上拼接,下面的代码需要在上面的代码执行结束之后执行 + +.. code-block:: python + + embed = ElmoEmbedding(vocab, model_dir_or_name='en-small', requires_grad=False, layers='1,2') + print(embed(words).size()) + +输出为:: + + torch.Size([1, 5, 512]) + +另外,根据``_,不同层之间使用可学习的权重可以使得ELMo的效果更好,在fastNLP中可以通过以下的初始化 +实现3层输出的结果通过可学习的权重进行加法融合。 + +.. code-block:: python + + embed = ElmoEmbedding(vocab, model_dir_or_name='en-small', requires_grad=True, layers='mix') + print(embed(words).size()) + +输出为:: + + torch.Size([1, 5, 256]) + + +----------------------------------------------------------- +Part V: Bert Embedding +----------------------------------------------------------- -BERT-embedding的使用方法如下: +虽然Bert并不算严格意义上的Embedding,但通过将Bert封装成Embedding的形式将极大减轻使用的复杂程度。可自动下载的Bert Embedding可以 +从``_找到。我们将使用下面的例子讲述一下 +BertEmbedding的使用 .. code-block:: python - from fastNLP import BertEmbedding - embed = BertEmbedding( - vocab, model_dir_or_name='en-base-cased', requires_grad=False, layers='4,-2,-1' - ) + from fastNLP.embeddings import BertEmbedding + from fastNLP import Vocabulary + + vocab = Vocabulary() + vocab.add_word_lst("this is a demo .".split()) + + embed = BertEmbedding(vocab, model_dir_or_name='en-base-cased') + words = torch.LongTensor([[vocab.to_index(word) for word in "this is a demo .".split()]]) + print(embed(words).size()) + +输出为:: + + torch.Size([1, 5, 768]) + +可以通过申明使用指定层数的output也可以使用多层的output,下面的代码需要在上面的代码执行结束之后执行 + +.. 
code-block:: python + + # 使用后面两层的输出 + embed = BertEmbedding(vocab, model_dir_or_name='en-base-cased', layers='10,11') + print(embed(words).size()) # 结果将是在最后一维做拼接 + +输出为:: + + torch.Size([1, 5, 1536]) + +在Bert中还存在两个特殊的字符[CLS]和[SEP],默认情况下这两个字符是自动加入并且在计算结束之后会自动删除,以使得输入的序列长度和输出的序列 +长度是一致的,但是有些分类的情况,必须需要使用[CLS]的表示,这种情况可以通过在初始化时申明一下需要保留[CLS]的表示,如下例所示 + +.. code-block:: python + + embed = BertEmbedding(vocab, model_dir_or_name='en-base-cased', layers='-1', include_cls_sep=True) + print(embed(words).size()) # 结果将在序列维度上增加2 + # 取出句子的cls表示 + cls_reps = embed(words)[:, 0] # shape: [batch_size, 768] + +输出为:: + + torch.Size([1, 7, 768]) + +在英文Bert模型中,一个英文单词可能会被切分为多个subword,例如"fairness"会被拆分为["fair", "##ness"],这样一个word对应的将有两个输出, +:class:`~fastNLP.embeddings.BertEmbedding`会使用pooling方法将一个word的subword的表示合并成一个vector,通过pool_method可以控制 +该pooling方法,支持的有"first"(即使用fair的表示作为fairness的表示), "last"(使用##ness的表示作为fairness的表示), "max"(对fair和 +##ness在每一维上做max),"avg"(对fair和##ness每一维做average)。 + +.. code-block:: python + + embed = BertEmbedding(vocab, model_dir_or_name='en-base-cased', layers='-1', pool_method='max') + print(embed(words).size()) + +输出为:: + + torch.Size([1, 5, 768]) + +另外,根据``_ ,Bert的还存在一种用法,句子之间通过[SEP]拼接起来,前一句话的token embedding为0, +后一句话的token embedding为1。BertEmbedding能够自动识别句子中间的[SEP]来正确设置对应的token_type_id的。 + +.. code-block:: python + + vocab = Vocabulary() + vocab.add_word_lst("this is a demo . [SEP] another sentence .".split()) + + embed = BertEmbedding(vocab, model_dir_or_name='en-base-cased', layers='-1', pool_method='max') + words = torch.LongTensor([[vocab.to_index(word) for word in "this is a demo . [SEP] another sentence .".split()]]) + print(embed(words).size()) + +输出为:: -其中layers变量表示需要取哪几层的encode结果。 + torch.Size([1, 9, 768]) + +在多个[SEP]的情况下,将会使token_type_id不断0,1循环。比如"first sentence [SEP] second sentence [SEP] third sentence", 它们的 +token_type_id将是[0, 0, 0, 1, 1, 1, 0, 0]。但请注意[SEP]一定要大写的,不能是[sep],否则无法识别。 + +更多:class:`~fastNLP.embedding.BertEmbedding`的使用,请参考\ref{找人写一篇BertEmbedding的使用教程} ----------------------------------------------------- -Part V: 使用character-level的embedding +Part VI: 使用character-level的embedding ----------------------------------------------------- -除了预训练的embedding以外,fastNLP还提供了CharEmbedding: :class:`~fastNLP.embeddings.CNNCharEmbedding` 和 -:class:`~fastNLP.embeddings.LSTMCharEmbedding` 。 +除了预训练的embedding以外,fastNLP还提供了两种Character Embedding: :class:`~fastNLP.embeddings.CNNCharEmbedding` 和 +:class:`~fastNLP.embeddings.LSTMCharEmbedding` 。一般在使用character embedding时,需要在预处理的时候将word拆分成character,这 +会使得预处理过程变得非常繁琐。在fastNLP中,使用character embedding也只需要传入:class:`~fastNLP.Vocabulary`即可,而且该 +Vocabulary与其它Embedding使用的Vocabulary是一致的,如下面的例子所示 CNNCharEmbedding的使用例子如下: .. code-block:: python - from fastNLP import CNNCharEmbedding - embed = CNNCharEmbedding(vocab, embed_size=100, char_emb_size=50) + from fastNLP.embeddings import CNNCharEmbedding + from fastNLP import Vocabulary + + vocab = Vocabulary() + vocab.add_word_lst("this is a demo .".split()) + + # character的embedding维度大小为50,返回的embedding结果维度大小为64。 + embed = CNNCharEmbedding(vocab, embed_size=64, char_emb_size=50) + words = torch.LongTensor([[vocab.to_index(word) for word in "this is a demo .".split()]]) + print(embed(words).size()) -这表示这个CNNCharEmbedding当中character的embedding维度大小为50,返回的embedding结果维度大小为100。 +输出为:: + + torch.Size([1, 5, 64]) 与CNNCharEmbedding类似,LSTMCharEmbedding的使用例子如下: .. 
code-block:: python - from fastNLP import LSTMCharEmbedding - embed = LSTMCharEmbedding(vocab, embed_size=100, char_emb_size=50) + from fastNLP.embeddings import LSTMCharEmbedding + from fastNLP import Vocabulary + + vocab = Vocabulary() + vocab.add_word_lst("this is a demo .".split()) -这表示这个LSTMCharEmbedding当中character的embedding维度大小为50,返回的embedding结果维度大小为100。 + # character的embedding维度大小为50,返回的embedding结果维度大小为64。 + embed = LSTMCharEmbedding(vocab, embed_size=64, char_emb_size=50) + words = torch.LongTensor([[vocab.to_index(word) for word in "this is a demo .".split()]]) + print(embed(words).size()) +输出为:: + + torch.Size([1, 5, 64]) ----------------------------------------------------- -Part VI: 叠加使用多个embedding +Part VII: 叠加使用多个embedding ----------------------------------------------------- -在fastNLP中,我们使用 :class:`~fastNLP.embeddings.StackEmbedding` 来叠加多个embedding +单独使用Character Embedding往往效果并不是很好,需要同时结合word embedding。在fastNLP中可以通过 :class:`~fastNLP.embeddings.StackEmbedding` +来叠加embedding,具体的例子如下所示 + +.. code-block:: python + + from fastNLP.embeddings import StaticEmbedding, StackEmbedding, CNNCharEmbedding + from fastNLP import Vocabulary + + vocab = Vocabulary() + vocab.add_word_lst("this is a demo .".split()) + + word_embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-6b-50d') + char_embed = CNNCharEmbedding(vocab, embed_size=64, char_emb_size=50) + embed = StackEmbedding([word_embed, char_embed]) + + words = torch.LongTensor([[vocab.to_index(word) for word in "this is a demo .".split()]]) + print(embed(words).size()) # 输出embedding的维度为50+64=114 + +输出为:: + + torch.Size([1, 5, 114]) -例子如下: +:class:`~fastNLP.embeddings.StaticEmbedding`, :class:`~fastNLP.embeddings.ElmoEmbedding`, +:class:`~fastNLP.embeddings.CNNCharEmbedding`, :class:`~fastNLP.embeddings.BertEmbedding`等都可以互相拼接。 +:class:`~fastNLP.embeddings.StackEmbedding` 的使用和其它Embedding是一致的,即输入index返回对应的表示。但能够拼接起来的Embedding +必须使用同样的 :class:`~fastNLP.Vocabulary`,因为只有使用同样的 :class:`~fastNLP.Vocabulary` 才能保证同一个index指向的是同一个词或字。 ----------------------------------------------------------- +Part VIII: Embedding的其它说明 +----------------------------------------------------------- + +(1) 获取各种Embedding的dimension ..
code-block:: python - from fastNLP import StaticEmbedding, StackEmbedding - embed_1 = StaticEmbedding(vocab, model_dir_or_name='en-glove-6b-50', requires_grad=True) - embed_2 = StaticEmbedding(vocab, model_dir_or_name='en-word2vec-300', requires_grad=True) + from fastNLP.embeddings import * - stack_embed = StackEmbedding([embed_1, embed_2]) + vocab = Vocabulary() + vocab.add_word_lst("this is a demo .".split()) -StackEmbedding会把多个embedding的结果拼接起来,如上面例子的stack_embed返回的embedding维度为350维。 + static_embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-6b-50d') + print(static_embed.embedding_dim) # 50 + char_embed = CNNCharEmbedding(vocab, embed_size=30) + print(char_embed.embedding_dim) # 30 + elmo_embed_1 = ElmoEmbedding(vocab, model_dir_or_name='en-small', layers='2') + print(elmo_embed_1.embedding_dim) # 256 + elmo_embed_2 = ElmoEmbedding(vocab, model_dir_or_name='en-small', layers='1,2') + print(elmo_embed_2.embedding_dim) # 512 + bert_embed_1 = BertEmbedding(vocab, layers='-1', model_dir_or_name='en-base-cased') + print(bert_embed_1.embedding_dim) # 768 + bert_embed_2 = BertEmbedding(vocab, layers='2,-1', model_dir_or_name='en-base-cased') + print(bert_embed_2.embedding_dim) # 1536 + stack_embed = StackEmbedding([static_embed, char_embed]) + print(stack_embed.embedding_dim) # 80 -除此以外,还可以把静态embedding跟上下文相关的embedding拼接起来: +(2) 设置Embedding的权重是否更新 .. code-block:: python - from fastNLP import StaticEmbedding, StackEmbedding, ElmoEmbedding - elmo_embedding = ElmoEmbedding(vocab, model_dir_or_name='medium', layers='0,1,2', requires_grad=False) - glove_embedding = StaticEmbedding(vocab, model_dir_or_name='en-glove-6b-50', requires_grad=True) + from fastNLP.embeddings import * + + vocab = Vocabulary() + vocab.add_word_lst("this is a demo .".split()) + + embed = BertEmbedding(vocab, model_dir_or_name='en-base-cased') + embed.requires_grad = False # BertEmbedding不更新 + +(3) 各种Embedding中word_dropout与dropout的说明 + +fastNLP中所有的Embedding都支持传入word_dropout和dropout参数,word_dropout指示的是以多大概率将输入的word置为unk的index,这样既可以 +使得unk得到训练,也可以有一定的regularize效果; dropout参数是在获取到word的表示之后,以多大概率将一些维度的表示置为0。 + +如果使用 :class:`~fastNLP.embeddings.StackEmbedding` 且需要用到word_dropout,建议将word_dropout设置在 :class:`~fastNLP.embeddings.StackEmbedding` 上。 + + +----------------------------------------------------------- +Part IX: StaticEmbedding的使用建议 +----------------------------------------------------------- + +在英文的命名实体识别(NER)任务中,由``_ 指出,同时使用cnn character embedding和word embedding +会使得NER的效果有比较大的提升。正如你在\ref{引用第七节}看到的那样,fastNLP支持将 :class:`~fastNLP.embeddings.CNNCharEmbedding` +与 :class:`~fastNLP.embeddings.StaticEmbedding` 拼成一个 :class:`~fastNLP.embeddings.StackEmbedding`。如果通过这种方式使用,需要 +在预处理文本时,不要将词汇小写化(因为Character Embedding需要利用词语中的大小写信息)且不要将出现频次低于某个阈值的word设置为unk(因为 +Character embedding需要利用字形信息);但 :class:`~fastNLP.embeddings.StaticEmbedding` 使用的某些预训练词嵌入的词汇表中只有小写的词 +语, 且某些低频词并未在预训练中出现,需要被剔除。即(1) character embedding需要保留大小写,而某些static embedding不需要保留大小写。(2) +character embedding需要保留所有的字形, 而static embedding需要设置一个最低阈值以学到更好的表示。 + +(1) fastNLP如何解决关于大小写的问题 + +fastNLP通过在 :class:`~fastNLP.embeddings.StaticEmbedding` 中增加了一个lower参数解决该问题。如下面的例子所示 + +..
code-block:: python + + from fastNLP.embeddings import StaticEmbedding + from fastNLP import Vocabulary + + vocab = Vocabulary().add_word_lst("The the a A".split()) + # 下面用随机的StaticEmbedding演示,但与使用预训练时效果是一致的 + embed = StaticEmbedding(vocab, model_dir_or_name=None, embedding_dim=5) + print(embed(torch.LongTensor([vocab.to_index('The')]))) + print(embed(torch.LongTensor([vocab.to_index('the')]))) + +输出为:: + + tensor([[-0.4685, 0.4572, 0.5159, -0.2618, -0.6871]], grad_fn=) + tensor([[ 0.2615, 0.1490, -0.2491, 0.4009, -0.3842]], grad_fn=) + +可以看到"The"与"the"的vector是不一致的。但如果我们在初始化 :class:`~fastNLP.embeddings.StaticEmbedding` 时将lower设置为True,效果将 +如下所示 + +.. code-block:: python + + from fastNLP.embeddings import StaticEmbedding + from fastNLP import Vocabulary + + vocab = Vocabulary().add_word_lst("The the a A".split()) + # 下面用随机的StaticEmbedding演示,但与使用预训练时效果是一致的 + embed = StaticEmbedding(vocab, model_dir_or_name=None, embedding_dim=5, lower=True) + print(embed(torch.LongTensor([vocab.to_index('The')]))) + print(embed(torch.LongTensor([vocab.to_index('the')]))) + +输出为:: + + tensor([[-0.2237, 0.6825, -0.3459, -0.1795, 0.7516]], grad_fn=) + tensor([[-0.2237, 0.6825, -0.3459, -0.1795, 0.7516]], grad_fn=) + +可以看到"The"与"the"的vector是一致的。它们实际上引用的是同一个vector。通过将lower设置为True,可以让 :class:`~fastNLP.embeddings.StaticEmbedding` +中小写形式相同的词语引用同一个vector。 + +(2) fastNLP如何解决min_freq的问题 + +fastNLP通过在 :class:`~fastNLP.embeddings.StaticEmbedding` 中增加了一个min_freq参数解决该问题。如下面的例子所示 + +.. code-block:: python + + from fastNLP.embeddings import StaticEmbedding + from fastNLP import Vocabulary + + vocab = Vocabulary().add_word_lst("the the the a".split()) + # 下面用随机的StaticEmbedding演示,但与使用预训练时效果是一致的 + embed = StaticEmbedding(vocab, model_dir_or_name=None, embedding_dim=5, min_freq=2) + print(embed(torch.LongTensor([vocab.to_index('the')]))) + print(embed(torch.LongTensor([vocab.to_index('a')]))) + print(embed(torch.LongTensor([vocab.unknown_idx]))) + +输出为:: + + tensor([[ 0.0454, 0.3375, 0.6758, -0.2026, -0.4715]], grad_fn=) + tensor([[-0.7602, 0.0149, 0.2733, 0.3974, 0.7371]], grad_fn=) + tensor([[-0.7602, 0.0149, 0.2733, 0.3974, 0.7371]], grad_fn=) + +其中最后一行为unknown值的vector,可以看到a的vector表示与unknown是一样的,这是由于a的频次低于了2,所以被指向了unknown的表示;而the由于 +词频超过了2次,所以它是单独的表示。 + +在计算min_freq时,也会考虑到lower的作用,比如 + +..
code-block:: python - stack_embed = StackEmbedding([elmo_embedding, glove_embedding]) + from fastNLP.embeddings import StaticEmbedding + from fastNLP import Vocabulary ------------------------------------------- -Part VII: fastNLP支持的预训练Embedding ------------------------------------------- + vocab = Vocabulary().add_word_lst("the the the a A".split()) + # 下面用随机的StaticEmbedding演示,但与使用预训练时效果是一致的 + embed = StaticEmbedding(vocab, model_dir_or_name=None, embedding_dim=5, min_freq=2, lower=True) + print(embed(torch.LongTensor([vocab.to_index('the')]))) + print(embed(torch.LongTensor([vocab.to_index('a')]))) + print(embed(torch.LongTensor([vocab.to_index('A')]))) + print(embed(torch.LongTensor([vocab.unknown_idx]))) -fastNLP支持多种预训练Embedding并提供自动下载功能,具体参见文档 +输出为:: -`fastNLP可加载的embedding与数据集 `_ + tensor([[-0.7453, -0.5542, 0.5039, 0.6195, -0.4723]], grad_fn=) # the + tensor([[ 0.0170, -0.0995, -0.5743, -0.2469, -0.2095]], grad_fn=) # a + tensor([[ 0.0170, -0.0995, -0.5743, -0.2469, -0.2095]], grad_fn=) # A + tensor([[ 0.6707, -0.5786, -0.6967, 0.0111, 0.1209]], grad_fn=) # unk +可以看到a不再和最后一行的unknown共享一个表示了,这是由于a与A都算入了a的词频,且A的表示也是a的表示。 diff --git a/docs/source/tutorials/tutorial_4_load_dataset.rst b/docs/source/tutorials/tutorial_4_load_dataset.rst new file mode 100644 index 00000000..c7e49fac --- /dev/null +++ b/docs/source/tutorials/tutorial_4_load_dataset.rst @@ -0,0 +1,219 @@ +======================================= +使用Loader和Pipe加载并处理数据集 +======================================= + +这一部分是一个关于如何加载数据集的教程 + +教程目录: + + - `Part I: 数据集容器DataBundle`_ + - `Part II: 加载的各种数据集的Loader`_ + - `Part III: 使用Pipe对数据集进行预处理`_ + - `Part IV: fastNLP封装好的Loader和Pipe`_ + - `Part V: 不同格式类型的基础Loader`_ + + +------------------------------------ +Part I: 数据集容器DataBundle +------------------------------------ + +由于对于同一个任务,训练集、验证集和测试集会共用同一个词表以及具有相同的目标值,所以在fastNLP中我们使用了 :class:`~fastNLP.io.DataBundle` +来承载同一个任务的多个数据集 :class:`~fastNLP.DataSet` 以及它们的词表 :class:`~fastNLP.Vocabulary`。下面会有例子介绍 :class:`~fastNLP.io.DataBundle` +的相关使用。 + +:class:`~fastNLP.io.DataBundle` 在fastNLP中主要在各个 :class:`~fastNLP.io.Loader` 和 :class:`~fastNLP.io.Pipe` 中被使用。 +下面我们将先介绍一下 :class:`~fastNLP.io.Loader` 和 :class:`~fastNLP.io.Pipe`, 之后我们将给出相应的例子。 + +------------------------------------- +Part II: 加载的各种数据集的Loader +------------------------------------- + +在fastNLP中,所有的数据Loader都可以通过其文档判断其支持读取的数据格式,以及读取之后返回的 :class:`~fastNLP.DataSet` 的格式。例如 +\ref 加个引用。 + + - download 函数:自动将该数据集下载到缓存地址,默认缓存地址为~/.fastNLP/datasets/。由于版权等原因,不是所有的Loader都实现了该方法。 + 该方法会返回下载后文件所处的缓存地址。可以查看对应Loader的download方法的文档来判断该Loader加载的数据。 + - _load 函数:从一个数据文件中读取数据,返回一个 :class:`~fastNLP.DataSet`。返回的DataSet的格式可从Loader文档判断。 + - load 函数:从文件或者文件夹中读取数据并组装成 :class:`~fastNLP.io.DataBundle`。支持接受的参数类型有以下的几种 + - None, 将尝试读取自动缓存的数据,仅支持提供了自动下载数据的Loader + - 文件夹路径, 默认将尝试在该路径下匹配文件名中含有`train`, `test`, `dev`的文件,如果有多个文件含有相同的关键字,将无法通过 + 该方式读取 + - dict, 例如{'train':"/path/to/tr.conll", 'dev':"/to/validate.conll", "test":"/to/te.conll"} + +.. code-block:: python + + from fastNLP.io import CWSLoader + + loader = CWSLoader(dataset_name='pku') + data_bundle = loader.load() + print(data_bundle) + +输出内容为:: + + In total 3 datasets: + dev has 1831 instances. + train has 17223 instances. + test has 1944 instances. + +这里表示一共有3个数据集。其中: + + - 3个数据集分别为train、dev、test数据集,分别有17223、1831、1944个instance + +也可以取出DataSet并查看DataSet中的具体内容 + +..
code-block:: python + + tr_data = data_bundle.get_dataset('train') + print(tr_data[:2]) + + 输出为:: + + +--------------------------------------------------------------------------------------+ + | raw_words | + +--------------------------------------------------------------------------------------+ + | 迈向 充满 希望 的 新 世纪 —— 一九九八年 新年 讲话 ( 附 图片 1 张 ) | + | 中共中央 总书记 、 国家 主席 江 泽民 | + +--------------------------------------------------------------------------------------+ + +------------------------------------------ +Part III: 使用Pipe对数据集进行预处理 +------------------------------------------ +通过:class:`~fastNLP.io.Loader` 可以将文本数据读入,但并不能直接被神经网络使用,还需要进行一定的预处理。 + +在fastNLP中,我们使用 :class:`~fastNLP.io.Pipe`的子类作为数据预处理的类,Pipe和Loader一般具备一一对应的关系,该关系可以从其名称判断, +例如:class:`~fastNLP.io.CWSLoader`与:class:`~fastNLP.io.CWSPipe`是一一对应的。一般情况下Pipe处理包含以下的几个过程,(1)将raw_words或 +raw_chars进行tokenize以切分成不同的词或字; (2) 再建立词或字的 :class:`~fastNLP.Vocabulary`, 并将词或字转换为index; (3)将target +列建立词表并将target列转为index; + +所有的Pipe都可通过其文档查看通过该Pipe之后DataSet中的field的情况; 如 \ref{TODO 添加对例子的引用} + +各种数据集的Pipe当中,都包含了以下的两个函数: + + - process 函数:对输入的 :class:`~fastNLP.io.DataBundle` 进行处理, 然后返回处理之后的 :class:`~fastNLP.io.DataBundle`。 + process函数的文档中包含了该Pipe支持处理的DataSet的格式。 + - process_from_file 函数:输入数据集所在文件夹,使用对应的Loader读取数据(所以该函数支持的参数类型是由于其对应的Loader的load函数 + 决定的),然后调用相对应的process函数对数据进行预处理。相当于是把Load和process放在一个函数中执行。 + +接着上面CWSLoader的例子,我们展示一下CWSPipe的功能: + +.. code-block:: python + + from fastNLP.io import CWSPipe + + data_bundle = CWSPipe().process(data_bundle) + print(data_bundle) + +输出内容为:: + + In total 3 datasets: + dev has 1831 instances. + train has 17223 instances. + test has 1944 instances. + In total 2 vocabs: + chars has 4777 entries. + target has 4 entries. + +表示一共有3个数据集和2个词表。其中: + + - 3个数据集分别为train、dev、test数据集,分别有17223、1831、1944个instance + - 2个词表分别为chars词表与target词表。其中chars词表为句子文本所构建的词表,一共有4777个字; + target词表为目标标签所构建的词表,一共有4种标签。 + +相较于之前CWSLoader读取的DataBundle,新增了两个Vocabulary。 我们可以打印一下处理之后的DataSet + +.. code-block:: python + + tr_data = data_bundle.get_dataset('train') + print(tr_data[:2]) + +输出为:: + + +---------------------------------------------------+------------------------------------+------------------------------------+---------+ + | raw_words | chars | target | seq_len | + +---------------------------------------------------+------------------------------------+------------------------------------+---------+ + | 迈向 充满 希望 的 新 世纪 —— 一九九八年... | [1224, 178, 674, 544, 573, 435,... | [0, 1, 0, 1, 0, 1, 2, 2, 0, 1, ... | 29 | + | 中共中央 总书记 、 国家 主席 江 泽民 | [11, 212, 11, 335, 124, 256, 10... | [0, 3, 3, 1, 0, 3, 1, 2, 0, 1, ... | 15 | + +---------------------------------------------------+------------------------------------+------------------------------------+---------+ + +可以看到有两列为int的field: chars和target。这两列的名称同时也是DataBundle中的Vocabulary的名称。可以通过下列的代码获取并查看Vocabulary的 +信息 + +.. code-block:: python + + vocab = data_bundle.get_vocab('target') + print(vocab) + +输出为:: + + Vocabulary(['B', 'E', 'S', 'M']...) + +------------------------------------------ +Part IV: fastNLP封装好的Loader和Pipe +------------------------------------------ + +fastNLP封装了多种任务/数据集的Loader和Pipe并提供自动下载功能,具体参见文档 + +`fastNLP可加载数据集 `_ + +-------------------------------------------------------- +Part V: 不同格式类型的基础Loader +-------------------------------------------------------- + +除了上面提到的针对具体任务的Loader,我们还提供了CSV格式和JSON格式的Loader + +:class:`~fastNLP.io.loader.CSVLoader` + 读取CSV类型的数据集文件。例子如下: + + .. 
code-block:: python + + from fastNLP.io.loader import CSVLoader + data_set_loader = CSVLoader( + headers=('raw_words', 'target'), sep='\t' + ) + # 表示将CSV文件中每一行的第一项填入'words' field,第二项填入'target' field。 + # 其中项之间由'\t'分割开来 + + data_set = data_set_loader._load('path/to/your/file') + + 数据集内容样例如下 :: + + But it does not leave you with much . 1 + You could hate it for the same reason . 1 + The performances are an absolute joy . 4 + + 读取之后的DataSet具有以下的field + + .. csv-table:: + :header: raw_words, target + + "But it does not leave you with much .", "1" + "You could hate it for the same reason .", "1" + "The performances are an absolute joy .", "4" + +:class:`~fastNLP.io.loader.JsonLoader` + 读取Json类型的数据集文件,数据必须按行存储,每行是一个包含各类属性的Json对象。例子如下: + + .. code-block:: python + + from fastNLP.io.loader import JsonLoader + oader = JsonLoader( + fields={'sentence1': 'raw_words1', 'sentence2': 'raw_words2', 'gold_label': 'target'} + ) + # 表示将Json对象中'sentence1'、'sentence2'和'gold_label'对应的值赋给'raw_words1'、'raw_words2'、'target'这三个fields + + data_set = loader._load('path/to/your/file') + + 数据集内容样例如下 :: + + {"annotator_labels": ["neutral"], "captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"} + {"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"} + {"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . 
) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"} + + 读取之后的DataSet具有以下的field + + .. csv-table:: + :header: raw_words0, raw_words1, target + + "A person on a horse jumps over a broken down airplane.", "A person is training his horse for a competition.", "neutral" + "A person on a horse jumps over a broken down airplane.", "A person is at a diner, ordering an omelette.", "contradiction" + "A person on a horse jumps over a broken down airplane.", "A person is outdoors, on a horse.", "entailment" diff --git a/docs/source/tutorials/tutorial_4_loss_optimizer.rst b/docs/source/tutorials/tutorial_6_loss_optimizer.rst similarity index 100% rename from docs/source/tutorials/tutorial_4_loss_optimizer.rst rename to docs/source/tutorials/tutorial_6_loss_optimizer.rst diff --git a/docs/source/tutorials/tutorial_8_metrics.rst b/docs/source/tutorials/tutorial_7_metrics.rst similarity index 100% rename from docs/source/tutorials/tutorial_8_metrics.rst rename to docs/source/tutorials/tutorial_7_metrics.rst diff --git a/docs/source/tutorials/tutorial_7_modules_models.rst b/docs/source/tutorials/tutorial_8_modules_models.rst similarity index 100% rename from docs/source/tutorials/tutorial_7_modules_models.rst rename to docs/source/tutorials/tutorial_8_modules_models.rst diff --git a/docs/source/tutorials/tutorial_6_seq_labeling.rst b/docs/source/tutorials/tutorial_9_seq_labeling.rst similarity index 100% rename from docs/source/tutorials/tutorial_6_seq_labeling.rst rename to docs/source/tutorials/tutorial_9_seq_labeling.rst diff --git a/docs/source/user/tutorials.rst b/docs/source/user/tutorials.rst index 3e9e1b54..e19f252b 100644 --- a/docs/source/user/tutorials.rst +++ b/docs/source/user/tutorials.rst @@ -8,13 +8,14 @@ fastNLP 详细使用教程 :maxdepth: 1 使用DataSet预处理文本 - 使用Loader和Pipe加载并处理数据集 + 使用Vocabulary转换文本与index 使用Embedding模块将文本转成向量 - 动手实现一个文本分类器I-使用Trainer和Tester快速训练和测试 + 使用Loader和Pipe加载并处理数据集 动手实现一个文本分类器II-使用DataSetIter实现自定义训练过程 - 快速实现序列标注模型 - 使用Modules和Models快速搭建自定义模型 - 使用Metric快速评测你的模型 - 使用Callback自定义你的训练过程 - 使用fitlog 辅助 fastNLP 进行科研 + 动手实现一个文本分类器I-使用Trainer和Tester快速训练和测试 + 使用Metric快速评测你的模型 + 使用Modules和Models快速搭建自定义模型 + 快速实现序列标注模型 + 使用Callback自定义你的训练过程 + 使用fitlog 辅助 fastNLP 进行科研 diff --git a/fastNLP/embeddings/bert_embedding.py b/fastNLP/embeddings/bert_embedding.py index 05351cbd..aa998801 100644 --- a/fastNLP/embeddings/bert_embedding.py +++ b/fastNLP/embeddings/bert_embedding.py @@ -126,27 +126,6 @@ class BertEmbedding(ContextualEmbedding): if self._word_sep_index: words.masked_fill_(sep_mask, self._word_sep_index) return words - - @property - def requires_grad(self): - """ - Embedding的参数是否允许优化。True: 所有参数运行优化; False: 所有参数不允许优化; None: 部分允许优化、部分不允许 - - :return: - """ - requires_grads = set([param.requires_grad for name, param in self.named_parameters() - if 'word_pieces_lengths' not in name]) - if len(requires_grads) == 1: - return requires_grads.pop() - else: - return None - - @requires_grad.setter - def requires_grad(self, value): - for name, param in self.named_parameters(): - if 'word_pieces_lengths' in name: # 这个不能加入到requires_grad中 - continue - param.requires_grad = value class BertWordPieceEncoder(nn.Module): @@ -175,23 +154,6 @@ class BertWordPieceEncoder(nn.Module): self.word_dropout = word_dropout self.dropout_layer = nn.Dropout(dropout) - @property - def requires_grad(self): - """ - Embedding的参数是否允许优化。True: 所有参数运行优化; False: 所有参数不允许优化; None: 部分允许优化、部分不允许 - :return: - """ - requires_grads = 
set([param.requires_grad for name, param in self.named_parameters()]) - if len(requires_grads) == 1: - return requires_grads.pop() - else: - return None - - @requires_grad.setter - def requires_grad(self, value): - for name, param in self.named_parameters(): - param.requires_grad = value - @property def embed_size(self): return self._embed_size diff --git a/fastNLP/embeddings/char_embedding.py b/fastNLP/embeddings/char_embedding.py index 59109206..2492b6d7 100644 --- a/fastNLP/embeddings/char_embedding.py +++ b/fastNLP/embeddings/char_embedding.py @@ -139,40 +139,6 @@ class CNNCharEmbedding(TokenEmbedding): chars = torch.sum(conv_chars, dim=-2) / chars_masks.eq(0).sum(dim=-1, keepdim=True).float() chars = self.fc(chars) return self.dropout(chars) - - @property - def requires_grad(self): - """ - Embedding的参数是否允许优化。True: 所有参数运行优化; False: 所有参数不允许优化; None: 部分允许优化、部分不允许 - :return: - """ - params = [] - for name, param in self.named_parameters(): - if 'words_to_chars_embedding' not in name and 'word_lengths' not in name: - params.append(param.requires_grad) - requires_grads = set(params) - if len(requires_grads) == 1: - return requires_grads.pop() - else: - return None - - @requires_grad.setter - def requires_grad(self, value): - for name, param in self.named_parameters(): - if 'words_to_chars_embedding' in name or 'word_lengths' in name: # 这个不能加入到requires_grad中 - continue - param.requires_grad = value - - def reset_parameters(self): - for name, param in self.named_parameters(): - if 'words_to_chars_embedding' in name or 'word_lengths' in name: # 这个不能reset - continue - if 'char_embedding' in name: - continue - if param.data.dim() > 1: - nn.init.xavier_uniform_(param, 1) - else: - nn.init.uniform_(param, -1, 1) class LSTMCharEmbedding(TokenEmbedding): @@ -293,27 +259,3 @@ class LSTMCharEmbedding(TokenEmbedding): chars = self.fc(chars) return self.dropout(chars) - - @property - def requires_grad(self): - """ - Embedding的参数是否允许优化。True: 所有参数运行优化; False: 所有参数不允许优化; None: 部分允许优化、部分不允许 - - :return: - """ - params = [] - for name, param in self.named_parameters(): - if 'words_to_chars_embedding' not in name and 'word_lengths' not in name: - params.append(param) - requires_grads = set(params) - if len(requires_grads) == 1: - return requires_grads.pop() - else: - return None - - @requires_grad.setter - def requires_grad(self, value): - for name, param in self.named_parameters(): - if 'words_to_chars_embedding' in name or 'word_lengths' in name: # 这个不能加入到requires_grad中 - continue - param.requires_grad = value diff --git a/fastNLP/embeddings/elmo_embedding.py b/fastNLP/embeddings/elmo_embedding.py index d19a3577..57842c33 100644 --- a/fastNLP/embeddings/elmo_embedding.py +++ b/fastNLP/embeddings/elmo_embedding.py @@ -55,7 +55,7 @@ class ElmoEmbedding(ContextualEmbedding): 并删除character encoder,之后将直接使用cache的embedding。默认为False。 """ - def __init__(self, vocab: Vocabulary, model_dir_or_name: str = 'en', layers: str = '2', requires_grad: bool = False, + def __init__(self, vocab: Vocabulary, model_dir_or_name: str = 'en', layers: str = '2', requires_grad: bool = True, word_dropout=0.0, dropout=0.0, cache_word_reprs: bool = False): super(ElmoEmbedding, self).__init__(vocab, word_dropout=word_dropout, dropout=dropout) @@ -136,27 +136,6 @@ class ElmoEmbedding(ContextualEmbedding): for name in ['layers', 'model', 'layer_weights', 'gamma']: if hasattr(self, name): delattr(self, name) - - @property - def requires_grad(self): - """ - Embedding的参数是否允许优化。True: 所有参数运行优化; False: 所有参数不允许优化; None: 部分允许优化、部分不允许 - - :return: - 
""" - requires_grads = set([param.requires_grad for name, param in self.named_parameters() - if 'words_to_chars_embedding' not in name and 'words_to_words' not in name]) - if len(requires_grads) == 1: - return requires_grads.pop() - else: - return None - - @requires_grad.setter - def requires_grad(self, value): - for name, param in self.named_parameters(): - if 'words_to_chars_embedding' in name or 'words_to_words' in name: # 这个不能加入到requires_grad中 - continue - param.requires_grad = value class _ElmoModel(nn.Module): diff --git a/fastNLP/embeddings/embedding.py b/fastNLP/embeddings/embedding.py index 255b0823..e82ef0b4 100644 --- a/fastNLP/embeddings/embedding.py +++ b/fastNLP/embeddings/embedding.py @@ -115,6 +115,10 @@ class Embedding(nn.Module): class TokenEmbedding(nn.Module): + """ + fastNLP中各种Embedding的基类 + + """ def __init__(self, vocab, word_dropout=0.0, dropout=0.0): super(TokenEmbedding, self).__init__() if vocab.rebuild: diff --git a/fastNLP/embeddings/stack_embedding.py b/fastNLP/embeddings/stack_embedding.py index e83a275c..91702ec2 100644 --- a/fastNLP/embeddings/stack_embedding.py +++ b/fastNLP/embeddings/stack_embedding.py @@ -22,10 +22,11 @@ class StackEmbedding(TokenEmbedding): Example:: >>> from fastNLP import Vocabulary - >>> from fastNLP.embeddings import StaticEmbedding + >>> from fastNLP.embeddings import StaticEmbedding, StackEmbedding >>> vocab = Vocabulary().add_word_lst("The whether is good .".split()) >>> embed_1 = StaticEmbedding(vocab, model_dir_or_name='en-glove-6b-50d', requires_grad=True) >>> embed_2 = StaticEmbedding(vocab, model_dir_or_name='en-word2vec-300', requires_grad=True) + >>> embed = StackEmbedding([embed_1, embed_2]) :param embeds: 一个由若干个TokenEmbedding组成的list,要求每一个TokenEmbedding的词表都保持一致 :param float word_dropout: 以多大的概率将一个词替换为unk。这样既可以训练unk也是一定的regularize。不同embedidng会在相同的位置 @@ -57,35 +58,26 @@ class StackEmbedding(TokenEmbedding): :return: """ assert isinstance(embed, TokenEmbedding) + self._embed_size += embed.embed_size self.embeds.append(embed) + return self def pop(self): """ 弹出最后一个embed :return: """ - return self.embeds.pop() + embed = self.embeds.pop() + self._embed_size -= embed.embed_size + return embed @property def embed_size(self): - return self._embed_size - - @property - def requires_grad(self): """ - Embedding的参数是否允许优化。True: 所有参数运行优化; False: 所有参数不允许优化; None: 部分允许优化、部分不允许 + 该Embedding输出的vector的最后一维的维度。 :return: """ - requires_grads = set([embed.requires_grad for embed in self.embeds()]) - if len(requires_grads) == 1: - return requires_grads.pop() - else: - return None - - @requires_grad.setter - def requires_grad(self, value): - for embed in self.embeds(): - embed.requires_grad = value + return self._embed_size def forward(self, words): """ diff --git a/fastNLP/embeddings/static_embedding.py b/fastNLP/embeddings/static_embedding.py index 8249aa11..399191dc 100644 --- a/fastNLP/embeddings/static_embedding.py +++ b/fastNLP/embeddings/static_embedding.py @@ -54,13 +54,16 @@ class StaticEmbedding(TokenEmbedding): 如果输入为None则使用embedding_dim的维度随机初始化一个embedding。 :param int embedding_dim: 随机初始化的embedding的维度,当该值为大于0的值时,将忽略model_dir_or_name。 :param bool requires_grad: 是否需要gradient. 
默认为True - :param callable init_method: 如何初始化没有找到的值。可以使用torch.nn.init.*中各种方法。调用该方法时传入一个tensor对 + :param callable init_method: 如何初始化没有找到的值。可以使用torch.nn.init.*中各种方法, 传入的方法应该接受一个tensor,并 + inplace地修改其值。 :param bool lower: 是否将vocab中的词语小写后再和预训练的词表进行匹配。如果你的词表中包含大写的词语,或者就是需要单独 为大写的词语开辟一个vector表示,则将lower设置为False。 :param float dropout: 以多大的概率对embedding的表示进行Dropout。0.1即随机将10%的值置为0。 :param float word_dropout: 以多大的概率将一个词替换为unk。这样既可以训练unk也是一定的regularize。 :param bool normalize: 是否对vector进行normalize,使得每个vector的norm为1。 :param int min_freq: Vocabulary词频数小于这个数量的word将被指向unk。 + :param dict **kwarngs: only_train_min_freq, 仅对train中的词语使用min_freq筛选; only_norm_found_vector是否仅对在预训练中 + 找到的词语使用normalize。 """ def __init__(self, vocab: Vocabulary, model_dir_or_name: str = 'en', embedding_dim=-1, requires_grad: bool = True, @@ -183,27 +186,6 @@ class StaticEmbedding(TokenEmbedding): return embed - @property - def requires_grad(self): - """ - Embedding的参数是否允许优化。True: 所有参数运行优化; False: 所有参数不允许优化; None: 部分允许优化、部分不允许 - - :return: - """ - requires_grads = set([param.requires_grad for name, param in self.named_parameters() - if 'words_to_words' not in name]) - if len(requires_grads) == 1: - return requires_grads.pop() - else: - return None - - @requires_grad.setter - def requires_grad(self, value): - for name, param in self.named_parameters(): - if 'words_to_words' in name: - continue - param.requires_grad = value - def _load_with_vocab(self, embed_filepath, vocab, dtype=np.float32, padding='', unknown='', error='ignore', init_method=None): """ diff --git a/fastNLP/io/loader/classification.py b/fastNLP/io/loader/classification.py index 9efcf5d2..53bc6789 100644 --- a/fastNLP/io/loader/classification.py +++ b/fastNLP/io/loader/classification.py @@ -31,7 +31,6 @@ class YelpLoader(Loader): "1","I got 'new' tires from the..." "1","Don't waste your time..." - 读取YelpFull, YelpPolarity的数据。可以通过xxx下载并预处理数据。 读取的DataSet将具备以下的数据结构 .. 
csv-table:: diff --git a/fastNLP/io/loader/loader.py b/fastNLP/io/loader/loader.py index 22636a27..baf2874e 100644 --- a/fastNLP/io/loader/loader.py +++ b/fastNLP/io/loader/loader.py @@ -34,29 +34,27 @@ class Loader: """ 从指定一个或多个路径中的文件中读取数据,返回 :class:`~fastNLP.io.DataBundle` 。 - 读取的field根据ConllLoader初始化时传入的headers决定。 - :param Union[str, Dict[str, str]] paths: 支持以下的几种输入方式 (0) 如果为None,则先查看本地是否有缓存,如果没有则自动下载并缓存。 (1) 传入一个目录, 该目录下名称包含train的被认为是train,包含test的被认为是test,包含dev的被认为是dev,如果检测到多个文件 名包含'train'、 'dev'、 'test'则会报错:: - data_bundle = ConllLoader().load('/path/to/dir') # 返回的DataBundle中datasets根据目录下是否检测到train、 - # dev、 test等有所变化,可以通过以下的方式取出DataSet - tr_data = data_bundle.datasets['train'] - te_data = data_bundle.datasets['test'] # 如果目录下有文件包含test这个字段 + data_bundle = xxxLoader().load('/path/to/dir') # 返回的DataBundle中datasets根据目录下是否检测到train、 + # dev、 test等有所变化,可以通过以下的方式取出DataSet + tr_data = data_bundle.get_dataset('train') + te_data = data_bundle.get_dataset('test') # 如果目录下有文件包含test这个字段 - (2) 传入文件路径:: + (2) 传入一个dict,比如train,dev,test不在同一个目录下,或者名称中不包含train, dev, test:: - data_bundle = ConllLoader().load("/path/to/a/train.conll") # 返回DataBundle对象, datasets中仅包含'train' - tr_data = data_bundle.datasets['train'] # 可以通过以下的方式取出DataSet + paths = {'train':"/path/to/tr.conll", 'dev':"/to/validate.conll", "test":"/to/te.conll"} + data_bundle = xxxLoader().load(paths) # 返回的DataBundle中的dataset中包含"train", "dev", "test" + dev_data = data_bundle.get_dataset('dev') - (3) 传入一个dict,比如train,dev,test不在同一个目录下,或者名称中不包含train, dev, test:: + (3) 传入文件路径:: - paths = {'train':"/path/to/tr.conll", 'dev':"/to/validate.conll", "test":"/to/te.conll"} - data_bundle = ConllLoader().load(paths) # 返回的DataBundle中的dataset中包含"train", "dev", "test" - dev_data = data_bundle.datasets['dev'] + data_bundle = xxxLoader().load("/path/to/a/train.conll") # 返回DataBundle对象, datasets中仅包含'train' + tr_data = data_bundle.get_dataset('train') # 取出DataSet :return: 返回的 :class:`~fastNLP.io.DataBundle` """ @@ -78,7 +76,7 @@ class Loader: @staticmethod def _get_dataset_path(dataset_name): """ - 传入dataset的名称,获取读取数据的目录。如果数据不存在,会尝试自动下载并缓存 + 传入dataset的名称,获取读取数据的目录。如果数据不存在,会尝试自动下载并缓存(如果支持的话) :param str dataset_name: 数据集的名称 :return: str, 数据集的目录地址。直接到该目录下读取相应的数据即可。 From 587edd54382f9dac7c638eb7adbde73b34830f78 Mon Sep 17 00:00:00 2001 From: ChenXin Date: Tue, 10 Sep 2019 11:08:27 +0800 Subject: [PATCH 49/50] update the doc-checking tool --- docs/count.py | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/docs/count.py b/docs/count.py index 7118216a..0830c7cc 100644 --- a/docs/count.py +++ b/docs/count.py @@ -110,18 +110,34 @@ def check_file(m, name): return funcs, classes -def check_files(modules, out=sys.stdout): +def check_files(modules, out=None): for name in sorted(modules.keys()): print(name, file=out) funcs, classes = check_file(modules[name], name) - for f in funcs: - print("%-30s \t %s \t %s" % (f, gr("文档", funcs[f][0]), gr("测试", funcs[f][1])), file=out) - for c in classes: - print("%-30s \t %s \t %s" % (c, gr("文档", classes[c][0]), gr("测试", classes[c][1])), file=out) - methods = classes[c][2] - for f in methods: - print(" %-28s \t %s" % (f, gr("文档", methods[f][0])), file=out) - print(file=out) + if out is None: + for f in funcs: + print("%-30s \t %s \t %s" % (f, gr("文档", funcs[f][0]), gr("测试", funcs[f][1]))) + for c in classes: + print("%-30s \t %s \t %s" % (c, gr("文档", classes[c][0]), gr("测试", classes[c][1]))) + methods = classes[c][2] + for f in methods: + print(" %-28s \t %s" % (f, gr("文档", 
methods[f][0]))) + else: + for f in funcs: + if not funcs[f][0]: + print("缺少文档 %s" % (f), file=out) + if not funcs[f][1]: + print("缺少测试 %s" % (f), file=out) + for c in classes: + if not classes[c][0]: + print("缺少文档 %s" % (c), file=out) + if not classes[c][1]: + print("缺少测试 %s" % (c), file=out) + methods = classes[c][2] + for f in methods: + if not methods[f][0]: + print("缺少文档 %s" % (c + "." + f), file=out) + print(file=out) def main(): @@ -134,7 +150,7 @@ def main(): create_rst_file(modules, name, children) print(_colored_string('Done!', "Green")) print(_colored_string('Checking all files...', "Blue")) - check_files(modules) + check_files(modules, out=open("results.txt", "w")) print(_colored_string('Done!', "Green")) From 9b8265dc7e4df64de427b57334c8e6f4e10cebaf Mon Sep 17 00:00:00 2001 From: yh Date: Tue, 10 Sep 2019 12:39:57 +0800 Subject: [PATCH 50/50] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dchar=5Fembedding=20bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/embeddings/char_embedding.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fastNLP/embeddings/char_embedding.py b/fastNLP/embeddings/char_embedding.py index 2492b6d7..a0328525 100644 --- a/fastNLP/embeddings/char_embedding.py +++ b/fastNLP/embeddings/char_embedding.py @@ -106,8 +106,7 @@ class CNNCharEmbedding(TokenEmbedding): for i in range(len(kernel_sizes))]) self._embed_size = embed_size self.fc = nn.Linear(sum(filter_nums), embed_size) - self.reset_parameters() - + def forward(self, words): """ 输入words的index后,生成对应的words的表示。
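
以上若干补丁删除了各个 Embedding 中自定义的 requires_grad property:这些 property 的作用是在设置 requires_grad 时把 words_to_words、words_to_chars_embedding、word_lengths 等不参与训练的索引张量特殊排除。当这类张量注册为 buffer(register_buffer)而不是 nn.Parameter 时,它们本来就不在 parameters() 之列,也就不再需要这种特殊处理。下面是一个示意性的最小例子(其中的 ToyEmbedding 类与数据均为假设,并非 fastNLP 的实际实现),用来说明 buffer 与 parameter 的区别:

.. code-block:: python

    import torch
    import torch.nn as nn

    class ToyEmbedding(nn.Module):
        def __init__(self, num_words=10, embedding_dim=5):
            super().__init__()
            self.embed = nn.Embedding(num_words, embedding_dim)
            # buffer 不会出现在 parameters() 中,但会随 state_dict 保存、随 .to(device) 一起移动
            self.register_buffer('words_to_words', torch.arange(num_words).long())

        def forward(self, words):
            # 先经过不训练的索引映射,再查 embedding
            return self.embed(self.words_to_words[words])

    embed = ToyEmbedding()
    # 由于索引映射是 buffer 而不是 Parameter,冻结权重时无需再把它特殊排除
    for param in embed.parameters():
        param.requires_grad = False
    print([name for name, _ in embed.named_parameters()])  # ['embed.weight']
    print([name for name, _ in embed.named_buffers()])     # ['words_to_words']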