From 9b7562a13aa626ab4891126034bf78c326a1207a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=88=98=E5=90=91=E9=98=B3?= <760605341@qq.com>
Date: Wed, 18 Nov 2020 12:10:09 +0800
Subject: [PATCH] update fastNLP/modules/encoder/seq2seq_encoder.py.

---
 fastNLP/modules/encoder/seq2seq_encoder.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fastNLP/modules/encoder/seq2seq_encoder.py b/fastNLP/modules/encoder/seq2seq_encoder.py
index d280582a..5eae1e6d 100644
--- a/fastNLP/modules/encoder/seq2seq_encoder.py
+++ b/fastNLP/modules/encoder/seq2seq_encoder.py
@@ -132,7 +132,7 @@ class TransformerSeq2SeqEncoder(Seq2SeqEncoder):
         x = self.input_fc(x)
         x = F.dropout(x, p=self.dropout, training=self.training)
 
-        encoder_mask = seq_len_to_mask(seq_len)
+        encoder_mask = seq_len_to_mask(seq_len, max_len=max_src_len)
         encoder_mask = encoder_mask.to(device)
         for layer in self.layer_stacks: