From f1cb6f616721d0c2f2eea7a56ae24970dd1c4d87 Mon Sep 17 00:00:00 2001
From: x54-729 <17307130121@fudan.edu.cn>
Date: Tue, 31 May 2022 03:56:39 +0000
Subject: [PATCH] small

---
 tests/modules/torch/encoder/test_seq2seq_encoder.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/modules/torch/encoder/test_seq2seq_encoder.py b/tests/modules/torch/encoder/test_seq2seq_encoder.py
index 97aa5a7c..3570fd16 100755
--- a/tests/modules/torch/encoder/test_seq2seq_encoder.py
+++ b/tests/modules/torch/encoder/test_seq2seq_encoder.py
@@ -1,12 +1,12 @@
 import pytest
 
 from fastNLP.envs.imports import _NEED_IMPORT_TORCH
+from fastNLP import Vocabulary
 
 if _NEED_IMPORT_TORCH:
     import torch
 
     from fastNLP.modules.torch.encoder.seq2seq_encoder import TransformerSeq2SeqEncoder, LSTMSeq2SeqEncoder
-    from fastNLP import Vocabulary
     from fastNLP.embeddings.torch import StaticEmbedding
 
 
@@ -22,6 +22,7 @@ class TestTransformerSeq2SeqEncoder:
         assert (encoder_output.size() == (1, 3, 10))
 
 
+@pytest.mark.torch
 class TestBiLSTMEncoder:
     def test_case(self):
         vocab = Vocabulary().add_word_lst("This is a test .".split())
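
For context on the change above: the patch moves the Vocabulary import out of the `if _NEED_IMPORT_TORCH:` guard (so the module can be imported even when PyTorch is absent) and tags TestBiLSTMEncoder with `@pytest.mark.torch`. Such a marker is usually paired with a conftest.py hook that registers it and skips marked tests when torch is unavailable. The sketch below is a hypothetical illustration of that pattern, not fastNLP's actual test configuration; only `_NEED_IMPORT_TORCH` and the `torch` marker name are taken from the patch, and the flag is assumed to be truthy exactly when torch can be imported.

# conftest.py -- hypothetical sketch, not fastNLP's real configuration.
# Shows how a "torch" marker can gate tests on PyTorch availability, which is
# why module-level imports such as Vocabulary must not live inside the
# `if _NEED_IMPORT_TORCH:` block.
import pytest

from fastNLP.envs.imports import _NEED_IMPORT_TORCH  # assumed truthy iff torch is importable


def pytest_configure(config):
    # Register the marker so `pytest --strict-markers` accepts @pytest.mark.torch.
    config.addinivalue_line("markers", "torch: test requires PyTorch")


def pytest_collection_modifyitems(config, items):
    if _NEED_IMPORT_TORCH:
        return  # torch is available; run the marked tests normally
    skip_torch = pytest.mark.skip(reason="PyTorch is not installed")
    for item in items:
        if "torch" in item.keywords:
            item.add_marker(skip_torch)

With this kind of setup, marked tests can also be selected explicitly with `pytest -m torch` or excluded with `pytest -m "not torch"`.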