From 1a1254b217a422adfbaee76e4ed2fe0ac672d490 Mon Sep 17 00:00:00 2001
From: Yige Xu
Date: Tue, 28 Apr 2020 23:35:12 +0800
Subject: [PATCH] Fix a documentation error in roberta embedding

---
 fastNLP/embeddings/roberta_embedding.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fastNLP/embeddings/roberta_embedding.py b/fastNLP/embeddings/roberta_embedding.py
index c0e85bf1..1479a383 100644
--- a/fastNLP/embeddings/roberta_embedding.py
+++ b/fastNLP/embeddings/roberta_embedding.py
@@ -40,7 +40,7 @@ class RobertaEmbedding(ContextualEmbedding):
         >>> from fastNLP import Vocabulary
         >>> from fastNLP.embeddings import RobertaEmbedding
         >>> vocab = Vocabulary().add_word_lst("The whether is good .".split())
-        >>> embed = RobertaEmbedding(vocab, model_dir_or_name='en-base-uncased', requires_grad=False, layers='4,-2,-1')
+        >>> embed = RobertaEmbedding(vocab, model_dir_or_name='en', requires_grad=False, layers='4,-2,-1')
         >>> words = torch.LongTensor([[vocab.to_index(word) for word in "The whether is good .".split()]])
         >>> outputs = embed(words)
         >>> outputs.size()
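
For context, a minimal sketch of the corrected doctest run end to end, assuming the fastNLP 'en' RoBERTa weights can be downloaded or are already cached; the shape comment reflects the size documented in this docstring for the three concatenated layers.

```python
import torch
from fastNLP import Vocabulary
from fastNLP.embeddings import RobertaEmbedding

# Build a vocabulary over the same toy sentence used in the docstring.
vocab = Vocabulary().add_word_lst("The whether is good .".split())

# 'en-base-uncased' is a BERT-style model name; the RoBERTa alias is 'en',
# which is exactly what this patch corrects in the example.
embed = RobertaEmbedding(vocab, model_dir_or_name='en',
                         requires_grad=False, layers='4,-2,-1')

# Index the sentence and embed it; the three requested layers are
# concatenated along the last dimension.
words = torch.LongTensor([[vocab.to_index(w) for w in "The whether is good .".split()]])
outputs = embed(words)
print(outputs.size())  # expected per the docstring: torch.Size([1, 5, 2304])
```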