# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for RoBERTa."""

from typing import List, Optional

from fastNLP.transformers.torch.tokenization_utils import AddedToken
from fastNLP.core.log import logger
from ..gpt2.tokenization_gpt2 import GPT2Tokenizer

__all__ = [
    "RobertaTokenizer",
]

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json",
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}


class RobertaTokenizer(GPT2Tokenizer):
    """
    Constructs a RoBERTa tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word
    will be encoded differently depending on whether it is at the beginning of the sentence (without space) or not::

        >>> from transformers import RobertaTokenizer
        >>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
        >>> tokenizer("Hello world")['input_ids']
        [0, 31414, 232, 2]
        >>> tokenizer(" Hello world")['input_ids']
        [0, 20920, 232, 2]

    You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when
    you call it on some text, but since the model was not pretrained this way, it might yield a decrease in
    performance.
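
    Passing the flag at call time works the same way; as an illustrative continuation of the example above, the
    un-prefixed text then encodes exactly like the prefixed one::

        >>> tokenizer("Hello world", add_prefix_space=True)['input_ids']
        [0, 20920, 232, 2]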

    .. note::

        When used with ``is_split_into_words=True``, this tokenizer will add a space before each word (even the
        first one).

    This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
    Users should refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (:obj:`str`):
            Path to the vocabulary file.
        merges_file (:obj:`str`):
            Path to the merges file.
        errors (:obj:`str`, `optional`, defaults to :obj:`"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode
            <https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information.
        bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            .. note::

                When building a sequence using special tokens, this is not the token that is used for the beginning
                of sequence. The token used is the :obj:`cls_token`.
        eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
            The end of sequence token.

            .. note::

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the :obj:`sep_token`.
        sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (:obj:`str`, `optional`, defaults to :obj:`"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just like any
            other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
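
    Example (an illustrative sketch; assumes ``vocab.json`` and ``merges.txt`` files saved from a pretrained
    RoBERTa checkpoint)::

        >>> tokenizer = RobertaTokenizer("vocab.json", "merges.txt", add_prefix_space=True)
        >>> tokenizer.tokenize("Hello world")
        ['ĠHello', 'Ġworld']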
- """
-
- vocab_files_names = VOCAB_FILES_NAMES
- pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
- max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
- model_input_names = ["input_ids", "attention_mask"]
-
- def __init__(
- self,
- vocab_file,
- merges_file,
- errors="replace",
- bos_token="<s>",
- eos_token="</s>",
- sep_token="</s>",
- cls_token="<s>",
- unk_token="<unk>",
- pad_token="<pad>",
- mask_token="<mask>",
- add_prefix_space=False,
- **kwargs
- ):
- bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
- eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
- sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
- cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
- unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
- pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A RoBERTa sequence has the following format:

        - single sequence: ``<s> X </s>``
        - pair of sequences: ``<s> A </s></s> B </s>``

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
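
        Example (illustrative; the IDs assume the ``roberta-base`` vocabulary, where ``<s>`` is 0 and ``</s>``
        is 2)::

            >>> tokenizer.build_inputs_with_special_tokens([31414], [232])
            [0, 31414, 2, 2, 232, 2]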
- """
- if token_ids_1 is None:
- return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
- cls = [self.cls_token_id]
- sep = [self.sep_token_id]
- return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` method.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
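
        Example (illustrative; the mask depends only on the list lengths, not on the vocabulary)::

            >>> tokenizer.get_special_tokens_mask([31414, 232])
            [1, 0, 0, 1]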
- """
- if already_has_special_tokens:
- return super().get_special_tokens_mask(
- token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
- )
-
- if token_ids_1 is None:
- return [1] + ([0] * len(token_ids_0)) + [1]
- return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does
        not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of zeros.
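
        Example (illustrative; the output length matches the full sequence built with special tokens)::

            >>> tokenizer.create_token_type_ids_from_sequences([31414], [232])
            [0, 0, 0, 0, 0, 0]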
- """
- sep = [self.sep_token_id]
- cls = [self.cls_token_id]
-
- if token_ids_1 is None:
- return len(cls + token_ids_0 + sep) * [0]
- return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
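        """
        Prepend a space to ``text`` when ``add_prefix_space`` or ``is_split_into_words`` is set, so that the leading
        word is tokenized like any word preceded by a space.

        Example (an illustrative sketch of the behavior)::

            >>> tokenizer.prepare_for_tokenization("Hello world", add_prefix_space=True)
            (' Hello world', {})
        """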
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)