|
-
-
-
- """
- The code in this file is largely adapted from https://github.com/huggingface/pytorch-pretrained-BERT.
- """
-
-
-
- from ...core.vocabulary import Vocabulary
- import collections
-
- import unicodedata
- from ...io.file_utils import _get_base_url, cached_path
- import numpy as np
- from itertools import chain
- import copy
- import json
- import math
- import os
-
- import torch
- from torch import nn
-
- CONFIG_FILE = 'bert_config.json'
- MODEL_WEIGHTS = 'pytorch_model.bin'
-
-
- def gelu(x):
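- # exact GELU: x * Phi(x), where Phi is the CDF of the standard normal distribution (computed via erf)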
- return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
-
-
- def swish(x):
- return x * torch.sigmoid(x)
-
-
- ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
-
-
- class BertLayerNorm(nn.Module):
- def __init__(self, hidden_size, eps=1e-12):
- super(BertLayerNorm, self).__init__()
- self.weight = nn.Parameter(torch.ones(hidden_size))
- self.bias = nn.Parameter(torch.zeros(hidden_size))
- self.variance_epsilon = eps
-
- def forward(self, x):
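- # normalize over the last dimension: y = weight * (x - mean) / sqrt(var + eps) + bias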
- u = x.mean(-1, keepdim=True)
- s = (x - u).pow(2).mean(-1, keepdim=True)
- x = (x - u) / torch.sqrt(s + self.variance_epsilon)
- return self.weight * x + self.bias
-
-
- class BertEmbeddings(nn.Module):
- def __init__(self, vocab_size, hidden_size, max_position_embeddings, type_vocab_size, hidden_dropout_prob):
- super(BertEmbeddings, self).__init__()
- self.word_embeddings = nn.Embedding(vocab_size, hidden_size)
- self.position_embeddings = nn.Embedding(max_position_embeddings, hidden_size)
- self.token_type_embeddings = nn.Embedding(type_vocab_size, hidden_size)
-
- # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
- # any TensorFlow checkpoint file
- self.LayerNorm = BertLayerNorm(hidden_size, eps=1e-12)
- self.dropout = nn.Dropout(hidden_dropout_prob)
-
- def forward(self, input_ids, token_type_ids=None):
- seq_length = input_ids.size(1)
- position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
- position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
- if token_type_ids is None:
- token_type_ids = torch.zeros_like(input_ids)
-
- words_embeddings = self.word_embeddings(input_ids)
- position_embeddings = self.position_embeddings(position_ids)
- token_type_embeddings = self.token_type_embeddings(token_type_ids)
-
- embeddings = words_embeddings + position_embeddings + token_type_embeddings
- embeddings = self.LayerNorm(embeddings)
- embeddings = self.dropout(embeddings)
- return embeddings
-
-
- class BertSelfAttention(nn.Module):
- def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob):
- super(BertSelfAttention, self).__init__()
- if hidden_size % num_attention_heads != 0:
- raise ValueError(
- "The hidden size (%d) is not a multiple of the number of attention "
- "heads (%d)" % (hidden_size, num_attention_heads))
- self.num_attention_heads = num_attention_heads
- self.attention_head_size = int(hidden_size / num_attention_heads)
- self.all_head_size = self.num_attention_heads * self.attention_head_size
-
- self.query = nn.Linear(hidden_size, self.all_head_size)
- self.key = nn.Linear(hidden_size, self.all_head_size)
- self.value = nn.Linear(hidden_size, self.all_head_size)
-
- self.dropout = nn.Dropout(attention_probs_dropout_prob)
-
- def transpose_for_scores(self, x):
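- # (batch_size, seq_len, all_head_size) -> (batch_size, num_attention_heads, seq_len, attention_head_size)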
- new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
- x = x.view(*new_x_shape)
- return x.permute(0, 2, 1, 3)
-
- def forward(self, hidden_states, attention_mask):
- mixed_query_layer = self.query(hidden_states)
- mixed_key_layer = self.key(hidden_states)
- mixed_value_layer = self.value(hidden_states)
-
- query_layer = self.transpose_for_scores(mixed_query_layer)
- key_layer = self.transpose_for_scores(mixed_key_layer)
- value_layer = self.transpose_for_scores(mixed_value_layer)
-
- # Take the dot product between "query" and "key" to get the raw attention scores.
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
- attention_scores = attention_scores / math.sqrt(self.attention_head_size)
- # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
- attention_scores = attention_scores + attention_mask
-
- # Normalize the attention scores to probabilities.
- attention_probs = nn.Softmax(dim=-1)(attention_scores)
-
- # This is actually dropping out entire tokens to attend to, which might
- # seem a bit unusual, but is taken from the original Transformer paper.
- attention_probs = self.dropout(attention_probs)
-
- context_layer = torch.matmul(attention_probs, value_layer)
- context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
- new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
- context_layer = context_layer.view(*new_context_layer_shape)
- return context_layer
-
-
- class BertSelfOutput(nn.Module):
- def __init__(self, hidden_size, hidden_dropout_prob):
- super(BertSelfOutput, self).__init__()
- self.dense = nn.Linear(hidden_size, hidden_size)
- self.LayerNorm = BertLayerNorm(hidden_size, eps=1e-12)
- self.dropout = nn.Dropout(hidden_dropout_prob)
-
- def forward(self, hidden_states, input_tensor):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
- return hidden_states
-
-
- class BertAttention(nn.Module):
- def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):
- super(BertAttention, self).__init__()
- self.self = BertSelfAttention(hidden_size, num_attention_heads, attention_probs_dropout_prob)
- self.output = BertSelfOutput(hidden_size, hidden_dropout_prob)
-
- def forward(self, input_tensor, attention_mask):
- self_output = self.self(input_tensor, attention_mask)
- attention_output = self.output(self_output, input_tensor)
- return attention_output
-
-
- class BertIntermediate(nn.Module):
- def __init__(self, hidden_size, intermediate_size, hidden_act):
- super(BertIntermediate, self).__init__()
- self.dense = nn.Linear(hidden_size, intermediate_size)
- self.intermediate_act_fn = ACT2FN[hidden_act] \
- if isinstance(hidden_act, str) else hidden_act
-
- def forward(self, hidden_states):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.intermediate_act_fn(hidden_states)
- return hidden_states
-
-
- class BertOutput(nn.Module):
- def __init__(self, hidden_size, intermediate_size, hidden_dropout_prob):
- super(BertOutput, self).__init__()
- self.dense = nn.Linear(intermediate_size, hidden_size)
- self.LayerNorm = BertLayerNorm(hidden_size, eps=1e-12)
- self.dropout = nn.Dropout(hidden_dropout_prob)
-
- def forward(self, hidden_states, input_tensor):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
- return hidden_states
-
-
- class BertLayer(nn.Module):
- def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob,
- intermediate_size, hidden_act):
- super(BertLayer, self).__init__()
- self.attention = BertAttention(hidden_size, num_attention_heads, attention_probs_dropout_prob,
- hidden_dropout_prob)
- self.intermediate = BertIntermediate(hidden_size, intermediate_size, hidden_act)
- self.output = BertOutput(hidden_size, intermediate_size, hidden_dropout_prob)
-
- def forward(self, hidden_states, attention_mask):
- attention_output = self.attention(hidden_states, attention_mask)
- intermediate_output = self.intermediate(attention_output)
- layer_output = self.output(intermediate_output, attention_output)
- return layer_output
-
-
- class BertEncoder(nn.Module):
- def __init__(self, num_hidden_layers, hidden_size, num_attention_heads, attention_probs_dropout_prob,
- hidden_dropout_prob,
- intermediate_size, hidden_act):
- super(BertEncoder, self).__init__()
- layer = BertLayer(hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob,
- intermediate_size, hidden_act)
- self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_hidden_layers)])
-
- def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
- all_encoder_layers = []
- for layer_module in self.layer:
- hidden_states = layer_module(hidden_states, attention_mask)
- if output_all_encoded_layers:
- all_encoder_layers.append(hidden_states)
- if not output_all_encoded_layers:
- all_encoder_layers.append(hidden_states)
- return all_encoder_layers
-
-
- class BertPooler(nn.Module):
- def __init__(self, hidden_size):
- super(BertPooler, self).__init__()
- self.dense = nn.Linear(hidden_size, hidden_size)
- self.activation = nn.Tanh()
-
- def forward(self, hidden_states):
- # We "pool" the model by simply taking the hidden state corresponding
- # to the first token.
- first_token_tensor = hidden_states[:, 0]
- pooled_output = self.dense(first_token_tensor)
- pooled_output = self.activation(pooled_output)
- return pooled_output
-
-
- class BertModel(nn.Module):
- """BERT(Bidirectional Embedding Representations from Transformers).
-
- If you want to use pretrained weights, download them from one of the following URLs.
- sources::
-
- 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
- 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
- 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
- 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
- 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
- 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
- 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
-
-
- Build a BERT model from pretrained weights::
-
- model = BertModel.from_pretrained("path/to/weights/directory")
-
- Build a BERT model with randomly initialized weights::
-
- model = BertModel()
-
- :param int vocab_size: vocabulary size; the default 30522 is the vocabulary size of the English uncased BERT
- :param int hidden_size: hidden size; the default 768 corresponds to BERT base
- :param int num_hidden_layers: number of hidden layers; the default 12 corresponds to BERT base
- :param int num_attention_heads: number of attention heads; the default 12 corresponds to BERT base
- :param int intermediate_size: hidden size of the feed-forward network; the default 3072 corresponds to BERT base
- :param str hidden_act: activation function of the feed-forward network; defaults to ``gelu``
- :param float hidden_dropout_prob: dropout applied to the embeddings and to the attention/feed-forward output layers; defaults to 0.1
- :param float attention_probs_dropout_prob: dropout applied to the attention probabilities; defaults to 0.1
- :param int max_position_embeddings: maximum sequence length; defaults to 512
- :param int type_vocab_size: maximum number of segments (token types); defaults to 2
- :param float initializer_range: standard deviation used to initialize the weights; defaults to 0.02
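-
- Example of a forward pass (a minimal sketch; the token ids below are made up and the
- model is randomly initialized, so the outputs are not meaningful)::
-
-     model = BertModel()
-     input_ids = torch.LongTensor([[101, 7592, 2088, 102]])  # batch_size x seq_len
-     attention_mask = torch.ones_like(input_ids)
-     encoded_layers, pooled_output = model(input_ids, attention_mask=attention_mask)
-     # encoded_layers: list of num_hidden_layers tensors, each batch_size x seq_len x hidden_size
-     # pooled_output: batch_size x hidden_size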
- """
-
- def __init__(self, vocab_size=30522,
- hidden_size=768,
- num_hidden_layers=12,
- num_attention_heads=12,
- intermediate_size=3072,
- hidden_act="gelu",
- hidden_dropout_prob=0.1,
- attention_probs_dropout_prob=0.1,
- max_position_embeddings=512,
- type_vocab_size=2,
- initializer_range=0.02):
- super(BertModel, self).__init__()
- self.hidden_size = hidden_size
- self.embeddings = BertEmbeddings(vocab_size, hidden_size, max_position_embeddings,
- type_vocab_size, hidden_dropout_prob)
- self.encoder = BertEncoder(num_hidden_layers, hidden_size, num_attention_heads,
- attention_probs_dropout_prob, hidden_dropout_prob, intermediate_size,
- hidden_act)
- self.pooler = BertPooler(hidden_size)
- self.initializer_range = initializer_range
-
- self.apply(self.init_bert_weights)
-
- def init_bert_weights(self, module):
- if isinstance(module, (nn.Linear, nn.Embedding)):
- # Slightly different from the TF version which uses truncated_normal for initialization
- # cf https://github.com/pytorch/pytorch/pull/5617
- module.weight.data.normal_(mean=0.0, std=self.initializer_range)
- elif isinstance(module, BertLayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
- if isinstance(module, nn.Linear) and module.bias is not None:
- module.bias.data.zero_()
-
- def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
- if attention_mask is None:
- attention_mask = torch.ones_like(input_ids)
- if token_type_ids is None:
- token_type_ids = torch.zeros_like(input_ids)
-
- # We create a 3D attention mask from a 2D tensor mask.
- # Sizes are [batch_size, 1, 1, to_seq_length]
- # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
- # this attention mask is simpler than the triangular masking of causal attention
- # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
- extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
-
- # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
- # masked positions, this operation will create a tensor which is 0.0 for
- # positions we want to attend and -10000.0 for masked positions.
- # Since we are adding it to the raw scores before the softmax, this is
- # effectively the same as removing these entirely.
- extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
- extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
-
- embedding_output = self.embeddings(input_ids, token_type_ids)
- encoded_layers = self.encoder(embedding_output,
- extended_attention_mask,
- output_all_encoded_layers=output_all_encoded_layers)
- sequence_output = encoded_layers[-1]
- pooled_output = self.pooler(sequence_output)
- if not output_all_encoded_layers:
- encoded_layers = encoded_layers[-1]
- return encoded_layers, pooled_output
-
- @classmethod
- def from_pretrained(cls, pretrained_model_dir, state_dict=None, *inputs, **kwargs):
- # Load config
- config_file = os.path.join(pretrained_model_dir, CONFIG_FILE)
- with open(config_file, "r", encoding="utf-8") as f:
- config = json.load(f)
- # config = BertConfig.from_json_file(config_file)
- # logger.info("Model config {}".format(config))
- # Instantiate model.
- model = cls(*inputs, **config, **kwargs)
- if state_dict is None:
- weights_path = os.path.join(pretrained_model_dir, MODEL_WEIGHTS)
- state_dict = torch.load(weights_path)
-
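- # older TensorFlow-converted checkpoints name the LayerNorm parameters gamma/beta;
- # rename them to the weight/bias attribute names used by BertLayerNorm above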
- old_keys = []
- new_keys = []
- for key in state_dict.keys():
- new_key = None
- if 'gamma' in key:
- new_key = key.replace('gamma', 'weight')
- if 'beta' in key:
- new_key = key.replace('beta', 'bias')
- if new_key:
- old_keys.append(key)
- new_keys.append(new_key)
- for old_key, new_key in zip(old_keys, new_keys):
- state_dict[new_key] = state_dict.pop(old_key)
-
- missing_keys = []
- unexpected_keys = []
- error_msgs = []
- # copy state_dict so _load_from_state_dict can modify it
- metadata = getattr(state_dict, '_metadata', None)
- state_dict = state_dict.copy()
- if metadata is not None:
- state_dict._metadata = metadata
-
- def load(module, prefix=''):
- local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
- module._load_from_state_dict(
- state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
- for name, child in module._modules.items():
- if child is not None:
- load(child, prefix + name + '.')
-
- load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
- if len(missing_keys) > 0:
- print("Weights of {} not initialized from pretrained model: {}".format(
- model.__class__.__name__, missing_keys))
- if len(unexpected_keys) > 0:
- print("Weights from pretrained model not used in {}: {}".format(
- model.__class__.__name__, unexpected_keys))
- return model
-
-
- def whitespace_tokenize(text):
- """Runs basic whitespace cleaning and splitting on a piece of text."""
- text = text.strip()
- if not text:
- return []
- tokens = text.split()
- return tokens
-
-
- class WordpieceTokenizer(object):
- """Runs WordPiece tokenization."""
-
- def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
- self.vocab = vocab
- self.unk_token = unk_token
- self.max_input_chars_per_word = max_input_chars_per_word
-
- def tokenize(self, text):
- """Tokenizes a piece of text into its word pieces.
-
- This uses a greedy longest-match-first algorithm to perform tokenization
- using the given vocabulary.
-
- For example:
- input = "unaffable"
- output = ["un", "##aff", "##able"]
-
- Args:
- text: A single token or whitespace separated tokens. This should have
- already been passed through `BasicTokenizer`.
-
- Returns:
- A list of wordpiece tokens.
- """
-
- output_tokens = []
- for token in whitespace_tokenize(text):
- chars = list(token)
- if len(chars) > self.max_input_chars_per_word:
- output_tokens.append(self.unk_token)
- continue
-
- is_bad = False
- start = 0
- sub_tokens = []
- while start < len(chars):
- end = len(chars)
- cur_substr = None
- while start < end:
- substr = "".join(chars[start:end])
- if start > 0:
- substr = "##" + substr
- if substr in self.vocab:
- cur_substr = substr
- break
- end -= 1
- if cur_substr is None:
- is_bad = True
- break
- sub_tokens.append(cur_substr)
- start = end
-
- if is_bad:
- output_tokens.append(self.unk_token)
- else:
- output_tokens.extend(sub_tokens)
- return output_tokens
-
-
-
- def load_vocab(vocab_file):
- """Loads a vocabulary file into a dictionary."""
- vocab = collections.OrderedDict()
- index = 0
- with open(vocab_file, "r", encoding="utf-8") as reader:
- while True:
- token = reader.readline()
- if not token:
- break
- token = token.strip()
- vocab[token] = index
- index += 1
- return vocab
-
- class BasicTokenizer(object):
- """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
-
- def __init__(self,
- do_lower_case=True,
- never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
- """Constructs a BasicTokenizer.
-
- Args:
- do_lower_case: Whether to lower case the input.
- """
- self.do_lower_case = do_lower_case
- self.never_split = never_split
-
- def tokenize(self, text):
- """Tokenizes a piece of text."""
- text = self._clean_text(text)
- # This was added on November 1st, 2018 for the multilingual and Chinese
- # models. This is also applied to the English models now, but it doesn't
- # matter since the English models were not trained on any Chinese data
- # and generally don't have any Chinese data in them (there are Chinese
- # characters in the vocabulary because Wikipedia does have some Chinese
- # words in the English Wikipedia.).
- text = self._tokenize_chinese_chars(text)
- orig_tokens = whitespace_tokenize(text)
- split_tokens = []
- for token in orig_tokens:
- if self.do_lower_case and token not in self.never_split:
- token = token.lower()
- token = self._run_strip_accents(token)
- split_tokens.extend(self._run_split_on_punc(token))
-
- output_tokens = whitespace_tokenize(" ".join(split_tokens))
- return output_tokens
-
- def _run_strip_accents(self, text):
- """Strips accents from a piece of text."""
- text = unicodedata.normalize("NFD", text)
- output = []
- for char in text:
- cat = unicodedata.category(char)
- if cat == "Mn":
- continue
- output.append(char)
- return "".join(output)
-
- def _run_split_on_punc(self, text):
- """Splits punctuation on a piece of text."""
- if text in self.never_split:
- return [text]
- chars = list(text)
- i = 0
- start_new_word = True
- output = []
- while i < len(chars):
- char = chars[i]
- if _is_punctuation(char):
- output.append([char])
- start_new_word = True
- else:
- if start_new_word:
- output.append([])
- start_new_word = False
- output[-1].append(char)
- i += 1
-
- return ["".join(x) for x in output]
-
- def _tokenize_chinese_chars(self, text):
- """Adds whitespace around any CJK character."""
- output = []
- for char in text:
- cp = ord(char)
- if self._is_chinese_char(cp):
- output.append(" ")
- output.append(char)
- output.append(" ")
- else:
- output.append(char)
- return "".join(output)
-
- def _is_chinese_char(self, cp):
- """Checks whether CP is the codepoint of a CJK character."""
- # This defines a "chinese character" as anything in the CJK Unicode block:
- # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
- #
- # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
- # despite its name. The modern Korean Hangul alphabet is a different block,
- # as is Japanese Hiragana and Katakana. Those alphabets are used to write
- # space-separated words, so they are not treated specially and handled
- # like all of the other languages.
- if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
- (cp >= 0x3400 and cp <= 0x4DBF) or #
- (cp >= 0x20000 and cp <= 0x2A6DF) or #
- (cp >= 0x2A700 and cp <= 0x2B73F) or #
- (cp >= 0x2B740 and cp <= 0x2B81F) or #
- (cp >= 0x2B820 and cp <= 0x2CEAF) or
- (cp >= 0xF900 and cp <= 0xFAFF) or #
- (cp >= 0x2F800 and cp <= 0x2FA1F)): #
- return True
-
- return False
-
- def _clean_text(self, text):
- """Performs invalid character removal and whitespace cleanup on text."""
- output = []
- for char in text:
- cp = ord(char)
- if cp == 0 or cp == 0xfffd or _is_control(char):
- continue
- if _is_whitespace(char):
- output.append(" ")
- else:
- output.append(char)
- return "".join(output)
-
- def _is_whitespace(char):
- """Checks whether `chars` is a whitespace character."""
- # \t, \n, and \r are technically contorl characters but we treat them
- # as whitespace since they are generally considered as such.
- if char == " " or char == "\t" or char == "\n" or char == "\r":
- return True
- cat = unicodedata.category(char)
- if cat == "Zs":
- return True
- return False
-
-
- def _is_control(char):
- """Checks whether `chars` is a control character."""
- # These are technically control characters but we count them as whitespace
- # characters.
- if char == "\t" or char == "\n" or char == "\r":
- return False
- cat = unicodedata.category(char)
- if cat.startswith("C"):
- return True
- return False
-
-
- def _is_punctuation(char):
- """Checks whether `chars` is a punctuation character."""
- cp = ord(char)
- # We treat all non-letter/number ASCII as punctuation.
- # Characters such as "^", "$", and "`" are not in the Unicode
- # Punctuation class but we treat them as punctuation anyways, for
- # consistency.
- if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
- (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
- return True
- cat = unicodedata.category(char)
- if cat.startswith("P"):
- return True
- return False
-
-
- class BertTokenizer(object):
- """Runs end-to-end tokenization: punctuation splitting + wordpiece"""
-
- def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
- never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
- """Constructs a BertTokenizer.
-
- Args:
- vocab_file: Path to a one-wordpiece-per-line vocabulary file
- do_lower_case: Whether to lower case the input
- Only has an effect when do_wordpiece_only=False
- do_basic_tokenize: Whether to do basic tokenization before wordpiece.
- max_len: An artificial maximum length to truncate tokenized sequences to;
- Effective maximum length is always the minimum of this
- value (if specified) and the underlying BERT model's
- sequence length.
- never_split: List of tokens which will never be split during tokenization.
- Only has an effect when do_wordpiece_only=False
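-
- Example:
-     A minimal sketch (assumes `vocab_file` points at a standard BERT vocab.txt):
-
-     tokenizer = BertTokenizer(vocab_file, do_lower_case=True)
-     tokens = tokenizer.tokenize("Hello, world!")  # e.g. ['hello', ',', 'world', '!']
-     ids = tokenizer.convert_tokens_to_ids(tokens)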
- """
- if not os.path.isfile(vocab_file):
- raise ValueError(
- "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
- "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
- self.vocab = load_vocab(vocab_file)
- self.ids_to_tokens = collections.OrderedDict(
- [(ids, tok) for tok, ids in self.vocab.items()])
- self.do_basic_tokenize = do_basic_tokenize
- if do_basic_tokenize:
- self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
- never_split=never_split)
- self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
- self.max_len = max_len if max_len is not None else int(1e12)
-
- def tokenize(self, text):
- split_tokens = []
- if self.do_basic_tokenize:
- for token in self.basic_tokenizer.tokenize(text):
- for sub_token in self.wordpiece_tokenizer.tokenize(token):
- split_tokens.append(sub_token)
- else:
- split_tokens = self.wordpiece_tokenizer.tokenize(text)
- return split_tokens
-
- def convert_tokens_to_ids(self, tokens):
- """Converts a sequence of tokens into ids using the vocab."""
- ids = []
- for token in tokens:
- ids.append(self.vocab[token])
- if len(ids) > self.max_len:
- print(
- "Token indices sequence length is longer than the specified maximum "
- " sequence length for this BERT model ({} > {}). Running this"
- " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
- )
- return ids
-
- def convert_ids_to_tokens(self, ids):
- """Converts a sequence of ids in wordpiece tokens using the vocab."""
- tokens = []
- for i in ids:
- tokens.append(self.ids_to_tokens[i])
- return tokens
-
- def save_vocabulary(self, vocab_path):
- """Save the tokenizer vocabulary to a directory or file."""
- index = 0
- if os.path.isdir(vocab_path):
- vocab_file = os.path.join(vocab_path, VOCAB_NAME)
- with open(vocab_file, "w", encoding="utf-8") as writer:
- for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
- if index != token_index:
- print("Saving vocabulary to {}: vocabulary indices are not consecutive."
- " Please check that the vocabulary is not corrupted!".format(vocab_file))
- index = token_index
- writer.write(token + u'\n')
- index += 1
- return vocab_file
-
- @classmethod
- def from_pretrained(cls, model_dir, *inputs, **kwargs):
- """
- Given a model directory, read its vocabulary file and build the tokenizer.
-
- """
- pretrained_model_name_or_path = os.path.join(model_dir, VOCAB_NAME)
- print("loading vocabulary file {}".format(pretrained_model_name_or_path))
- max_len = 512
- kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
- # Instantiate tokenizer.
- tokenizer = cls(pretrained_model_name_or_path, *inputs, **kwargs)
- return tokenizer
-
- VOCAB_NAME = 'vocab.txt'
-
- class _WordBertModel(nn.Module):
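- """
- Runs BERT over word-level input: every word in `vocab` is pre-tokenized into BERT word
- pieces, the word pieces are fed through BertModel, and the word-piece hidden states are
- pooled back to one vector per word according to `pool_method` ('first', 'last', 'avg' or 'max').
- If `include_cls_sep` is True, the [CLS] and [SEP] positions are also kept in the output.
- """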
- def __init__(self, model_dir:str, vocab:Vocabulary, layers:str='-1', pool_method:str='first', include_cls_sep:bool=False):
- super().__init__()
-
- self.tokenzier = BertTokenizer.from_pretrained(model_dir)
- self.encoder = BertModel.from_pretrained(model_dir)
- # check that the requested layer indices are valid for this encoder
- encoder_layer_number = len(self.encoder.encoder.layer)
- self.layers = list(map(int, layers.split(',')))
- for layer in self.layers:
- if layer<0:
- assert -layer<=encoder_layer_number, f"The layer index:{layer} is out of scope for " \
- f"a bert model with {encoder_layer_number} layers."
- else:
- assert layer<encoder_layer_number, f"The layer index:{layer} is out of scope for " \
- f"a bert model with {encoder_layer_number} layers."
-
- assert pool_method in ('avg', 'max', 'first', 'last')
- self.pool_method = pool_method
-
- self.include_cls_sep = include_cls_sep
-
- # pre-compute the word pieces of every word in the vocab; [CLS] and [SEP] need extra handling
- print("Start generating word pieces for words.")
- word_to_wordpieces = []
- word_pieces_lengths = []
- for word, index in vocab:
- if index == vocab.padding_idx: # pad is a special token
- word = '[PAD]'
- elif index == vocab.unknown_idx:
- word = '[UNK]'
- word_pieces = self.tokenzier.wordpiece_tokenizer.tokenize(word)
- word_pieces = self.tokenzier.convert_tokens_to_ids(word_pieces)
- word_to_wordpieces.append(word_pieces)
- word_pieces_lengths.append(len(word_pieces))
- # word-piece ids of the special tokens; these values are fed to BERT directly
- self._cls_index = self.tokenzier.convert_tokens_to_ids(['[CLS]'])[0]
- self._sep_index = self.tokenzier.convert_tokens_to_ids(['[SEP]'])[0]
- self._pad_index = vocab.padding_idx
- self._wordpiece_pad_index = self.tokenzier.convert_tokens_to_ids(['[PAD]'])[0] # used to pad word_pieces
- word_to_wordpieces.append(self.tokenzier.convert_tokens_to_ids(['[CLS]']))
- word_to_wordpieces.append(self.tokenzier.convert_tokens_to_ids(['[SEP]']))
- self.word_to_wordpieces = np.array(word_to_wordpieces, dtype=object) # ragged: one list of word-piece ids per word
- self.word_pieces_lengths = nn.Parameter(torch.LongTensor(word_pieces_lengths), requires_grad=False)
- print("Successfully generated word pieces.")
-
- def forward(self, words):
- """
-
- :param words: torch.LongTensor, batch_size x max_len
- :return: torch.FloatTensor, num_layers x batch_size x max_len x hidden_size, or num_layers x batch_size x (max_len+2) x hidden_size when include_cls_sep is True
- """
- batch_size, max_word_len = words.size()
- seq_len = words.ne(self._pad_index).sum(dim=-1)
- batch_word_pieces_length = self.word_pieces_lengths[words] # batch_size x max_len
- word_pieces_lengths = batch_word_pieces_length.sum(dim=-1)
- max_word_piece_length = word_pieces_lengths.max().item()
- # +2 because [CLS] and [SEP] will be added
- word_pieces = words.new_full((batch_size, max_word_piece_length+2), fill_value=self._wordpiece_pad_index)
- word_pieces[:, 0].fill_(self._cls_index)
- word_pieces[torch.arange(batch_size).to(words), word_pieces_lengths+1] = self._sep_index
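- # resulting layout of each row: [CLS] piece_1 ... piece_m [SEP] [PAD] ...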
- attn_masks = torch.zeros_like(word_pieces)
- # 1. look up the word-piece ids of each word and their corresponding spans
- word_indexes = words.tolist()
- for i in range(batch_size):
- word_pieces_i = list(chain(*self.word_to_wordpieces[word_indexes[i]]))
- word_pieces[i, 1:len(word_pieces_i)+1] = torch.LongTensor(word_pieces_i)
- attn_masks[i, :len(word_pieces_i)+2].fill_(1)
- # 2. run BERT and pool the word-piece hidden states back to word level
- # all_outputs: [batch_size x max_len x hidden_size, batch_size x max_len x hidden_size, ...]
- bert_outputs, _ = self.encoder(word_pieces, token_type_ids=None, attention_mask=attn_masks,
- output_all_encoded_layers=True)
- # output_layers = [self.layers] # len(self.layers) x batch_size x max_word_piece_length x hidden_size
-
- if self.include_cls_sep:
- outputs = bert_outputs[-1].new_zeros(len(self.layers), batch_size, max_word_len + 2,
- bert_outputs[-1].size(-1))
- s_shift = 1
- else:
- outputs = bert_outputs[-1].new_zeros(len(self.layers), batch_size, max_word_len,
- bert_outputs[-1].size(-1))
- s_shift = 0
- batch_word_pieces_cum_length = batch_word_pieces_length.new_zeros(batch_size, max_word_len + 1)
- batch_word_pieces_cum_length[:, 1:] = batch_word_pieces_length.cumsum(dim=-1) # batch_size x max_len
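- # batch_word_pieces_cum_length[i, j] is the word-piece offset (excluding [CLS]) at which word j of sentence i starts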
- for l_index, l in enumerate(self.layers):
- output_layer = bert_outputs[l]
- # collapse the word-piece representations back to word-level representations
- truncate_output_layer = output_layer[:, 1:-1] # drop [CLS] and [SEP]; batch_size x len x hidden_size
- outputs_seq_len = seq_len + s_shift
- if self.pool_method == 'first':
- for i in range(batch_size):
- i_word_pieces_cum_length = batch_word_pieces_cum_length[i, :seq_len[i]] # start position of each word
- outputs[l_index, i, s_shift:outputs_seq_len[i]] = truncate_output_layer[i, i_word_pieces_cum_length] # num_layer x batch_size x len x hidden_size
- elif self.pool_method == 'last':
- for i in range(batch_size):
- i_word_pieces_cum_length = batch_word_pieces_cum_length[i, 1:seq_len[i]+1] - 1 # end position of each word
- outputs[l_index, i, s_shift:outputs_seq_len[i]] = truncate_output_layer[i, i_word_pieces_cum_length]
- elif self.pool_method == 'max':
- for i in range(batch_size):
- for j in range(seq_len[i]):
- start, end = batch_word_pieces_cum_length[i, j], batch_word_pieces_cum_length[i, j+1]
- outputs[l_index, i, j+s_shift], _ = torch.max(truncate_output_layer[i, start:end], dim=-2)
- else:
- for i in range(batch_size):
- for j in range(seq_len[i]):
- start, end = batch_word_pieces_cum_length[i, j], batch_word_pieces_cum_length[i, j+1]
- outputs[l_index, i, j+s_shift] = torch.mean(truncate_output_layer[i, start:end], dim=-2)
- if self.include_cls_sep:
- # copy this layer's [CLS] and [SEP] representations into the per-layer output
- outputs[l_index, :, 0] = output_layer[:, 0]
- outputs[l_index, torch.arange(batch_size).to(words), seq_len + s_shift] = output_layer[torch.arange(batch_size).to(words), word_pieces_lengths + s_shift]
- # 3. return the final embedding result
- return outputs
-
-
- class _WordPieceBertModel(nn.Module):
- """
- This module computes BERT representations directly at the word-piece level.
-
- """
- def __init__(self, model_dir:str, vocab:Vocabulary, layers:str='-1'):
- super().__init__()
-
- self.tokenzier = BertTokenizer.from_pretrained(model_dir)
- self.encoder = BertModel.from_pretrained(model_dir)
- # check that the requested layer indices are valid for this encoder
- encoder_layer_number = len(self.encoder.encoder.layer)
- self.layers = list(map(int, layers.split(',')))
- for layer in self.layers:
- if layer<0:
- assert -layer<=encoder_layer_number, f"The layer index:{layer} is out of scope for " \
- f"a bert model with {encoder_layer_number} layers."
- else:
- assert layer<encoder_layer_number, f"The layer index:{layer} is out of scope for " \
- f"a bert model with {encoder_layer_number} layers."
-
- # pre-compute the word pieces of every word in the vocab; [CLS] and [SEP] need extra handling
- print("Start generating word pieces for words.")
- self.word_to_wordpieces = []
- self.word_pieces_length = []
- for word, index in vocab:
- if index == vocab.padding_idx: # pad is a special token
- word = '[PAD]'
- elif index == vocab.unknown_idx:
- word = '[UNK]'
- word_pieces = self.tokenzier.wordpiece_tokenizer.tokenize(word)
- word_pieces = self.tokenzier.convert_tokens_to_ids(word_pieces)
- self.word_to_wordpieces.append(word_pieces)
- self.word_pieces_length.append(len(word_pieces))
- # word-piece ids of the special tokens; these values are fed to BERT directly
- self._cls_index = self.tokenzier.convert_tokens_to_ids(['[CLS]'])[0]
- self._sep_index = self.tokenzier.convert_tokens_to_ids(['[SEP]'])[0]
- self._pad_index = vocab.padding_idx
- self._wordpiece_pad_index = self.tokenzier.convert_tokens_to_ids(['[PAD]'])[0] # used to pad word_pieces
- self.word_to_wordpieces.append(self.tokenzier.convert_tokens_to_ids(['[CLS]']))
- self.word_to_wordpieces.append(self.tokenzier.convert_tokens_to_ids(['[SEP]']))
- self.word_to_wordpieces = np.array(self.word_to_wordpieces, dtype=object) # ragged: one list of word-piece ids per word
- print("Successfully generated word pieces.")
-
- def index_dataset(self, *datasets):
- """
- Use the BERT tokenizer to add a word_pieces column to each dataset and set it as input.
- The added word_pieces already include [CLS] and [SEP], and the pad value of the
- word_pieces column is set to BERT's [PAD] id.
-
- :param datasets: DataSet objects
- :return:
- """
- def convert_words_to_word_pieces(words):
- word_pieces = list(chain(*self.word_to_wordpieces[words].tolist()))
- word_pieces = [self._cls_index] + word_pieces + [self._sep_index]
- return word_pieces
-
- for index, dataset in enumerate(datasets):
- try:
- dataset.apply_field(convert_words_to_word_pieces, field_name='words', new_field_name='word_pieces',
- is_input=True)
- dataset.set_pad_val('word_pieces', self._wordpiece_pad_index)
- except Exception as e:
- print(f"Exception happens when processing the {index} dataset.")
- raise e
-
- def forward(self, word_pieces, token_type_ids=None):
- """
-
- :param word_pieces: torch.LongTensor, batch_size x max_len
- :param token_type_ids: torch.LongTensor, batch_size x max_len
- :return: torch.FloatTensor, num_layers x batch_size x max_len x hidden_size
- """
- batch_size, max_len = word_pieces.size()
-
- attn_masks = word_pieces.ne(self._wordpiece_pad_index) # word_pieces is padded with the BERT [PAD] id
- bert_outputs, _ = self.encoder(word_pieces, token_type_ids=token_type_ids, attention_mask=attn_masks,
- output_all_encoded_layers=True)
- # output_layers = [self.layers] # len(self.layers) x batch_size x max_word_piece_length x hidden_size
- outputs = bert_outputs[0].new_zeros((len(self.layers), batch_size, max_len, bert_outputs[0].size(-1)))
- for l_index, l in enumerate(self.layers):
- outputs[l_index] = bert_outputs[l]
- return outputs
|