- """undocumented
- 这个页面的代码很大程度上参考(复制粘贴)了https://github.com/huggingface/pytorch-pretrained-BERT的代码, 如果你发现该代码对你
- 有用,也请引用一下他们。
- """
-
- __all__ = [
- "BertModel"
- ]
-
- import collections
- import copy
- import json
- import math
- import os
- import unicodedata
-
- import torch
- from torch import nn
-
- from ..utils import _get_file_name_base_on_postfix
- from ...io.file_utils import _get_embedding_url, cached_path, PRETRAINED_BERT_MODEL_DIR
- from ...core import logger
-
- CONFIG_FILE = 'bert_config.json'
- VOCAB_NAME = 'vocab.txt'
-
-
- class BertConfig(object):
- """Configuration class to store the configuration of a `BertModel`.
- """
-
- def __init__(self,
- vocab_size_or_config_json_file,
- hidden_size=768,
- num_hidden_layers=12,
- num_attention_heads=12,
- intermediate_size=3072,
- hidden_act="gelu",
- hidden_dropout_prob=0.1,
- attention_probs_dropout_prob=0.1,
- max_position_embeddings=512,
- type_vocab_size=2,
- initializer_range=0.02,
- layer_norm_eps=1e-12):
- """Constructs BertConfig.
-
- Args:
- vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
- hidden_size: Size of the encoder layers and the pooler layer.
- num_hidden_layers: Number of hidden layers in the Transformer encoder.
- num_attention_heads: Number of attention heads for each attention layer in
- the Transformer encoder.
- intermediate_size: The size of the "intermediate" (i.e., feed-forward)
- layer in the Transformer encoder.
- hidden_act: The non-linear activation function (function or string) in the
- encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
- hidden_dropout_prob: The dropout probability for all fully connected
- layers in the embeddings, encoder, and pooler.
- attention_probs_dropout_prob: The dropout ratio for the attention
- probabilities.
- max_position_embeddings: The maximum sequence length that this model might
- ever be used with. Typically set this to something large just in case
- (e.g., 512 or 1024 or 2048).
- type_vocab_size: The vocabulary size of the `token_type_ids` passed into
- `BertModel`.
- initializer_range: The stddev of the truncated_normal_initializer for
- initializing all weight matrices.
- layer_norm_eps: The epsilon used by LayerNorm.
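-
- Example (illustrative sketch; the file name below is hypothetical):
- config = BertConfig(vocab_size_or_config_json_file=30522)
- config.to_json_file("bert_config.json")
- same_config = BertConfig("bert_config.json")  # reloads the values written above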
- """
- if isinstance(vocab_size_or_config_json_file, str):
- with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
- json_config = json.loads(reader.read())
- for key, value in json_config.items():
- self.__dict__[key] = value
- elif isinstance(vocab_size_or_config_json_file, int):
- self.vocab_size = vocab_size_or_config_json_file
- self.hidden_size = hidden_size
- self.num_hidden_layers = num_hidden_layers
- self.num_attention_heads = num_attention_heads
- self.hidden_act = hidden_act
- self.intermediate_size = intermediate_size
- self.hidden_dropout_prob = hidden_dropout_prob
- self.attention_probs_dropout_prob = attention_probs_dropout_prob
- self.max_position_embeddings = max_position_embeddings
- self.type_vocab_size = type_vocab_size
- self.initializer_range = initializer_range
- self.layer_norm_eps = layer_norm_eps
- else:
- raise ValueError("First argument must be either a vocabulary size (int)"
- "or the path to a pretrained model config file (str)")
-
- @classmethod
- def from_dict(cls, json_object):
- """Constructs a `BertConfig` from a Python dictionary of parameters."""
- config = BertConfig(vocab_size_or_config_json_file=-1)
- for key, value in json_object.items():
- config.__dict__[key] = value
- return config
-
- @classmethod
- def from_json_file(cls, json_file):
- """Constructs a `BertConfig` from a json file of parameters."""
- with open(json_file, "r", encoding='utf-8') as reader:
- text = reader.read()
- return cls.from_dict(json.loads(text))
-
- def __repr__(self):
- return str(self.to_json_string())
-
- def to_dict(self):
- """Serializes this instance to a Python dictionary."""
- output = copy.deepcopy(self.__dict__)
- return output
-
- def to_json_string(self):
- """Serializes this instance to a JSON string."""
- return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
-
- def to_json_file(self, json_file_path):
- """ Save this instance to a json file."""
- with open(json_file_path, "w", encoding='utf-8') as writer:
- writer.write(self.to_json_string())
-
-
- def gelu(x):
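- """Gaussian Error Linear Unit (erf form): gelu(x) = x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2)))."""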
- return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
-
-
- def swish(x):
- return x * torch.sigmoid(x)
-
-
- ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
-
-
- def _get_bert_dir(model_dir_or_name: str = 'en-base-uncased'):
- if model_dir_or_name.lower() in PRETRAINED_BERT_MODEL_DIR:
- model_url = _get_embedding_url('bert', model_dir_or_name.lower())
- model_dir = cached_path(model_url, name='embedding')
- # check whether the path exists locally
- elif os.path.isdir(os.path.abspath(os.path.expanduser(model_dir_or_name))):
- model_dir = os.path.abspath(os.path.expanduser(model_dir_or_name))
- else:
- logger.error(f"Cannot recognize BERT dir or name ``{model_dir_or_name}``.")
- raise ValueError(f"Cannot recognize BERT dir or name ``{model_dir_or_name}``.")
- return str(model_dir)
-
-
- class BertLayerNorm(nn.Module):
- def __init__(self, hidden_size, eps=1e-12):
- """Construct a layernorm module in the TF style (epsilon inside the square root).
- """
- super(BertLayerNorm, self).__init__()
- self.weight = nn.Parameter(torch.ones(hidden_size))
- self.bias = nn.Parameter(torch.zeros(hidden_size))
- self.variance_epsilon = eps
-
- def forward(self, x):
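- # Normalize over the last dimension: y = weight * (x - mean) / sqrt(var + eps) + bias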
- u = x.mean(-1, keepdim=True)
- s = (x - u).pow(2).mean(-1, keepdim=True)
- x = (x - u) / torch.sqrt(s + self.variance_epsilon)
- return self.weight * x + self.bias
-
-
- class BertEmbeddings(nn.Module):
- """Construct the embeddings from word, position and token_type embeddings.
- """
-
- def __init__(self, config):
- super(BertEmbeddings, self).__init__()
- self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
- self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
- self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
-
- # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
- # any TensorFlow checkpoint file
- self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
-
- def forward(self, input_ids, token_type_ids=None):
- seq_length = input_ids.size(1)
- position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
- position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
- if token_type_ids is None:
- token_type_ids = torch.zeros_like(input_ids)
-
- words_embeddings = self.word_embeddings(input_ids)
- position_embeddings = self.position_embeddings(position_ids)
- token_type_embeddings = self.token_type_embeddings(token_type_ids)
-
- embeddings = words_embeddings + position_embeddings + token_type_embeddings
- embeddings = self.LayerNorm(embeddings)
- embeddings = self.dropout(embeddings)
- return embeddings
-
-
- class BertSelfAttention(nn.Module):
- def __init__(self, config):
- super(BertSelfAttention, self).__init__()
- if config.hidden_size % config.num_attention_heads != 0:
- raise ValueError(
- "The hidden size (%d) is not a multiple of the number of attention "
- "heads (%d)" % (config.hidden_size, config.num_attention_heads))
- self.num_attention_heads = config.num_attention_heads
- self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
- self.all_head_size = self.num_attention_heads * self.attention_head_size
-
- self.query = nn.Linear(config.hidden_size, self.all_head_size)
- self.key = nn.Linear(config.hidden_size, self.all_head_size)
- self.value = nn.Linear(config.hidden_size, self.all_head_size)
-
- self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
-
- def transpose_for_scores(self, x):
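- # Reshape (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size)
- # so that attention scores can be computed independently for each head.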
- new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
- x = x.view(*new_x_shape)
- return x.permute(0, 2, 1, 3)
-
- def forward(self, hidden_states, attention_mask):
- mixed_query_layer = self.query(hidden_states)
- mixed_key_layer = self.key(hidden_states)
- mixed_value_layer = self.value(hidden_states)
-
- query_layer = self.transpose_for_scores(mixed_query_layer)
- key_layer = self.transpose_for_scores(mixed_key_layer)
- value_layer = self.transpose_for_scores(mixed_value_layer)
-
- # Take the dot product between "query" and "key" to get the raw attention scores.
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
- attention_scores = attention_scores / math.sqrt(self.attention_head_size)
- # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
- attention_scores = attention_scores + attention_mask
-
- # Normalize the attention scores to probabilities.
- attention_probs = nn.Softmax(dim=-1)(attention_scores)
-
- # This is actually dropping out entire tokens to attend to, which might
- # seem a bit unusual, but is taken from the original Transformer paper.
- attention_probs = self.dropout(attention_probs)
-
- context_layer = torch.matmul(attention_probs, value_layer)
- context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
- new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
- context_layer = context_layer.view(*new_context_layer_shape)
- return context_layer
-
-
- class BertSelfOutput(nn.Module):
- def __init__(self, config):
- super(BertSelfOutput, self).__init__()
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
- self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
-
- def forward(self, hidden_states, input_tensor):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
- return hidden_states
-
-
- class BertAttention(nn.Module):
- def __init__(self, config):
- super(BertAttention, self).__init__()
- self.self = BertSelfAttention(config)
- self.output = BertSelfOutput(config)
-
- def forward(self, input_tensor, attention_mask):
- self_output = self.self(input_tensor, attention_mask)
- attention_output = self.output(self_output, input_tensor)
- return attention_output
-
-
- class BertIntermediate(nn.Module):
- def __init__(self, config):
- super(BertIntermediate, self).__init__()
- self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
- if isinstance(config.hidden_act, str):
- self.intermediate_act_fn = ACT2FN[config.hidden_act]
- else:
- self.intermediate_act_fn = config.hidden_act
-
- def forward(self, hidden_states):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.intermediate_act_fn(hidden_states)
- return hidden_states
-
-
- class BertOutput(nn.Module):
- def __init__(self, config):
- super(BertOutput, self).__init__()
- self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
- self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
-
- def forward(self, hidden_states, input_tensor):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
- return hidden_states
-
-
- class BertLayer(nn.Module):
- def __init__(self, config):
- super(BertLayer, self).__init__()
- self.attention = BertAttention(config)
- self.intermediate = BertIntermediate(config)
- self.output = BertOutput(config)
-
- def forward(self, hidden_states, attention_mask):
- attention_output = self.attention(hidden_states, attention_mask)
- intermediate_output = self.intermediate(attention_output)
- layer_output = self.output(intermediate_output, attention_output)
- return layer_output
-
-
- class BertEncoder(nn.Module):
- def __init__(self, config):
- super(BertEncoder, self).__init__()
- layer = BertLayer(config)
- self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
-
- def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
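- # Returns a list of hidden states: one entry per layer when output_all_encoded_layers is True,
- # otherwise a single-element list holding only the final layer.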
- all_encoder_layers = []
- for layer_module in self.layer:
- hidden_states = layer_module(hidden_states, attention_mask)
- if output_all_encoded_layers:
- all_encoder_layers.append(hidden_states)
- if not output_all_encoded_layers:
- all_encoder_layers.append(hidden_states)
- return all_encoder_layers
-
-
- class BertPooler(nn.Module):
- def __init__(self, config):
- super(BertPooler, self).__init__()
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
- self.activation = nn.Tanh()
-
- def forward(self, hidden_states):
- # We "pool" the model by simply taking the hidden state corresponding
- # to the first token.
- first_token_tensor = hidden_states[:, 0]
- pooled_output = self.dense(first_token_tensor)
- pooled_output = self.activation(pooled_output)
- return pooled_output
-
-
- class BertModel(nn.Module):
- """
- Alias: :class:`fastNLP.modules.BertModel` :class:`fastNLP.modules.encoder.BertModel`
-
- BERT (Bidirectional Encoder Representations from Transformers).
-
- To build a BERT model from pretrained weights::
-
- model = BertModel.from_pretrained(model_dir_or_name)
-
- To build a BERT model with randomly initialized weights, pass in a :class:`BertConfig`::
-
- model = BertModel(BertConfig(vocab_size))
-
- :param int vocab_size: vocabulary size; the default of 30522 matches the BERT English uncased vocabulary
- :param int hidden_size: hidden size; 768 for BERT base
- :param int num_hidden_layers: number of hidden layers; 12 for BERT base
- :param int num_attention_heads: number of attention heads; 12 for BERT base
- :param int intermediate_size: hidden size of the feed-forward (FFN) layers; 3072 for BERT base
- :param str hidden_act: activation function of the FFN layers, ``gelu`` by default
- :param float hidden_dropout_prob: dropout applied in the FFN layers, 0.1 by default
- :param float attention_probs_dropout_prob: dropout applied to the attention probabilities, 0.1 by default
- :param int max_position_embeddings: maximum sequence length, 512 by default
- :param int type_vocab_size: maximum number of segments, 2 by default
- :param float initializer_range: standard deviation used for weight initialization, 0.02 by default
- """
-
- def __init__(self, config, *inputs, **kwargs):
- super(BertModel, self).__init__()
- if not isinstance(config, BertConfig):
- raise ValueError(
- "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
- "To create a model from a Google pretrained model use "
- "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
- self.__class__.__name__, self.__class__.__name__
- ))
- self.config = config
- self.hidden_size = self.config.hidden_size
- self.embeddings = BertEmbeddings(config)
- self.encoder = BertEncoder(config)
- self.pooler = BertPooler(config)
- self.apply(self.init_bert_weights)
-
- def init_bert_weights(self, module):
- """ Initialize the weights.
- """
- if isinstance(module, (nn.Linear, nn.Embedding)):
- # Slightly different from the TF version which uses truncated_normal for initialization
- # cf https://github.com/pytorch/pytorch/pull/5617
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- elif isinstance(module, BertLayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
- if isinstance(module, nn.Linear) and module.bias is not None:
- module.bias.data.zero_()
-
- def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
- if attention_mask is None:
- attention_mask = torch.ones_like(input_ids)
- if token_type_ids is None:
- token_type_ids = torch.zeros_like(input_ids)
-
- # We create a 3D attention mask from a 2D tensor mask.
- # Sizes are [batch_size, 1, 1, to_seq_length]
- # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
- # this attention mask is simpler than the triangular masking of causal attention
- # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
- extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
-
- # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
- # masked positions, this operation will create a tensor which is 0.0 for
- # positions we want to attend and -10000.0 for masked positions.
- # Since we are adding it to the raw scores before the softmax, this is
- # effectively the same as removing these entirely.
- extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
- extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
-
- embedding_output = self.embeddings(input_ids, token_type_ids)
- encoded_layers = self.encoder(embedding_output,
- extended_attention_mask,
- output_all_encoded_layers=output_all_encoded_layers)
- sequence_output = encoded_layers[-1]
- pooled_output = self.pooler(sequence_output)
- if not output_all_encoded_layers:
- encoded_layers = encoded_layers[-1]
- return encoded_layers, pooled_output
-
- @classmethod
- def from_pretrained(cls, pretrained_model_dir_or_name, *inputs, **kwargs):
- state_dict = kwargs.get('state_dict', None)
- kwargs.pop('state_dict', None)
- kwargs.pop('cache_dir', None)
- kwargs.pop('from_tf', None)
-
- # get model dir from name or dir
- pretrained_model_dir = _get_bert_dir(pretrained_model_dir_or_name)
-
- # Load config
- config_file = _get_file_name_base_on_postfix(pretrained_model_dir, '.json')
- config = BertConfig.from_json_file(config_file)
- # logger.info("Model config {}".format(config))
- # Instantiate model.
- model = cls(config, *inputs, **kwargs)
- if state_dict is None:
- weights_path = _get_file_name_base_on_postfix(pretrained_model_dir, '.bin')
- state_dict = torch.load(weights_path, map_location='cpu')
- else:
- logger.error('Loading parameters from a user-supplied `state_dict` is not supported.')
- raise RuntimeError('Loading parameters from a user-supplied `state_dict` is not supported.')
-
- old_keys = []
- new_keys = []
- for key in state_dict.keys():
- new_key = None
- if 'gamma' in key:
- new_key = key.replace('gamma', 'weight')
- if 'beta' in key:
- new_key = key.replace('beta', 'bias')
- if new_key:
- old_keys.append(key)
- new_keys.append(new_key)
- for old_key, new_key in zip(old_keys, new_keys):
- state_dict[new_key] = state_dict.pop(old_key)
-
- missing_keys = []
- unexpected_keys = []
- error_msgs = []
- # copy state_dict so _load_from_state_dict can modify it
- metadata = getattr(state_dict, '_metadata', None)
- state_dict = state_dict.copy()
- if metadata is not None:
- state_dict._metadata = metadata
-
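- # Recursively apply Module._load_from_state_dict to every submodule, extending the key prefix as we
- # descend; missing/unexpected keys and errors are collected instead of being raised immediately.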
- def load(module, prefix=''):
- local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
- module._load_from_state_dict(
- state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
- for name, child in module._modules.items():
- if child is not None:
- load(child, prefix + name + '.')
-
- load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
- if len(missing_keys) > 0:
- logger.warning("Weights of {} not initialized from pretrained model: {}".format(
- model.__class__.__name__, missing_keys))
- if len(unexpected_keys) > 0:
- logger.warning("Weights from pretrained model not used in {}: {}".format(
- model.__class__.__name__, unexpected_keys))
-
- logger.info(f"Load pre-trained BERT parameters from file {weights_path}.")
- return model
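-
-
- # Example (illustrative): encode a batch of token ids with a randomly initialized BERT.
- # The sizes below are arbitrary; for real checkpoints use BertModel.from_pretrained(...).
- #
- # config = BertConfig(vocab_size_or_config_json_file=30522)
- # model = BertModel(config)
- # input_ids = torch.randint(0, config.vocab_size, (2, 8))
- # last_layer, pooled = model(input_ids, output_all_encoded_layers=False)
- # last_layer has shape (2, 8, 768) and pooled has shape (2, 768).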
-
-
- def whitespace_tokenize(text):
- """Runs basic whitespace cleaning and splitting on a piece of text."""
- text = text.strip()
- if not text:
- return []
- tokens = text.split()
- return tokens
-
-
- class WordpieceTokenizer(object):
- """Runs WordPiece tokenization."""
-
- def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
- self.vocab = vocab
- self.unk_token = unk_token
- self.max_input_chars_per_word = max_input_chars_per_word
-
- def tokenize(self, text):
- """Tokenizes a piece of text into its word pieces.
-
- This uses a greedy longest-match-first algorithm to perform tokenization
- using the given vocabulary.
-
- For example:
- input = "unaffable"
- output = ["un", "##aff", "##able"]
-
- Args:
- text: A single token or whitespace separated tokens. This should have
- already been passed through `BasicTokenizer`.
-
- Returns:
- A list of wordpiece tokens.
- """
-
- output_tokens = []
- for token in whitespace_tokenize(text):
- chars = list(token)
- if len(chars) > self.max_input_chars_per_word:
- output_tokens.append(self.unk_token)
- continue
-
- is_bad = False
- start = 0
- sub_tokens = []
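- # Greedy longest-match-first: at each position take the longest substring that is in the vocab,
- # prefixing non-initial pieces with "##"; if no piece matches, the whole token becomes unk_token.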
- while start < len(chars):
- end = len(chars)
- cur_substr = None
- while start < end:
- substr = "".join(chars[start:end])
- if start > 0:
- substr = "##" + substr
- if substr in self.vocab:
- cur_substr = substr
- break
- end -= 1
- if cur_substr is None:
- is_bad = True
- break
- sub_tokens.append(cur_substr)
- start = end
-
- if is_bad:
- output_tokens.append(self.unk_token)
- else:
- output_tokens.extend(sub_tokens)
- if len(output_tokens) == 0:  # guard against input that is only whitespace or newline characters
- return [self.unk_token]
- return output_tokens
-
-
- def load_vocab(vocab_file):
- """Loads a vocabulary file into a dictionary."""
- vocab = collections.OrderedDict()
- index = 0
- with open(vocab_file, "r", encoding="utf-8") as reader:
- while True:
- token = reader.readline()
- if not token:
- break
- token = token.strip()
- vocab[token] = index
- index += 1
- return vocab
-
-
- class BasicTokenizer(object):
- """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
-
- def __init__(self,
- do_lower_case=True,
- never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
- """Constructs a BasicTokenizer.
-
- Args:
- do_lower_case: Whether to lower case the input.
- """
- self.do_lower_case = do_lower_case
- self.never_split = never_split
-
- def tokenize(self, text):
- """Tokenizes a piece of text."""
- text = self._clean_text(text)
- # This was added on November 1st, 2018 for the multilingual and Chinese
- # models. This is also applied to the English models now, but it doesn't
- # matter since the English models were not trained on any Chinese data
- # and generally don't have any Chinese data in them (there are Chinese
- # characters in the vocabulary because Wikipedia does have some Chinese
- # words in the English Wikipedia).
- text = self._tokenize_chinese_chars(text)
- orig_tokens = whitespace_tokenize(text)
- split_tokens = []
- for token in orig_tokens:
- if self.do_lower_case and token not in self.never_split:
- token = token.lower()
- token = self._run_strip_accents(token)
- split_tokens.extend(self._run_split_on_punc(token))
-
- output_tokens = whitespace_tokenize(" ".join(split_tokens))
- return output_tokens
-
- def _run_strip_accents(self, text):
- """Strips accents from a piece of text."""
- text = unicodedata.normalize("NFD", text)
- output = []
- for char in text:
- cat = unicodedata.category(char)
- if cat == "Mn":
- continue
- output.append(char)
- return "".join(output)
-
- def _run_split_on_punc(self, text):
- """Splits punctuation on a piece of text."""
- if text in self.never_split:
- return [text]
- chars = list(text)
- i = 0
- start_new_word = True
- output = []
- while i < len(chars):
- char = chars[i]
- if _is_punctuation(char):
- output.append([char])
- start_new_word = True
- else:
- if start_new_word:
- output.append([])
- start_new_word = False
- output[-1].append(char)
- i += 1
-
- return ["".join(x) for x in output]
-
- def _tokenize_chinese_chars(self, text):
- """Adds whitespace around any CJK character."""
- output = []
- for char in text:
- cp = ord(char)
- if self._is_chinese_char(cp):
- output.append(" ")
- output.append(char)
- output.append(" ")
- else:
- output.append(char)
- return "".join(output)
-
- def _is_chinese_char(self, cp):
- """Checks whether CP is the codepoint of a CJK character."""
- # This defines a "chinese character" as anything in the CJK Unicode block:
- # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
- #
- # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
- # despite its name. The modern Korean Hangul alphabet is a different block,
- # as is Japanese Hiragana and Katakana. Those alphabets are used to write
- # space-separated words, so they are not treated specially and handled
- # like all of the other languages.
- if (((cp >= 0x4E00) and (cp <= 0x9FFF)) or #
- ((cp >= 0x3400) and (cp <= 0x4DBF)) or #
- ((cp >= 0x20000) and (cp <= 0x2A6DF)) or #
- ((cp >= 0x2A700) and (cp <= 0x2B73F)) or #
- ((cp >= 0x2B740) and (cp <= 0x2B81F)) or #
- ((cp >= 0x2B820) and (cp <= 0x2CEAF)) or
- ((cp >= 0xF900) and (cp <= 0xFAFF)) or #
- ((cp >= 0x2F800) and (cp <= 0x2FA1F))): #
- return True
-
- return False
-
- def _clean_text(self, text):
- """Performs invalid character removal and whitespace cleanup on text."""
- output = []
- for char in text:
- cp = ord(char)
- if cp == 0 or cp == 0xfffd or _is_control(char):
- continue
- if _is_whitespace(char):
- output.append(" ")
- else:
- output.append(char)
- return "".join(output)
-
-
- def _is_whitespace(char):
- """Checks whether `chars` is a whitespace character."""
- # \t, \n, and \r are technically contorl characters but we treat them
- # as whitespace since they are generally considered as such.
- if char == " " or char == "\t" or char == "\n" or char == "\r":
- return True
- cat = unicodedata.category(char)
- if cat == "Zs":
- return True
- return False
-
-
- def _is_control(char):
- """Checks whether `chars` is a control character."""
- # These are technically control characters but we count them as whitespace
- # characters.
- if char == "\t" or char == "\n" or char == "\r":
- return False
- cat = unicodedata.category(char)
- if cat.startswith("C"):
- return True
- return False
-
-
- def _is_punctuation(char):
- """Checks whether `chars` is a punctuation character."""
- cp = ord(char)
- # We treat all non-letter/number ASCII as punctuation.
- # Characters such as "^", "$", and "`" are not in the Unicode
- # Punctuation class but we treat them as punctuation anyways, for
- # consistency.
- if (((cp >= 33) and (cp <= 47)) or ((cp >= 58) and (cp <= 64)) or
- ((cp >= 91) and (cp <= 96)) or ((cp >= 123) and (cp <= 126))):
- return True
- cat = unicodedata.category(char)
- if cat.startswith("P"):
- return True
- return False
-
-
- class BertTokenizer(object):
- """Runs end-to-end tokenization: punctuation splitting + wordpiece"""
-
- def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
- never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
- """Constructs a BertTokenizer.
-
- Args:
- vocab_file: Path to a one-wordpiece-per-line vocabulary file
- do_lower_case: Whether to lower case the input
- Only has an effect when do_basic_tokenize=True
- do_basic_tokenize: Whether to do basic tokenization before wordpiece.
- max_len: An artificial maximum length to truncate tokenized sequences to;
- Effective maximum length is always the minimum of this
- value (if specified) and the underlying BERT model's
- sequence length.
- never_split: List of tokens which will never be split during tokenization.
- Only has an effect when do_basic_tokenize=True
- """
- if not os.path.isfile(vocab_file):
- raise ValueError(
- "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
- "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
- self.vocab = load_vocab(vocab_file)
- self.ids_to_tokens = collections.OrderedDict(
- [(ids, tok) for tok, ids in self.vocab.items()])
- self.do_basic_tokenize = do_basic_tokenize
- if do_basic_tokenize:
- self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
- never_split=never_split)
- self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
- self.max_len = max_len if max_len is not None else int(1e12)
-
- def _reinit_on_new_vocab(self, vocab):
- """
- After loading BERT, the vocabulary may have been re-ordered. Call this method afterwards to re-initialize the attributes that depend on the vocab.
-
- :param vocab:
- :return:
- """
- self.vocab = vocab
- self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
-
- def tokenize(self, text):
- split_tokens = []
- if self.do_basic_tokenize:
- for token in self.basic_tokenizer.tokenize(text):
- for sub_token in self.wordpiece_tokenizer.tokenize(token):
- split_tokens.append(sub_token)
- else:
- split_tokens = self.wordpiece_tokenizer.tokenize(text)
- return split_tokens
-
- def convert_tokens_to_ids(self, tokens):
- """Converts a sequence of tokens into ids using the vocab."""
- ids = []
- for token in tokens:
- ids.append(self.vocab[token])
- if len(ids) > self.max_len:
- logger.warning(
- "Token indices sequence length is longer than the specified maximum "
- "sequence length for this BERT model ({} > {}). Running this"
- " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
- )
- return ids
-
- def convert_ids_to_tokens(self, ids):
- """Converts a sequence of ids in wordpiece tokens using the vocab."""
- tokens = []
- for i in ids:
- tokens.append(self.ids_to_tokens[i])
- return tokens
-
- def save_vocabulary(self, vocab_path):
- """Save the tokenizer vocabulary to a directory or file."""
- index = 0
- if os.path.isdir(vocab_path):
- vocab_file = os.path.join(vocab_path, VOCAB_NAME)
- else:
- vocab_file = vocab_path
- with open(vocab_file, "w", encoding="utf-8") as writer:
- for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
- if index != token_index:
- logger.warn("Saving vocabulary to {}: vocabulary indices are not consecutive."
- " Please check that the vocabulary is not corrupted!".format(vocab_file))
- index = token_index
- writer.write(token + u'\n')
- index += 1
- return vocab_file
-
- @classmethod
- def from_pretrained(cls, model_dir_or_name, *inputs, **kwargs):
- """
- Given a model name or a directory path, load the vocabulary directly.
- """
- model_dir = _get_bert_dir(model_dir_or_name)
- pretrained_model_name_or_path = _get_file_name_base_on_postfix(model_dir, '.txt')
- logger.info("loading vocabulary file {}".format(pretrained_model_name_or_path))
- max_len = 512
- kwargs['max_len'] = min(kwargs.pop('max_position_embeddings', int(1e12)), max_len)  # pop so it is not forwarded to __init__
- # Instantiate tokenizer.
- tokenizer = cls(pretrained_model_name_or_path, *inputs, **kwargs)
- return tokenizer
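-
-
- # Example (illustrative): load a vocabulary from a local directory and tokenize a word.
- # The directory below is hypothetical; it must contain a vocab .txt file.
- #
- # tokenizer = BertTokenizer.from_pretrained('/path/to/bert-base-uncased')
- # tokenizer.tokenize("unaffable")                     # ["un", "##aff", "##able"] with a standard uncased vocab
- # tokenizer.convert_tokens_to_ids(["un", "##aff", "##able"])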
-
-
- class _WordPieceBertModel(nn.Module):
- """
- This module computes BERT representations directly at the word-piece level.
-
- """
-
- def __init__(self, model_dir_or_name: str, layers: str = '-1', pooled_cls: bool=False):
- super().__init__()
-
- self.tokenzier = BertTokenizer.from_pretrained(model_dir_or_name)
- self.encoder = BertModel.from_pretrained(model_dir_or_name)
- # check that the requested layer indices are valid for this encoder
- encoder_layer_number = len(self.encoder.encoder.layer)
- self.layers = list(map(int, layers.split(',')))
- for layer in self.layers:
- if layer < 0:
- assert -layer <= encoder_layer_number, f"The layer index:{layer} is out of scope for " \
- f"a bert model with {encoder_layer_number} layers."
- else:
- assert layer < encoder_layer_number, f"The layer index:{layer} is out of scope for " \
- f"a bert model with {encoder_layer_number} layers."
-
- self._cls_index = self.tokenzier.vocab['[CLS]']
- self._sep_index = self.tokenzier.vocab['[SEP]']
- self._wordpiece_unknown_index = self.tokenzier.vocab['[UNK]']
- self._wordpiece_pad_index = self.tokenzier.vocab['[PAD]']  # used as the pad value for the word_pieces field
- self.pooled_cls = pooled_cls
-
- def index_dataset(self, *datasets, field_name, add_cls_sep=True):
- """
- Use BERT's tokenizer to generate a new ``word_pieces`` field in each dataset and set it as input. If
- ``add_cls_sep`` is True and a sequence does not already start with [CLS] and end with [SEP], they are added,
- and the pad value of the ``word_pieces`` field is set to BERT's pad index.
-
- :param datasets: DataSet objects
- :param field_name: the field to index on
- :param add_cls_sep: whether to ensure [CLS]/[SEP] at the start/end of every sequence
- :return:
- """
-
- def convert_words_to_word_pieces(words):
- word_pieces = []
- for word in words:
- tokens = self.tokenzier.wordpiece_tokenizer.tokenize(word)
- word_piece_ids = self.tokenzier.convert_tokens_to_ids(tokens)
- word_pieces.extend(word_piece_ids)
- if add_cls_sep:
- if word_pieces[0] != self._cls_index:
- word_pieces.insert(0, self._cls_index)
- if word_pieces[-1] != self._sep_index:
- word_pieces.append(self._sep_index)  # append [SEP] at the end (insert(-1, ...) would place it before the last piece)
- return word_pieces
-
- for index, dataset in enumerate(datasets):
- try:
- dataset.apply_field(convert_words_to_word_pieces, field_name=field_name, new_field_name='word_pieces',
- is_input=True)
- dataset.set_pad_val('word_pieces', self._wordpiece_pad_index)
- except Exception as e:
- logger.error(f"Exception happens when processing the {index} dataset.")
- raise e
-
- def forward(self, word_pieces, token_type_ids=None):
- """
-
- :param word_pieces: torch.LongTensor, batch_size x max_len
- :param token_type_ids: torch.LongTensor, batch_size x max_len
- :return: num_layers x batch_size x max_len x hidden_size, or num_layers x batch_size x (max_len+2) x hidden_size
- """
- batch_size, max_len = word_pieces.size()
-
- attn_masks = word_pieces.ne(self._wordpiece_pad_index)
- bert_outputs, pooled_cls = self.encoder(word_pieces, token_type_ids=token_type_ids, attention_mask=attn_masks,
- output_all_encoded_layers=True)
- # outputs: len(self.layers) x batch_size x max_word_piece_length x hidden_size
- outputs = bert_outputs[0].new_zeros((len(self.layers), batch_size, max_len, bert_outputs[0].size(-1)))
- for l_index, l in enumerate(self.layers):
- bert_output = bert_outputs[l]
- if l in (len(bert_outputs)-1, -1) and self.pooled_cls:
- bert_output[:, 0] = pooled_cls
- outputs[l_index] = bert_output
- return outputs
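-
-
- # Example (illustrative): extract the two top layers of word-piece representations.
- # The directory is hypothetical and `ds` stands for a fastNLP DataSet with a "words" field.
- #
- # model = _WordPieceBertModel('/path/to/bert-base-uncased', layers='-2,-1')
- # model.index_dataset(ds, field_name='words')
- # reps = model(word_piece_ids)  # word_piece_ids: LongTensor of shape (batch_size, max_len)
- # reps has shape (2, batch_size, max_len, hidden_size), one slice per requested layer.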