@@ -1,6 +1,7 @@
import torch
from torch import nn
from fastNLP.modules.utils import initial_parameter


def log_sum_exp(x, dim=-1):
    max_value, _ = x.max(dim=dim, keepdim=True)

@@ -19,7 +20,7 @@ def seq_len_to_byte_mask(seq_lens):

class ConditionalRandomField(nn.Module):
    def __init__(self, tag_size, include_start_end_trans=True):
    def __init__(self, tag_size, include_start_end_trans=True, initial_method=None):
        """
        :param tag_size: int, num of tags
        :param include_start_end_trans: bool, whether to include start/end tag
@@ -35,8 +36,8 @@ class ConditionalRandomField(nn.Module):
            self.start_scores = nn.Parameter(torch.randn(tag_size))
            self.end_scores = nn.Parameter(torch.randn(tag_size))

        self.reset_parameter()
        # self.reset_parameter()
        initial_parameter(self, initial_method)

    def reset_parameter(self):
        nn.init.xavier_normal_(self.transition_m)
        if self.include_start_end_trans:
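
A minimal usage sketch for the new `initial_method` argument on the CRF decoder; the import path and the tag_size value are illustrative assumptions, not something this diff specifies:

    from fastNLP.modules.decoder.CRF import ConditionalRandomField  # path assumed

    # build a CRF over 5 tags (arbitrary) and initialise its parameters with
    # one of the scheme names understood by initial_parameter
    crf = ConditionalRandomField(tag_size=5, initial_method='xavier_uniform')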
@@ -1,8 +1,8 @@
import torch
import torch.nn as nn
from fastNLP.modules.utils import initial_parameter


class MLP(nn.Module):
    def __init__(self, size_layer, num_class=2, activation='relu'):
    def __init__(self, size_layer, num_class=2, activation='relu', initial_method=None):
        """Multilayer Perceptrons as a decoder

        Args:
@@ -36,7 +36,7 @@ class MLP(nn.Module):
            self.hidden_active = activation
        else:
            raise ValueError("should set activation correctly: {}".format(activation))
        initial_parameter(self, initial_method)

    def forward(self, x):
        for layer in self.hiddens:
            x = self.hidden_active(layer(x))
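
A corresponding sketch for the MLP decoder; the module path and the layer-size list are assumptions (the `size_layer` format is described in the docstring but not shown in this diff):

    from fastNLP.modules.decoder.MLP import MLP  # path assumed

    # hidden sizes are illustrative; 'kaiming_uniform' is one of the names
    # recognised by fastNLP.modules.utils.initial_parameter (added at the end of this diff)
    mlp = MLP(size_layer=[100, 100], initial_method='kaiming_uniform')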
@@ -1,11 +1,12 @@
import torch
import torch.nn.functional as F
from torch import nn
# from torch.nn.init import xavier_uniform
from fastNLP.modules.utils import initial_parameter


class ConvCharEmbedding(nn.Module):
    def __init__(self, char_emb_size=50, feature_maps=(40, 30, 30), kernels=(3, 4, 5)):
    def __init__(self, char_emb_size=50, feature_maps=(40, 30, 30), kernels=(3, 4, 5), initial_method=None):
        """
        Character Level Word Embedding
        :param char_emb_size: the size of character level embedding. Default: 50
@@ -20,6 +21,8 @@ class ConvCharEmbedding(nn.Module):
            nn.Conv2d(1, feature_maps[i], kernel_size=(char_emb_size, kernels[i]), bias=True, padding=(0, 4))
            for i in range(len(kernels))])

        initial_parameter(self, initial_method)

    def forward(self, x):
        """
        :param x: [batch_size * sent_length, word_length, char_emb_size]
@@ -53,7 +56,7 @@ class LSTMCharEmbedding(nn.Module):
    :param hidden_size: int, the number of hidden units. Default: equal to char_emb_size.
    """
    def __init__(self, char_emb_size=50, hidden_size=None):
    def __init__(self, char_emb_size=50, hidden_size=None, initial_method=None):
        super(LSTMCharEmbedding, self).__init__()
        self.hidden_size = char_emb_size if hidden_size is None else hidden_size
@@ -62,7 +65,7 @@ class LSTMCharEmbedding(nn.Module):
                            num_layers=1,
                            bias=True,
                            batch_first=True)
        initial_parameter(self, initial_method)

    def forward(self, x):
        """
        :param x: [n_batch*n_word, word_length, char_emb_size]
@@ -6,6 +6,7 @@ import torch.nn as nn
from torch.nn.init import xavier_uniform_
# import torch.nn.functional as F
from fastNLP.modules.utils import initial_parameter


class Conv(nn.Module):
    """
@@ -15,7 +16,7 @@ class Conv(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1,
                 groups=1, bias=True, activation='relu'):
                 groups=1, bias=True, activation='relu', initial_method=None):
        super(Conv, self).__init__()
        self.conv = nn.Conv1d(
            in_channels=in_channels,
@@ -26,7 +27,7 @@ class Conv(nn.Module):
            dilation=dilation,
            groups=groups,
            bias=bias)
        xavier_uniform_(self.conv.weight)
        # xavier_uniform_(self.conv.weight)

        activations = {
            'relu': nn.ReLU(),
@@ -37,6 +38,7 @@ class Conv(nn.Module):
            raise Exception(
                'Should choose activation function from: ' +
                ', '.join([x for x in activations]))
        initial_parameter(self, initial_method)

    def forward(self, x):
        x = torch.transpose(x, 1, 2)  # [N,L,C] -> [N,C,L]
@@ -5,7 +5,7 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from fastNLP.modules.utils import initial_parameter


class ConvMaxpool(nn.Module):
    """
@@ -14,7 +14,7 @@ class ConvMaxpool(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_sizes,
                 stride=1, padding=0, dilation=1,
                 groups=1, bias=True, activation='relu'):
                 groups=1, bias=True, activation='relu', initial_method=None):
        super(ConvMaxpool, self).__init__()

        # convolution
@@ -47,6 +47,8 @@ class ConvMaxpool(nn.Module):
            raise Exception(
                "Undefined activation function: choose from: relu")

        initial_parameter(self, initial_method)

    def forward(self, x):
        # [N,L,C] -> [N,C,L]
        x = torch.transpose(x, 1, 2)
@@ -1,6 +1,6 @@
import torch.nn as nn
from fastNLP.modules.utils import initial_parameter


class Linear(nn.Module):
    """
    Linear module
@@ -12,10 +12,10 @@ class Linear(nn.Module):
        bidirectional : If True, becomes a bidirectional RNN
    """
    def __init__(self, input_size, output_size, bias=True):
    def __init__(self, input_size, output_size, bias=True, initial_method=None):
        super(Linear, self).__init__()
        self.linear = nn.Linear(input_size, output_size, bias)
        initial_parameter(self, initial_method)

    def forward(self, x):
        x = self.linear(x)
        return x
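
For the thin Linear wrapper the change is mechanical; a hedged construction sketch (sizes are arbitrary, module path assumed):

    from fastNLP.modules.encoder.linear import Linear  # path assumed

    # initial_parameter re-initialises the wrapped nn.Linear: the 2-D weight
    # matrix gets xavier_normal_, the 1-D bias gets a normal draw
    fc = Linear(input_size=20, output_size=10, initial_method='xavier_normal')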
@@ -1,6 +1,6 @@
import torch.nn as nn
from fastNLP.modules.utils import initial_parameter


class Lstm(nn.Module):
    """
    LSTM module
@@ -13,11 +13,13 @@ class Lstm(nn.Module):
        bidirectional : If True, becomes a bidirectional RNN. Default: False.
    """
    def __init__(self, input_size, hidden_size=100, num_layers=1, dropout=0, bidirectional=False):
    def __init__(self, input_size, hidden_size=100, num_layers=1, dropout=0, bidirectional=False, initial_method=None):
        super(Lstm, self).__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bias=True, batch_first=True,
                            dropout=dropout, bidirectional=bidirectional)
        initial_parameter(self, initial_method)

    def forward(self, x):
        x, _ = self.lstm(x)
        return x


if __name__ == "__main__":
    lstm = Lstm(10)
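
The `__main__` check above only exercises the defaults; with the new argument an initialisation scheme can be chosen explicitly (a sketch with arbitrary sizes):

    # 'orthogonal' is one of the names handled by initial_parameter: 2-D LSTM
    # weight matrices get init.orthogonal_, 1-D bias vectors get init.normal_
    lstm = Lstm(input_size=10, hidden_size=64, initial_method='orthogonal')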
@@ -4,7 +4,7 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
from fastNLP.modules.utils import initial_parameter


def MaskedRecurrent(reverse=False):
    def forward(input, hidden, cell, mask, train=True, dropout=0):
        """
@@ -192,7 +192,7 @@ def AutogradMaskedStep(num_layers=1, dropout=0, train=True, lstm=False):

class MaskedRNNBase(nn.Module):
    def __init__(self, Cell, input_size, hidden_size,
                 num_layers=1, bias=True, batch_first=False,
                 layer_dropout=0, step_dropout=0, bidirectional=False, **kwargs):
                 layer_dropout=0, step_dropout=0, bidirectional=False, initial_method=None, **kwargs):
        """
        :param Cell:
        :param input_size:
@@ -226,7 +226,7 @@ class MaskedRNNBase(nn.Module):
                cell = self.Cell(layer_input_size, hidden_size, self.bias, **kwargs)
                self.all_cells.append(cell)
                self.add_module('cell%d' % (layer * num_directions + direction), cell)  # Max's code is really nice to read
        initial_parameter(self, initial_method)

    def reset_parameters(self):
        for cell in self.all_cells:
            cell.reset_parameters()
@@ -6,6 +6,7 @@ import torch.nn.functional as F
from torch.nn._functions.thnn import rnnFusedPointwise as fusedBackend
from torch.nn.parameter import Parameter
from fastNLP.modules.utils import initial_parameter


def default_initializer(hidden_size):
    stdv = 1.0 / math.sqrt(hidden_size)

@@ -172,7 +173,7 @@ def AutogradVarMaskedStep(num_layers=1, lstm=False):

class VarMaskedRNNBase(nn.Module):
    def __init__(self, Cell, input_size, hidden_size,
                 num_layers=1, bias=True, batch_first=False,
                 dropout=(0, 0), bidirectional=False, initializer=None, **kwargs):
                 dropout=(0, 0), bidirectional=False, initializer=None, initial_method=None, **kwargs):
        super(VarMaskedRNNBase, self).__init__()

        self.Cell = Cell
@@ -193,7 +194,7 @@ class VarMaskedRNNBase(nn.Module):
                cell = self.Cell(layer_input_size, hidden_size, self.bias, p=dropout, initializer=initializer, **kwargs)
                self.all_cells.append(cell)
                self.add_module('cell%d' % (layer * num_directions + direction), cell)
        initial_parameter(self, initial_method)

    def reset_parameters(self):
        for cell in self.all_cells:
            cell.reset_parameters()

@@ -284,7 +285,7 @@ class VarFastLSTMCell(VarRNNCellBase):
        \end{array}
    """

    def __init__(self, input_size, hidden_size, bias=True, p=(0.5, 0.5), initializer=None):
    def __init__(self, input_size, hidden_size, bias=True, p=(0.5, 0.5), initializer=None, initial_method=None):
        super(VarFastLSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
@@ -311,7 +312,7 @@ class VarFastLSTMCell(VarRNNCellBase):
        self.p_hidden = p_hidden
        self.noise_in = None
        self.noise_hidden = None
        initial_parameter(self, initial_method)

    def reset_parameters(self):
        for weight in self.parameters():
            if weight.dim() == 1:
@@ -2,8 +2,8 @@ from collections import defaultdict
import numpy as np
import torch
import torch.nn.init as init
import torch.nn as nn


def mask_softmax(matrix, mask):
    if mask is None:
        result = torch.nn.functional.softmax(matrix, dim=-1)
@@ -11,6 +11,51 @@ def mask_softmax(matrix, mask):
        raise NotImplementedError
    return result


def initial_parameter(net, initial_method=None):
    """Initialise every parameter of `net` in place, using the scheme named by
    `initial_method`; unknown or missing names fall back to xavier_normal_."""
    # map the method name onto the corresponding torch.nn.init function
    if initial_method == 'xavier_uniform':
        init_method = init.xavier_uniform_
    elif initial_method == 'xavier_normal':
        init_method = init.xavier_normal_
    elif initial_method == 'kaiming_normal' or initial_method == 'msra':
        init_method = init.kaiming_normal_
    elif initial_method == 'kaiming_uniform':
        init_method = init.kaiming_uniform_
    elif initial_method == 'orthogonal':
        init_method = init.orthogonal_
    elif initial_method == 'sparse':
        init_method = init.sparse_
    elif initial_method == 'normal':
        init_method = init.normal_
    elif initial_method == 'uniform':
        init_method = init.uniform_
    else:
        init_method = init.xavier_normal_

    def weights_init(m):
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv3d):  # all conv layers
            if initial_method is not None:
                init_method(m.weight.data)
            else:
                init.xavier_normal_(m.weight.data)
            init.normal_(m.bias.data)
        elif isinstance(m, nn.LSTM):
            for w in m.parameters():
                if len(w.data.size()) > 1:
                    init_method(w.data)  # weight matrices
                else:
                    init.normal_(w.data)  # bias vectors
        elif hasattr(m, 'weight') and m.weight.requires_grad:
            init_method(m.weight.data)
        else:
            for w in m.parameters():
                if w.requires_grad:
                    if len(w.data.size()) > 1:
                        init_method(w.data)  # weight
                    else:
                        init.normal_(w.data)  # bias

    net.apply(weights_init)
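
Called directly, the helper above works on any nn.Module; a small self-contained sketch of the dispatch (the toy model is illustrative only):

    import torch.nn as nn
    from fastNLP.modules.utils import initial_parameter

    toy = nn.Sequential(nn.Conv1d(8, 16, 3), nn.LSTM(16, 32), nn.Linear(32, 4))
    # every weight tensor is re-initialised with kaiming_normal_,
    # every 1-D bias vector with a draw from init.normal_
    initial_parameter(toy, 'kaiming_normal')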

def seq_mask(seq_len, max_len):
    mask = [torch.ge(torch.LongTensor(seq_len), i + 1) for i in range(max_len)]