
add initial parameters

tags/v0.1.0
2017alan committed 6 years ago
commit a89875df1e
10 changed files with 85 additions and 29 deletions
1. fastNLP/modules/decoder/CRF.py (+4, -3)
2. fastNLP/modules/decoder/MLP.py (+3, -3)
3. fastNLP/modules/encoder/char_embedding.py (+7, -4)
4. fastNLP/modules/encoder/conv.py (+4, -2)
5. fastNLP/modules/encoder/conv_maxpool.py (+4, -2)
6. fastNLP/modules/encoder/linear.py (+3, -3)
7. fastNLP/modules/encoder/lstm.py (+5, -3)
8. fastNLP/modules/encoder/masked_rnn.py (+3, -3)
9. fastNLP/modules/encoder/variational_rnn.py (+5, -4)
10. fastNLP/modules/utils.py (+47, -2)

fastNLP/modules/decoder/CRF.py (+4, -3)

@@ -1,6 +1,7 @@
 import torch
 from torch import nn
 
+from fastNLP.modules.utils import initial_parameter
 
 def log_sum_exp(x, dim=-1):
     max_value, _ = x.max(dim=dim, keepdim=True)
@@ -19,7 +20,7 @@ def seq_len_to_byte_mask(seq_lens):
 
 
 class ConditionalRandomField(nn.Module):
-    def __init__(self, tag_size, include_start_end_trans=True):
+    def __init__(self, tag_size, include_start_end_trans=True ,initial_method = None):
         """
         :param tag_size: int, num of tags
         :param include_start_end_trans: bool, whether to include start/end tag
@@ -35,8 +36,8 @@ class ConditionalRandomField(nn.Module):
             self.start_scores = nn.Parameter(torch.randn(tag_size))
             self.end_scores = nn.Parameter(torch.randn(tag_size))
 
-
-        self.reset_parameter()
+        # self.reset_parameter()
+        initial_parameter(self, initial_method)
     def reset_parameter(self):
         nn.init.xavier_normal_(self.transition_m)
         if self.include_start_end_trans:
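Every file in this commit follows the same pattern: the constructor gains an optional initial_method argument and ends with a call to initial_parameter(self, initial_method). A minimal usage sketch (not part of the commit; tag_size is a hypothetical value, and the accepted method names are the ones dispatched by initial_parameter in fastNLP/modules/utils.py further down):

    from fastNLP.modules.decoder.CRF import ConditionalRandomField

    # Omitting initial_method keeps the default (xavier_normal_ inside initial_parameter).
    crf = ConditionalRandomField(tag_size=10)
    # Explicitly choose an initialization scheme for the module's parameters.
    crf_xavier = ConditionalRandomField(tag_size=10, initial_method='xavier_uniform')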


fastNLP/modules/decoder/MLP.py (+3, -3)

@@ -1,8 +1,8 @@
 import torch
 import torch.nn as nn
-
+from fastNLP.modules.utils import initial_parameter
 class MLP(nn.Module):
-    def __init__(self, size_layer, num_class=2, activation='relu'):
+    def __init__(self, size_layer, num_class=2, activation='relu' , initial_method = None):
         """Multilayer Perceptrons as a decoder
 
         Args:
@@ -36,7 +36,7 @@ class MLP(nn.Module):
             self.hidden_active = activation
         else:
             raise ValueError("should set activation correctly: {}".format(activation))
-
+        initial_parameter(self, initial_method )
     def forward(self, x):
         for layer in self.hiddens:
             x = self.hidden_active(layer(x))


fastNLP/modules/encoder/char_embedding.py (+7, -4)

@@ -1,11 +1,12 @@
 import torch
 import torch.nn.functional as F
 from torch import nn
+# from torch.nn.init import xavier_uniform
 
-
+from fastNLP.modules.utils import initial_parameter
 class ConvCharEmbedding(nn.Module):
 
-    def __init__(self, char_emb_size=50, feature_maps=(40, 30, 30), kernels=(3, 4, 5)):
+    def __init__(self, char_emb_size=50, feature_maps=(40, 30, 30), kernels=(3, 4, 5),initial_method = None):
         """
         Character Level Word Embedding
         :param char_emb_size: the size of character level embedding. Default: 50
@@ -20,6 +21,8 @@ class ConvCharEmbedding(nn.Module):
             nn.Conv2d(1, feature_maps[i], kernel_size=(char_emb_size, kernels[i]), bias=True, padding=(0, 4))
             for i in range(len(kernels))])
 
+        initial_parameter(self,initial_method)
+
     def forward(self, x):
         """
         :param x: [batch_size * sent_length, word_length, char_emb_size]
@@ -53,7 +56,7 @@ class LSTMCharEmbedding(nn.Module):
     :param hidden_size: int, the number of hidden units. Default: equal to char_emb_size.
     """
 
-    def __init__(self, char_emb_size=50, hidden_size=None):
+    def __init__(self, char_emb_size=50, hidden_size=None , initial_method= None):
         super(LSTMCharEmbedding, self).__init__()
         self.hidden_size = char_emb_size if hidden_size is None else hidden_size
 
@@ -62,7 +65,7 @@ class LSTMCharEmbedding(nn.Module):
                             num_layers=1,
                             bias=True,
                             batch_first=True)
-
+        initial_parameter(self, initial_method)
     def forward(self, x):
         """
         :param x:[ n_batch*n_word, word_length, char_emb_size]


fastNLP/modules/encoder/conv.py (+4, -2)

@@ -6,6 +6,7 @@ import torch.nn as nn
 from torch.nn.init import xavier_uniform_
 # import torch.nn.functional as F
 
+from fastNLP.modules.utils import initial_parameter
 
 class Conv(nn.Module):
     """
@@ -15,7 +16,7 @@ class Conv(nn.Module):
     def __init__(self, in_channels, out_channels, kernel_size,
                  stride=1, padding=0, dilation=1,
-                 groups=1, bias=True, activation='relu'):
+                 groups=1, bias=True, activation='relu',initial_method = None ):
         super(Conv, self).__init__()
         self.conv = nn.Conv1d(
             in_channels=in_channels,
@@ -26,7 +27,7 @@ class Conv(nn.Module):
             dilation=dilation,
             groups=groups,
             bias=bias)
-        xavier_uniform_(self.conv.weight)
+        # xavier_uniform_(self.conv.weight)
 
         activations = {
             'relu': nn.ReLU(),
@@ -37,6 +38,7 @@ class Conv(nn.Module):
             raise Exception(
                 'Should choose activation function from: ' +
                 ', '.join([x for x in activations]))
+        initial_parameter(self, initial_method)
 
     def forward(self, x):
         x = torch.transpose(x, 1, 2)  # [N,L,C] -> [N,C,L]


fastNLP/modules/encoder/conv_maxpool.py (+4, -2)

@@ -5,7 +5,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.nn.init import xavier_uniform_
-
+from fastNLP.modules.utils import initial_parameter
 
 class ConvMaxpool(nn.Module):
     """
@@ -14,7 +14,7 @@ class ConvMaxpool(nn.Module):
     def __init__(self, in_channels, out_channels, kernel_sizes,
                  stride=1, padding=0, dilation=1,
-                 groups=1, bias=True, activation='relu'):
+                 groups=1, bias=True, activation='relu',initial_method = None ):
         super(ConvMaxpool, self).__init__()
 
         # convolution
@@ -47,6 +47,8 @@ class ConvMaxpool(nn.Module):
             raise Exception(
                 "Undefined activation function: choose from: relu")
 
+        initial_parameter(self, initial_method)
+
     def forward(self, x):
         # [N,L,C] -> [N,C,L]
         x = torch.transpose(x, 1, 2)


fastNLP/modules/encoder/linear.py (+3, -3)

@@ -1,6 +1,6 @@
 import torch.nn as nn
 
-
+from fastNLP.modules.utils import initial_parameter
 class Linear(nn.Module):
     """
     Linear module
@@ -12,10 +12,10 @@ class Linear(nn.Module):
     bidirectional : If True, becomes a bidirectional RNN
     """
 
-    def __init__(self, input_size, output_size, bias=True):
+    def __init__(self, input_size, output_size, bias=True,initial_method = None ):
         super(Linear, self).__init__()
         self.linear = nn.Linear(input_size, output_size, bias)
-
+        initial_parameter(self, initial_method)
     def forward(self, x):
         x = self.linear(x)
         return x

fastNLP/modules/encoder/lstm.py (+5, -3)

@@ -1,6 +1,6 @@
 import torch.nn as nn
 
-
+from fastNLP.modules.utils import initial_parameter
 class Lstm(nn.Module):
     """
     LSTM module
@@ -13,11 +13,13 @@ class Lstm(nn.Module):
     bidirectional : If True, becomes a bidirectional RNN. Default: False.
     """
 
-    def __init__(self, input_size, hidden_size=100, num_layers=1, dropout=0, bidirectional=False):
+    def __init__(self, input_size, hidden_size=100, num_layers=1, dropout=0, bidirectional=False , initial_method = None):
         super(Lstm, self).__init__()
         self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bias=True, batch_first=True,
                             dropout=dropout, bidirectional=bidirectional)
+        initial_parameter(self, initial_method)
     def forward(self, x):
         x, _ = self.lstm(x)
         return x
+if __name__ == "__main__":
+    lstm = Lstm(10)

fastNLP/modules/encoder/masked_rnn.py (+3, -3)

@@ -4,7 +4,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 
-
+from fastNLP.modules.utils import initial_parameter
 def MaskedRecurrent(reverse=False):
     def forward(input, hidden, cell, mask, train=True, dropout=0):
         """
@@ -192,7 +192,7 @@ def AutogradMaskedStep(num_layers=1, dropout=0, train=True, lstm=False):
 class MaskedRNNBase(nn.Module):
     def __init__(self, Cell, input_size, hidden_size,
                  num_layers=1, bias=True, batch_first=False,
-                 layer_dropout=0, step_dropout=0, bidirectional=False, **kwargs):
+                 layer_dropout=0, step_dropout=0, bidirectional=False, initial_method = None , **kwargs):
         """
         :param Cell:
         :param input_size:
@@ -226,7 +226,7 @@ class MaskedRNNBase(nn.Module):
                 cell = self.Cell(layer_input_size, hidden_size, self.bias, **kwargs)
                 self.all_cells.append(cell)
                 self.add_module('cell%d' % (layer * num_directions + direction), cell)  # Max的代码写得真好看
-
+        initial_parameter(self, initial_method)
     def reset_parameters(self):
         for cell in self.all_cells:
             cell.reset_parameters()


fastNLP/modules/encoder/variational_rnn.py (+5, -4)

@@ -6,6 +6,7 @@ import torch.nn.functional as F
 from torch.nn._functions.thnn import rnnFusedPointwise as fusedBackend
 from torch.nn.parameter import Parameter
 
+from fastNLP.modules.utils import initial_parameter
 
 def default_initializer(hidden_size):
     stdv = 1.0 / math.sqrt(hidden_size)
@@ -172,7 +173,7 @@ def AutogradVarMaskedStep(num_layers=1, lstm=False):
 class VarMaskedRNNBase(nn.Module):
     def __init__(self, Cell, input_size, hidden_size,
                  num_layers=1, bias=True, batch_first=False,
-                 dropout=(0, 0), bidirectional=False, initializer=None, **kwargs):
+                 dropout=(0, 0), bidirectional=False, initializer=None,initial_method = None, **kwargs):
 
         super(VarMaskedRNNBase, self).__init__()
         self.Cell = Cell
@@ -193,7 +194,7 @@ class VarMaskedRNNBase(nn.Module):
                 cell = self.Cell(layer_input_size, hidden_size, self.bias, p=dropout, initializer=initializer, **kwargs)
                 self.all_cells.append(cell)
                 self.add_module('cell%d' % (layer * num_directions + direction), cell)
-
+        initial_parameter(self, initial_method)
     def reset_parameters(self):
         for cell in self.all_cells:
             cell.reset_parameters()
@@ -284,7 +285,7 @@ class VarFastLSTMCell(VarRNNCellBase):
     \end{array}
     """
 
-    def __init__(self, input_size, hidden_size, bias=True, p=(0.5, 0.5), initializer=None):
+    def __init__(self, input_size, hidden_size, bias=True, p=(0.5, 0.5), initializer=None,initial_method =None):
         super(VarFastLSTMCell, self).__init__()
         self.input_size = input_size
         self.hidden_size = hidden_size
@@ -311,7 +312,7 @@ class VarFastLSTMCell(VarRNNCellBase):
         self.p_hidden = p_hidden
         self.noise_in = None
         self.noise_hidden = None
-
+        initial_parameter(self, initial_method)
     def reset_parameters(self):
         for weight in self.parameters():
             if weight.dim() == 1:


fastNLP/modules/utils.py (+47, -2)

@@ -2,8 +2,8 @@ from collections import defaultdict
 
 import numpy as np
 import torch
-
-
+import torch.nn.init as init
+import torch.nn as nn
 def mask_softmax(matrix, mask):
     if mask is None:
         result = torch.nn.functional.softmax(matrix, dim=-1)
@@ -11,6 +11,51 @@ def mask_softmax(matrix, mask):
         raise NotImplementedError
     return result
 
+def initial_parameter(net, initial_method=None):
+
+    if initial_method == 'xavier_uniform':
+        init_method = init.xavier_uniform_
+    elif initial_method == 'xavier_normal':
+        init_method = init.xavier_normal_
+    elif initial_method == 'kaiming_normal' or initial_method == 'msra':
+        init_method = init.kaiming_normal_
+    elif initial_method == 'kaiming_uniform':
+        init_method = init.kaiming_uniform_
+    elif initial_method == 'orthogonal':
+        init_method = init.orthogonal_
+    elif initial_method == 'sparse':
+        init_method = init.sparse_
+    elif initial_method == 'normal':
+        init_method = init.normal_
+    elif initial_method == 'uniform':
+        init_method = init.uniform_
+    else:
+        init_method = init.xavier_normal_
+    def weights_init(m):
+        # classname = m.__class__.__name__
+        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv3d):  # for all the cnn
+            if initial_method is not None:
+                init_method(m.weight.data)
+            else:
+                init.xavier_normal_(m.weight.data)
+            init.normal_(m.bias.data)
+        elif isinstance(m, nn.LSTM):
+            for w in m.parameters():
+                if len(w.data.size()) > 1:
+                    init_method(w.data)  # weight
+                else:
+                    init.normal_(w.data)  # bias
+        elif hasattr(m, 'weight') and m.weight.requires_grad:
+            init_method(m.weight.data)
+        else:
+            for w in m.parameters():
+                if w.requires_grad:
+                    if len(w.data.size()) > 1:
+                        init_method(w.data)  # weight
+                    else:
+                        init.normal_(w.data)  # bias
+            # print("init else")
+    net.apply(weights_init)
 
 def seq_mask(seq_len, max_len):
     mask = [torch.ge(torch.LongTensor(seq_len), i + 1) for i in range(max_len)]
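As a companion sketch (not part of the commit), this is how the new helper would be applied to an arbitrary module; ToyNet is a hypothetical module used only to exercise both the Conv and the LSTM branches of weights_init above:

    import torch.nn as nn
    from fastNLP.modules.utils import initial_parameter

    class ToyNet(nn.Module):
        # Mixes a Conv1d and an LSTM so both init branches are hit.
        def __init__(self, initial_method=None):
            super(ToyNet, self).__init__()
            self.conv = nn.Conv1d(in_channels=8, out_channels=16, kernel_size=3)
            self.rnn = nn.LSTM(input_size=16, hidden_size=32, batch_first=True)
            # Same call pattern the fastNLP encoders/decoders use in this commit.
            initial_parameter(self, initial_method)

    net = ToyNet(initial_method='kaiming_normal')  # also accepts 'msra', 'orthogonal', 'uniform', ...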

