
restructure module: 4 classes; add modules; move prototype and rename

tags/v0.1.0
FengZiYjun committed 6 years ago
commit eb66cbe6c4
21 changed files with 15 additions and 11 deletions
  1. +1 -1  fastNLP/models/sequence_modeling.py
  2. +0 -0  fastNLP/modules/aggregation/__init__.py
  3. +0 -0  fastNLP/modules/aggregation/attention.py
  4. +0 -0  fastNLP/modules/aggregation/avg_pool.py
  5. +0 -0  fastNLP/modules/aggregation/kmax_pool.py
  6. +1 -1  fastNLP/modules/aggregation/linear_attention.py
  7. +0 -0  fastNLP/modules/aggregation/max_pool.py
  8. +3 -2  fastNLP/modules/aggregation/self_attention.py
  9. +0 -0  fastNLP/modules/decoder/CRF.py
 10. +0 -0  fastNLP/modules/decoder/__init__.py
 11. +0 -0  fastNLP/modules/encoder/__init__.py
 12. +0 -0  fastNLP/modules/encoder/conv.py
 13. +2 -1  fastNLP/modules/encoder/embedding.py
 14. +3 -3  fastNLP/modules/encoder/lstm.py
 15. +0 -0  fastNLP/modules/interaction/__init__.py
 16. +0 -0  reproduction/LSTM+self_attention_sentiment_analysis/README.md
 17. +0 -0  reproduction/LSTM+self_attention_sentiment_analysis/Word2Idx.py
 18. +3 -2  reproduction/LSTM+self_attention_sentiment_analysis/dataloader.py
 19. +0 -0  reproduction/LSTM+self_attention_sentiment_analysis/example.py
 20. +0 -1  reproduction/LSTM+self_attention_sentiment_analysis/predict.py
 21. +2 -0  reproduction/LSTM+self_attention_sentiment_analysis/prepare.py

fastNLP/models/sequence_modeling.py  (+1 -1)

@@ -3,7 +3,7 @@ import torch.nn as nn
 from torch.nn import functional as F
 
 from fastNLP.models.base_model import BaseModel
-from fastNLP.modules.CRF import ContionalRandomField
+from fastNLP.modules.decoder.CRF import ContionalRandomField
 
 
 class SeqLabeling(BaseModel):


fastNLP/modules/attention/__init__.py → fastNLP/modules/aggregation/__init__.py

fastNLP/modules/attention/attention.py → fastNLP/modules/aggregation/attention.py

fastNLP/modules/convolution/avg_pool.py → fastNLP/modules/aggregation/avg_pool.py

fastNLP/modules/convolution/kmax_pool.py → fastNLP/modules/aggregation/kmax_pool.py
fastNLP/modules/attention/linear_attention.py → fastNLP/modules/aggregation/linear_attention.py  (+1 -1)

@@ -1,4 +1,4 @@
-from fastNLP.modules.attention.attention import Attention
+from fastNLP.modules.aggregation.attention import Attention
 
 
 class LinearAttention(Attention):

fastNLP/modules/convolution/max_pool.py → fastNLP/modules/aggregation/max_pool.py


fastNLP/modules/prototype/aggregation.py → fastNLP/modules/aggregation/self_attention.py  (+3 -2)

@@ -2,7 +2,8 @@ import torch
 import torch.nn as nn
 from torch.autograd import Variable
 
-class Selfattention(nn.Module):
+
+class SelfAttention(nn.Module):
     """
     Self Attention Module.
 
@@ -12,7 +13,7 @@ class Selfattention(nn.Module):
     r : the number of encoded vectors
     """
     def __init__(self, input_size, d_a, r):
-        super(Selfattention, self).__init__()
+        super(SelfAttention, self).__init__()
         self.W_s1 = nn.Parameter(torch.randn(d_a, input_size), requires_grad=True)
         self.W_s2 = nn.Parameter(torch.randn(r, d_a), requires_grad=True)
         self.softmax = nn.Softmax(dim=2)
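Note: the forward pass of the renamed SelfAttention class is not visible in this commit's hunks. The parameters above (W_s1 of shape (d_a, input_size), W_s2 of shape (r, d_a), and a softmax over dim=2) match the usual structured self-attention formulation, so the sketch below is a hypothetical illustration under that assumption; the class name SelfAttentionSketch and the forward body are not the committed code.

import torch
import torch.nn as nn

class SelfAttentionSketch(nn.Module):
    # Illustrative re-creation mirroring the __init__ shown in the diff above.
    def __init__(self, input_size, d_a, r):
        super(SelfAttentionSketch, self).__init__()
        self.W_s1 = nn.Parameter(torch.randn(d_a, input_size), requires_grad=True)
        self.W_s2 = nn.Parameter(torch.randn(r, d_a), requires_grad=True)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, x):
        # x: (batch, seq_len, input_size). Assumed attention map:
        # A = softmax(W_s2 @ tanh(W_s1 @ H^T)), shape (batch, r, seq_len).
        attention = self.softmax(
            torch.matmul(self.W_s2, torch.tanh(torch.matmul(self.W_s1, x.transpose(1, 2)))))
        # r weighted sums of the hidden states: (batch, r, input_size)
        return torch.matmul(attention, x)

# Toy shapes only; real hyperparameters would come from the reproduction config.
output = SelfAttentionSketch(input_size=600, d_a=350, r=10)(torch.randn(4, 50, 600))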

fastNLP/modules/CRF.py → fastNLP/modules/decoder/CRF.py

fastNLP/modules/convolution/__init__.py → fastNLP/modules/decoder/__init__.py

fastNLP/modules/recurrent/__init__.py → fastNLP/modules/encoder/__init__.py

fastNLP/modules/convolution/conv.py → fastNLP/modules/encoder/conv.py


fastNLP/modules/prototype/embedding.py → fastNLP/modules/encoder/embedding.py  (+2 -1)

@@ -1,6 +1,6 @@
-import torch
 import torch.nn as nn
 
+
 class Lookuptable(nn.Module):
     """
     A simple lookup table
@@ -19,5 +19,6 @@ class Lookuptable(nn.Module):
     def forward(self, x):
         return self.embed(x)
 
+
 if __name__ == "__main__":
     model = Lookuptable(10, 20)
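For orientation: the diff shows that Lookuptable's forward simply returns self.embed(x) and that the __main__ block builds it as Lookuptable(10, 20). A usage sketch, assuming the two constructor arguments are vocabulary size and embedding dimension (the wrapped embedding layer itself is not part of this hunk):

import torch
from fastNLP.modules.encoder.embedding import Lookuptable  # new path after this commit

model = Lookuptable(10, 20)                     # assumed: vocab_size=10, embedding_dim=20
ids = torch.LongTensor([[1, 2, 3], [4, 5, 6]])  # (batch=2, seq_len=3) word indices
vectors = model(ids)                            # forward returns self.embed(ids) -> (2, 3, 20)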

fastNLP/modules/prototype/encoder.py → fastNLP/modules/encoder/lstm.py  (+3 -3)

@@ -1,6 +1,6 @@
-import torch
 import torch.nn as nn
 
+
 class Lstm(nn.Module):
     """
     LSTM module
@@ -14,8 +14,8 @@ class Lstm(nn.Module):
     """
     def __init__(self, input_size, hidden_size, num_layers, dropout, bidirectional):
         super(Lstm, self).__init__()
-        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bias=True, batch_first=True,\
-                            dropout=dropout, bidirectional=bidirectional)
+        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bias=True, batch_first=True,
+                            dropout=dropout, bidirectional=bidirectional)
 
     def forward(self, x):
         x, _ = self.lstm(x)
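A usage sketch for the renamed Lstm wrapper, based only on the constructor signature and the batch_first=True nn.LSTM call shown above; the shapes are illustrative, and it is assumed that forward returns the full output sequence taken from x, _ = self.lstm(x):

import torch
from fastNLP.modules.encoder.lstm import Lstm  # new path after this commit

encoder = Lstm(input_size=300, hidden_size=128, num_layers=2, dropout=0.3, bidirectional=True)
x = torch.randn(4, 50, 300)   # (batch, seq_len, input_size), since batch_first=True
out = encoder(x)              # expected (4, 50, 256): 2 * hidden_size with bidirectional=True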

fastNLP/modules/interaction/__init__.py  (+0 -0)


fastNLP/modules/prototype/README.md → reproduction/LSTM+self_attention_sentiment_analysis/README.md

fastNLP/modules/prototype/Word2Idx.py → reproduction/LSTM+self_attention_sentiment_analysis/Word2Idx.py


fastNLP/modules/prototype/dataloader.py → reproduction/LSTM+self_attention_sentiment_analysis/dataloader.py  (+3 -2)

@@ -1,9 +1,10 @@
-import random
 import pickle
+import random
+
 import torch
-import numpy as np
 from torch.autograd import Variable
 
+
 def float_wrapper(x, requires_grad=True, using_cuda=True):
     """
     transform float type list to pytorch variable
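Only the signature and the first docstring line of float_wrapper appear in this hunk. A minimal sketch of what such a helper conventionally does, assuming the body wraps a Python float list into a (possibly CUDA) Variable; this is not the committed implementation:

import torch
from torch.autograd import Variable

def float_wrapper(x, requires_grad=True, using_cuda=True):
    """transform float type list to pytorch variable"""
    # Assumed body: wrap the list as a FloatTensor, optionally moved to GPU.
    tensor = torch.FloatTensor(x)
    if using_cuda:
        tensor = tensor.cuda()
    return Variable(tensor, requires_grad=requires_grad)

batch = float_wrapper([[0.1, 0.2], [0.3, 0.4]], using_cuda=False)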

fastNLP/modules/prototype/example.py → reproduction/LSTM+self_attention_sentiment_analysis/example.py


fastNLP/modules/prototype/predict.py → reproduction/LSTM+self_attention_sentiment_analysis/predict.py  (+0 -1)

@@ -1,4 +1,3 @@
-import torch
 import torch.nn as nn
 import torch.nn.functional as F
 



fastNLP/modules/prototype/prepare.py → reproduction/LSTM+self_attention_sentiment_analysis/prepare.py  (+2 -0)

@@ -1,6 +1,8 @@
 import pickle
+
 import Word2Idx
 
+
 def get_sets(m, n):
     """
     get a train set containing m samples and a test set containing n samples
