
Documentation structure and aliases for the models package

ChenXin committed 6 years ago · tags/v0.4.10
commit dcc6d5d15d
12 changed files with 31 additions and 71 deletions
  1. +0  -7   docs/source/fastNLP.models.base_model.rst
  2. +0  -7   docs/source/fastNLP.models.bert.rst
  3. +0  -7   docs/source/fastNLP.models.enas_controller.rst
  4. +0  -7   docs/source/fastNLP.models.enas_model.rst
  5. +0  -7   docs/source/fastNLP.models.enas_trainer.rst
  6. +0  -7   docs/source/fastNLP.models.enas_utils.rst
  7. +0  -6   docs/source/fastNLP.models.rst
  8. +2  -0   fastNLP/models/biaffine_parser.py
  9. +4  -4   fastNLP/models/cnn_text_classification.py
  10. +6  -3  fastNLP/models/sequence_labeling.py
  11. +4  -1  fastNLP/models/snli.py
  12. +15 -15 fastNLP/models/star_transformer.py

+0 -7  docs/source/fastNLP.models.base_model.rst

@@ -1,7 +0,0 @@
-fastNLP.models.base\_model
-==========================
-
-.. automodule:: fastNLP.models.base_model
-    :members:
-    :undoc-members:
-    :show-inheritance:

+0 -7  docs/source/fastNLP.models.bert.rst

@@ -1,7 +0,0 @@
-fastNLP.models.bert
-===================
-
-.. automodule:: fastNLP.models.bert
-    :members:
-    :undoc-members:
-    :show-inheritance:

+0 -7  docs/source/fastNLP.models.enas_controller.rst

@@ -1,7 +0,0 @@
-fastNLP.models.enas\_controller module
-======================================
-
-.. automodule:: fastNLP.models.enas_controller
-    :members:
-    :undoc-members:
-    :show-inheritance:

+0 -7  docs/source/fastNLP.models.enas_model.rst

@@ -1,7 +0,0 @@
-fastNLP.models.enas\_model
-==========================
-
-.. automodule:: fastNLP.models.enas_model
-    :members:
-    :undoc-members:
-    :show-inheritance:

+0 -7  docs/source/fastNLP.models.enas_trainer.rst

@@ -1,7 +0,0 @@
-fastNLP.models.enas\_trainer
-============================
-
-.. automodule:: fastNLP.models.enas_trainer
-    :members:
-    :undoc-members:
-    :show-inheritance:

+0 -7  docs/source/fastNLP.models.enas_utils.rst

@@ -1,7 +0,0 @@
-fastNLP.models.enas\_utils
-==========================
-
-.. automodule:: fastNLP.models.enas_utils
-    :members:
-    :undoc-members:
-    :show-inheritance:

+0 -6  docs/source/fastNLP.models.rst

@@ -11,14 +11,8 @@ fastNLP.models

 .. toctree::

-    fastNLP.models.base_model
-    fastNLP.models.bert
     fastNLP.models.biaffine_parser
     fastNLP.models.cnn_text_classification
-    fastNLP.models.enas_controller
-    fastNLP.models.enas_model
-    fastNLP.models.enas_trainer
-    fastNLP.models.enas_utils
     fastNLP.models.sequence_labeling
     fastNLP.models.snli
     fastNLP.models.star_transformer

+2 -0  fastNLP/models/biaffine_parser.py

@@ -130,6 +130,8 @@ def _find_cycle(vertices, edges):

 class GraphParser(BaseModel):
     """
+    Alias: :class:`fastNLP.models.GraphParser` :class:`fastNLP.models.biaffine_parser.GraphParser`
+
     Graph-based parser base class; supports greedy decoding and maximum spanning tree decoding
     """

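The alias lines this commit adds record that each model class is importable both from the fastNLP.models package and from its defining module, so the documentation can point to either path. A minimal sketch of what the GraphParser alias asserts (assuming a fastNLP v0.4.x install that re-exports the class at package level, as the alias line states):

    from fastNLP.models import GraphParser as AliasedGraphParser
    from fastNLP.models.biaffine_parser import GraphParser

    # Both import paths are bound to the same class object; the docstring
    # alias only documents this fact, it does not create the binding.
    assert AliasedGraphParser is GraphParser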

+4 -4  fastNLP/models/cnn_text_classification.py

@@ -25,14 +25,14 @@ class CNNText(torch.nn.Module):
     :param int,tuple(int) kernel_sizes: the kernel sizes of the output channels.
     :param float dropout: the dropout rate.
     """

     def __init__(self, init_embed,
                  num_classes,
                  kernel_nums=(30, 40, 50),
                  kernel_sizes=(1, 3, 5),
                  dropout=0.5):
         super(CNNText, self).__init__()

         # no support for pre-trained embedding currently
         self.embed = embedding.Embedding(init_embed)
         self.conv_pool = encoder.ConvMaxpool(
@@ -41,7 +41,7 @@ class CNNText(torch.nn.Module):
             kernel_sizes=kernel_sizes)
         self.dropout = nn.Dropout(dropout)
         self.fc = nn.Linear(sum(kernel_nums), num_classes)

     def forward(self, words, seq_len=None):
         """
@@ -58,7 +58,7 @@ class CNNText(torch.nn.Module):
         x = self.dropout(x)
         x = self.fc(x)  # [N, C] -> [N, N_class]
         return {C.OUTPUT: x}

     def predict(self, words, seq_len=None):
         """
         :param torch.LongTensor words: [batch_size, seq_len], the word indices of the sentence

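For orientation, the constructor signature above can be exercised end to end. This is a hedged sketch, not code from the commit: it assumes fastNLP v0.4.x, where init_embed accepts a (vocab_size, embed_dim) tuple for a randomly initialized embedding and forward returns its logits in a dict keyed by Const.OUTPUT:

    import torch
    from fastNLP.models import CNNText

    model = CNNText(init_embed=(1000, 50),  # assumed tuple form: (vocab_size, embed_dim)
                    num_classes=5,
                    kernel_nums=(30, 40, 50),
                    kernel_sizes=(1, 3, 5),
                    dropout=0.5)

    words = torch.randint(0, 1000, (4, 20))           # [batch_size, seq_len] word indices
    seq_len = torch.full((4,), 20, dtype=torch.long)  # true length of each sentence
    output = model(words, seq_len)                    # dict with logits of shape [4, 5]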

+6 -3  fastNLP/models/sequence_labeling.py

@@ -1,10 +1,10 @@
 """
-This module implements several sequence labeling models
+This module implements several sequence labeling models
 """
 __all__ = [
     "SeqLabeling",
     "AdvSeqLabel",
-    "BiLSTMCRF"
+    # "BiLSTMCRF"
 ]

 import torch
@@ -25,7 +25,10 @@ from ..modules import ConditionalRandomField
 class BiLSTMCRF(BaseModel):
     """
     The structure is BiLSTM + FC + Dropout + CRF.
-    TODO: complete the documentation
+
+    .. todo::
+        Continue filling in the documentation
+
     :param embed: tuple:
     :param num_classes:
     :param num_layers:

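Since the docstring only names the architecture, here is a hypothetical skeleton of a BiLSTM + FC + Dropout + CRF stack as a reading aid; it is not the commit's implementation, and all names and sizes are placeholders:

    import torch.nn as nn

    class BiLSTMCRFSketch(nn.Module):
        """Hypothetical skeleton: embedding -> BiLSTM -> FC -> Dropout -> CRF."""

        def __init__(self, vocab_size, embed_dim, hidden_size, num_classes, dropout=0.5):
            super().__init__()
            self.embed = nn.Embedding(vocab_size, embed_dim)
            self.lstm = nn.LSTM(embed_dim, hidden_size // 2,
                                bidirectional=True, batch_first=True)
            self.fc = nn.Linear(hidden_size, num_classes)
            self.dropout = nn.Dropout(dropout)
            # A real model would end with a CRF layer (e.g. the
            # ConditionalRandomField imported in this diff) that scores whole
            # label sequences rather than independent per-token classes.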

+4 -1  fastNLP/models/snli.py

@@ -15,7 +15,10 @@ from ..core.utils import seq_len_to_mask


 class ESIM(BaseModel):
-    """A PyTorch implementation of the ESIM model
+    """
+    Alias: :class:`fastNLP.models.ESIM` :class:`fastNLP.models.snli.ESIM`
+
+    A PyTorch implementation of the ESIM model
     Paper: https://arxiv.org/pdf/1609.06038.pdf

     :param fastNLP.TokenEmbedding init_embedding: the TokenEmbedding used for initialization

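A usage sketch for the documented signature; the embedding construction is elided because it requires a built Vocabulary, and the four-tensor forward call mirrors the two-sentence convention used by STNLICls later in this commit, which is an assumption on my part for ESIM itself:

    from fastNLP.models import ESIM

    # init_embedding: a fastNLP.TokenEmbedding built over your Vocabulary,
    # as the :param line above specifies (construction elided here).
    # model = ESIM(init_embedding)

    # words1/words2: [batch_size, seq_len] index tensors for premise and
    # hypothesis; seq_len1/seq_len2: their true lengths.
    # output = model(words1, words2, seq_len1, seq_len2)  # {Const.OUTPUT: logits}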

+15 -15  fastNLP/models/star_transformer.py

@@ -34,7 +34,7 @@ class StarTransEnc(nn.Module):
     :param emb_dropout: dropout probability of the word embeddings.
     :param dropout: dropout probability of the model outside the word embeddings.
     """

     def __init__(self, init_embed,
                  hidden_size,
                  num_layers,
@@ -54,7 +54,7 @@ class StarTransEnc(nn.Module):
                            head_dim=head_dim,
                            dropout=dropout,
                            max_len=max_len)

     def forward(self, x, mask):
         """
         :param FloatTensor x: [batch, length, hidden] the input sequence
@@ -79,7 +79,7 @@ class _Cls(nn.Module):
             nn.Dropout(dropout),
             nn.Linear(hid_dim, num_cls),
         )

     def forward(self, x):
         h = self.fc(x)
         return h
@@ -95,7 +95,7 @@ class _NLICls(nn.Module):
             nn.Dropout(dropout),
             nn.Linear(hid_dim, num_cls),
         )

     def forward(self, x1, x2):
         x = torch.cat([x1, x2, torch.abs(x1 - x2), x1 * x2], 1)
         h = self.fc(x)
@@ -121,7 +121,7 @@ class STSeqLabel(nn.Module):
     :param emb_dropout: dropout probability of the word embeddings. Default: 0.1
     :param dropout: dropout probability of the model outside the word embeddings. Default: 0.1
     """

     def __init__(self, init_embed, num_cls,
                  hidden_size=300,
                  num_layers=4,
@@ -141,7 +141,7 @@ class STSeqLabel(nn.Module):
                                 emb_dropout=emb_dropout,
                                 dropout=dropout)
         self.cls = _Cls(hidden_size, num_cls, cls_hidden_size)

     def forward(self, words, seq_len):
         """
@@ -154,7 +154,7 @@ class STSeqLabel(nn.Module):
         output = self.cls(nodes)
         output = output.transpose(1, 2)  # move hidden to dim 1
         return {Const.OUTPUT: output}  # [bsz, n_cls, seq_len]

     def predict(self, words, seq_len):
         """
@@ -186,7 +186,7 @@ class STSeqCls(nn.Module):
     :param emb_dropout: dropout probability of the word embeddings. Default: 0.1
     :param dropout: dropout probability of the model outside the word embeddings. Default: 0.1
     """

     def __init__(self, init_embed, num_cls,
                  hidden_size=300,
                  num_layers=4,
@@ -206,7 +206,7 @@ class STSeqCls(nn.Module):
                                 emb_dropout=emb_dropout,
                                 dropout=dropout)
         self.cls = _Cls(hidden_size, num_cls, cls_hidden_size, dropout=dropout)

     def forward(self, words, seq_len):
         """
@@ -219,7 +219,7 @@ class STSeqCls(nn.Module):
         y = 0.5 * (relay + nodes.max(1)[0])
         output = self.cls(y)  # [bsz, n_cls]
         return {Const.OUTPUT: output}

     def predict(self, words, seq_len):
         """

@@ -251,7 +251,7 @@ class STNLICls(nn.Module):
     :param emb_dropout: dropout probability of the word embeddings. Default: 0.1
     :param dropout: dropout probability of the model outside the word embeddings. Default: 0.1
     """

     def __init__(self, init_embed, num_cls,
                  hidden_size=300,
                  num_layers=4,
@@ -271,7 +271,7 @@ class STNLICls(nn.Module):
                                 emb_dropout=emb_dropout,
                                 dropout=dropout)
         self.cls = _NLICls(hidden_size, num_cls, cls_hidden_size)

     def forward(self, words1, words2, seq_len1, seq_len2):
         """
@@ -283,16 +283,16 @@ class STNLICls(nn.Module):
         """
         mask1 = seq_len_to_mask(seq_len1)
         mask2 = seq_len_to_mask(seq_len2)

         def enc(seq, mask):
             nodes, relay = self.enc(seq, mask)
             return 0.5 * (relay + nodes.max(1)[0])

         y1 = enc(words1, mask1)
         y2 = enc(words2, mask2)
         output = self.cls(y1, y2)  # [bsz, n_cls]
         return {Const.OUTPUT: output}

     def predict(self, words1, words2, seq_len1, seq_len2):
         """

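Note that STSeqCls and STNLICls reduce the encoder output with the same readout, averaging the relay-node state with a max-pool over the per-token nodes: y = 0.5 * (relay + nodes.max(1)[0]). A standalone illustration of just that pooling step, with shapes chosen arbitrarily for the example:

    import torch

    batch, length, hidden = 4, 20, 300
    nodes = torch.randn(batch, length, hidden)  # per-token states from StarTransEnc
    relay = torch.randn(batch, hidden)          # global relay-node state

    # max-pool over the length dimension, then average with the relay state
    y = 0.5 * (relay + nodes.max(dim=1)[0])     # -> [batch, hidden]
    assert y.shape == (batch, hidden)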

