* update fastNLP.core.rst
* refine all docstrings in core
docs/source/conf.py
@@ -16,8 +16,6 @@ import os
 import sys
 sys.path.insert(0, os.path.abspath('../../'))
 
-import sphinx_rtd_theme
-
 # -- Project information -----------------------------------------------------
 
 project = 'fastNLP'
@@ -27,7 +25,7 @@ author = 'xpqiu'
 # The short X.Y version
 version = ''
 # The full version, including alpha/beta/rc tags
-release = '1.0'
+release = '2.0'
 
 # -- General configuration ---------------------------------------------------
docs/source/fastNLP.core.rst
@@ -13,10 +13,10 @@ fastNLP.core.dataset
 .. automodule:: fastNLP.core.dataset
     :members:
 
-fastNLP.core.field
+fastNLP.core.fieldarray
 -------------------
 
-.. automodule:: fastNLP.core.field
+.. automodule:: fastNLP.core.fieldarray
     :members:
 
 fastNLP.core.instance
@@ -25,10 +25,10 @@ fastNLP.core.instance
 .. automodule:: fastNLP.core.instance
     :members:
 
-fastNLP.core.loss
+fastNLP.core.losses
 ------------------
 
-.. automodule:: fastNLP.core.loss
+.. automodule:: fastNLP.core.losses
     :members:
 
 fastNLP.core.metrics
@@ -49,12 +49,6 @@ fastNLP.core.predictor
 .. automodule:: fastNLP.core.predictor
     :members:
 
-fastNLP.core.preprocess
-------------------------
-
-.. automodule:: fastNLP.core.preprocess
-    :members:
-
 fastNLP.core.sampler
 ---------------------
fastNLP/core/batch.py
@@ -5,21 +5,19 @@ import torch
 class Batch(object):
     """Batch is an iterable object which iterates over mini-batches.
 
-    ::
-
-        for batch_x, batch_y in Batch(data_set, batch_size=16, sampler=SequentialSampler()):
+    Example::
+
+        for batch_x, batch_y in Batch(data_set, batch_size=16, sampler=SequentialSampler()):
+            # ...
+
+    :param dataset: a DataSet object
+    :param batch_size: int, the size of the batch
+    :param sampler: a Sampler object
+    :param as_numpy: bool. If True, return Numpy arrays. Otherwise, return torch tensors.
     """

     def __init__(self, dataset, batch_size, sampler, as_numpy=False):
-        """
-        :param dataset: a DataSet object
-        :param batch_size: int, the size of the batch
-        :param sampler: a Sampler object
-        :param as_numpy: bool. If True, return Numpy array. Otherwise, return torch tensors.
-        """
         self.dataset = dataset
         self.batch_size = batch_size
         self.sampler = sampler
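
With the parameters now documented at class level, `Batch`'s intended use is easy to sanity-check. A minimal sketch against the API shown in this diff; `append`, `set_input` and `set_target` are assumed from the surrounding `DataSet` code (the `is_input`/`is_target` flags appear in the fieldarray.py hunk below):

```python
from fastNLP.core.batch import Batch
from fastNLP.core.dataset import DataSet
from fastNLP.core.instance import Instance
from fastNLP.core.sampler import SequentialSampler

ds = DataSet()
for i in range(4):
    ds.append(Instance(words=[1, 2, 3], label=0))
ds.set_input("words")    # fields marked is_input end up in batch_x
ds.set_target("label")   # fields marked is_target end up in batch_y

for batch_x, batch_y in Batch(ds, batch_size=2, sampler=SequentialSampler()):
    print(batch_x["words"], batch_y["label"])  # torch tensors, since as_numpy=False
```
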
fastNLP/core/dataset.py
@@ -118,7 +118,7 @@ class DataSet(object):
     def __len__(self):
         """Fetch the length of the dataset.
 
-        :return int length:
+        :return length:
         """
         if len(self.field_arrays) == 0:
             return 0
@@ -170,7 +170,7 @@ class DataSet(object):
     def delete_field(self, name):
         """Delete a field based on the field name.
 
-        :param str name: the name of the field to be deleted.
+        :param name: the name of the field to be deleted.
         """
         self.field_arrays.pop(name)
@@ -182,14 +182,14 @@ class DataSet(object):
     def get_all_fields(self):
         """Return all the fields with their names.
 
-        :return dict field_arrays: the internal data structure of DataSet.
+        :return field_arrays: the internal data structure of DataSet.
         """
         return self.field_arrays
 
     def get_length(self):
         """Fetch the length of the dataset.
 
-        :return int length:
+        :return length:
         """
         return len(self)
@@ -232,14 +232,14 @@ class DataSet(object):
     def get_input_name(self):
         """Get all field names with `is_input` as True.
 
-        :return list field_names: a list of str
+        :return field_names: a list of str
         """
         return [name for name, field in self.field_arrays.items() if field.is_input]
 
     def get_target_name(self):
         """Get all field names with `is_target` as True.
 
-        :return list field_names: a list of str
+        :return field_names: a list of str
         """
         return [name for name, field in self.field_arrays.items() if field.is_target]
@@ -294,8 +294,9 @@ class DataSet(object):
         """Split the dataset into training and development(validation) set.
 
         :param float dev_ratio: the ratio of test set in all data.
-        :return DataSet train_set: the training set
-                DataSet dev_set: the development set
+        :return (train_set, dev_set):
+            train_set: the training set
+            dev_set: the development set
         """
         assert isinstance(dev_ratio, float)
         assert 0 < dev_ratio < 1
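
The rewritten `:return (train_set, dev_set):` now matches how `split` is unpacked in practice; a one-line sketch, reusing the hypothetical `ds` built for the `Batch` example above:

```python
# dev_ratio must be a float strictly between 0 and 1, per the asserts above
train_set, dev_set = ds.split(0.2)
print(len(train_set), len(dev_set))  # roughly 80% / 20% of the instances
```
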
@@ -326,7 +327,7 @@ class DataSet(object):
         :param List[str] or Tuple[str] headers: headers of the CSV file
         :param str sep: delimiter in CSV file. Default: ","
         :param bool dropna: If True, drop rows that have less entries than headers.
-        :return DataSet dataset:
+        :return dataset: the read data set
         """
         with open(csv_path, "r") as f:
@@ -370,7 +371,7 @@ class DataSet(object):
         """Load a DataSet object from pickle.
 
         :param str path: the path to the pickle
-        :return DataSet data_set:
+        :return data_set:
         """
         with open(path, 'rb') as f:
             return pickle.load(f)
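
Since `load` is plain pickle (as the body above shows), a round trip can be sketched with `pickle.dump`; the file names here are hypothetical:

```python
import pickle

from fastNLP.core.dataset import DataSet

ds = DataSet.read_csv("train.csv", headers=("raw_sentence", "label"),
                      sep=",", dropna=True)
with open("train.pkl", "wb") as f:  # mirror image of DataSet.load above
    pickle.dump(ds, f)
loaded = DataSet.load("train.pkl")
```
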
fastNLP/core/fieldarray.py
@@ -2,20 +2,18 @@ import numpy as np
 class FieldArray(object):
-    """FieldArray is the collection of Instances of the same Field.
-    It is the basic element of DataSet class.
+    """``FieldArray`` is the collection of ``Instance``s of the same field.
+    It is the basic element of ``DataSet`` class.
+
+    :param str name: the name of the FieldArray
+    :param list content: a list of int, float, str or np.ndarray, or a list of lists of one of these, or an np.ndarray.
+    :param int padding_val: the integer for padding. Default: 0.
+    :param bool is_target: If True, this FieldArray is used to compute loss.
+    :param bool is_input: If True, this FieldArray is used as the model input.
     """

     def __init__(self, name, content, padding_val=0, is_target=None, is_input=None):
-        """
-        :param str name: the name of the FieldArray
-        :param list content: a list of int, float, str or np.ndarray, or a list of list of one, or a np.ndarray.
-        :param int padding_val: the integer for padding. Default: 0.
-        :param bool is_target: If True, this FieldArray is used to compute loss.
-        :param bool is_input: If True, this FieldArray is used to the model input.
-        """
         self.name = name
         if isinstance(content, list):
             content = content
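
A small sketch of the constructor that the relocated parameter list documents; only attributes shown in this diff are touched:

```python
from fastNLP.core.fieldarray import FieldArray

# Variable-length content is padded with padding_val when batched.
words = FieldArray("words", [[1, 2], [3, 4, 5]], padding_val=0, is_input=True)
labels = FieldArray("label", [0, 1], is_target=True)
print(words.name, words.is_input, labels.is_target)  # words True True
```
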
fastNLP/core/instance.py
@@ -1,23 +1,22 @@
 class Instance(object):
-    """An Instance is an example of data. It is the collection of Fields.
-    ::
-
-        Instance(field_1=[1, 1, 1], field_2=[2, 2, 2])
+    """An Instance is an example of data.
+    Example::
+
+        ins = Instance(field_1=[1, 1, 1], field_2=[2, 2, 2])
+        ins["field_1"]
+        >>[1, 1, 1]
+        ins.add_field("field_3", [3, 3, 3])
+
+    :param fields: a dict of (str: list).
     """

     def __init__(self, **fields):
-        """
-        :param fields: a dict of (str: list).
-        """
         self.fields = fields

     def add_field(self, field_name, field):
         """Add a new field to the instance.

         :param field_name: str, the name of the field.
+        :param field:
         """
         self.fields[field_name] = field
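
The new class docstring doubles as a usage example; spelled out as runnable code:

```python
from fastNLP.core.instance import Instance

ins = Instance(field_1=[1, 1, 1], field_2=[2, 2, 2])
print(ins["field_1"])             # [1, 1, 1]
ins.add_field("field_3", [3, 3, 3])
```
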
fastNLP/core/losses.py
@@ -13,6 +13,9 @@ from fastNLP.core.utils import get_func_signature
 class LossBase(object):
+    """Base class for all losses.
+
+    """
     def __init__(self):
         self.param_map = {}
         self._checked = False
@@ -68,10 +71,9 @@ class LossBase(object):
         #     f"positional argument.).")

     def _fast_param_map(self, pred_dict, target_dict):
-        """
-        Only used as inner function. When the pred_dict, target is unequivocal. Don't need users to pass key_map.
+        """Only used as an inner function. When pred_dict and target_dict are unequivocal, users don't need to pass a key_map,
         e.g. when pred_dict has one element and target_dict has one element.

         :param pred_dict:
         :param target_dict:
         :return: dict, if dict is not {}, pass it to self.evaluate. Otherwise do mapping.
@@ -265,27 +267,22 @@ def _prepare_losser(losser):
 def squash(predict, truth, **kwargs):
-    """To reshape tensors in order to fit loss functions in pytorch
-
-    :param predict : Tensor, model output
-    :param truth : Tensor, truth from dataset
-    :param **kwargs : extra arguments
+    """To reshape tensors in order to fit loss functions in PyTorch.
+
+    :param predict: Tensor, model output
+    :param truth: Tensor, truth from dataset
+    :param **kwargs: extra arguments
     :return predict , truth: predict & truth after processing
     """
     return predict.view(-1, predict.size()[-1]), truth.view(-1, )

 def unpad(predict, truth, **kwargs):
-    """To process padded sequence output to get true loss
-    Using pack_padded_sequence() method
-    This method contains squash()
-
-    :param predict : Tensor, [batch_size , max_len , tag_size]
-    :param truth : Tensor, [batch_size , max_len]
-    :param **kwargs : extra arguments, kwargs["lens"] is expected to be exsist
-        kwargs["lens"] : list or LongTensor, [batch_size]
-        the i-th element is true lengths of i-th sequence
+    """To process padded sequence output to get true loss.
+
+    :param predict: Tensor, [batch_size , max_len , tag_size]
+    :param truth: Tensor, [batch_size , max_len]
+    :param kwargs: kwargs["lens"] is a list or LongTensor, with size [batch_size]. The i-th element is the true length of the i-th sequence.
     :return predict , truth: predict & truth after processing
     """
@@ -299,15 +296,11 @@ def unpad_mask(predict, truth, **kwargs):
 def unpad_mask(predict, truth, **kwargs):
-    """To process padded sequence output to get true loss
-    Using mask() method
-    This method contains squash()
-
-    :param predict : Tensor, [batch_size , max_len , tag_size]
-    :param truth : Tensor, [batch_size , max_len]
-    :param **kwargs : extra arguments, kwargs["lens"] is expected to be exsist
-        kwargs["lens"] : list or LongTensor, [batch_size]
-        the i-th element is true lengths of i-th sequence
+    """To process padded sequence output to get true loss.
+
+    :param predict: Tensor, [batch_size , max_len , tag_size]
+    :param truth: Tensor, [batch_size , max_len]
+    :param kwargs: kwargs["lens"] is a list or LongTensor, with size [batch_size]. The i-th element is the true length of the i-th sequence.
     :return predict , truth: predict & truth after processing
     """
@@ -318,14 +311,11 @@ def unpad_mask(predict, truth, **kwargs):
 def mask(predict, truth, **kwargs):
-    """To select specific elements from Tensor
-    This method contains squash()
-
-    :param predict : Tensor, [batch_size , max_len , tag_size]
-    :param truth : Tensor, [batch_size , max_len]
-    :param **kwargs : extra arguments, kwargs["mask"] is expected to be exsist
-        kwargs["mask"] : ByteTensor, [batch_size , max_len]
-        the mask Tensor , the position that is 1 will be selected
+    """To select specific elements from Tensor. This method calls ``squash()``.
+
+    :param predict: Tensor, [batch_size , max_len , tag_size]
+    :param truth: Tensor, [batch_size , max_len]
+    :param **kwargs: extra arguments, kwargs["mask"]: ByteTensor, [batch_size , max_len], the mask Tensor; positions that are 1 will be selected.
     :return predict , truth: predict & truth after processing
     """
@@ -343,13 +333,11 @@ def mask(predict, truth, **kwargs):
 def make_mask(lens, tar_len):
-    """to generate a mask that select [:lens[i]] for i-th element
-    embezzle from fastNLP.models.sequence_modeling.seq_mask
-
-    :param lens : list or LongTensor, [batch_size]
-    :param tar_len : int
-    :return mask : ByteTensor
+    """To generate a mask over a sequence.
+
+    :param lens: list or LongTensor, [batch_size]
+    :param tar_len: int
+    :return mask: ByteTensor
     """
     lens = torch.LongTensor(lens)
     mask = [torch.ge(lens, i + 1) for i in range(tar_len)]
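
A sketch of what `make_mask` should produce, given the `torch.ge` comparison above; the exact row/column layout of the stacked result is an assumption, since the final stacking line falls outside this hunk:

```python
from fastNLP.core.losses import make_mask

m = make_mask([3, 1], tar_len=4)
# Position j of row i is 1 iff j < lens[i]:
# tensor([[1, 1, 1, 0],
#         [1, 0, 0, 0]], dtype=torch.uint8)
```
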
fastNLP/core/metrics.py
@@ -13,6 +13,24 @@ from fastNLP.core.utils import seq_lens_to_masks
 class MetricBase(object):
+    """Base class for all metrics.
+
+    ``MetricBase`` handles validity check of its input dictionaries - ``pred_dict`` and ``target_dict``.
+    ``pred_dict`` is the output of ``forward()`` or the prediction function of a model.
+    ``target_dict`` is the ground truth from DataSet where ``is_target`` is set ``True``.
+    ``MetricBase`` will do the following type checks:
+
+        1. whether self.evaluate has varargs, which is not supported.
+        2. whether params needed by self.evaluate are missing from ``pred_dict``, ``target_dict``.
+        3. whether params needed by self.evaluate are duplicated in ``pred_dict``, ``target_dict``.
+        4. whether params in ``pred_dict``, ``target_dict`` are not used by evaluate (this only causes a warning).
+
+    Besides, before passing params into self.evaluate, this function will filter out params from output_dict and
+    target_dict which are not used in self.evaluate. (But if **kwargs is present in self.evaluate, no filtering
+    will be conducted.)
+    However, in some cases where the type check is not necessary, ``_fast_param_map`` will be used.
+
+    """
     def __init__(self):
         self.param_map = {}  # key is param in function, value is input param.
         self._checked = False
@@ -71,10 +89,9 @@ class MetricBase(object):
         raise NotImplemented

     def _fast_param_map(self, pred_dict, target_dict):
-        """
-        Only used as inner function. When the pred_dict, target is unequivocal. Don't need users to pass key_map.
+        """Only used as an inner function. When pred_dict and target_dict are unequivocal, users don't need to pass a key_map,
         e.g. when pred_dict has one element and target_dict has one element.

         :param pred_dict:
         :param target_dict:
         :return: dict, if dict is not {}, pass it to self.evaluate. Otherwise do mapping.
@@ -177,6 +194,9 @@ class MetricBase(object):
 class AccuracyMetric(MetricBase):
+    """Accuracy Metric.
+
+    """
     def __init__(self, pred=None, target=None, seq_lens=None):
         super().__init__()
@@ -186,10 +206,9 @@ class AccuracyMetric(MetricBase):
         self.acc_count = 0

     def _fast_param_map(self, pred_dict, target_dict):
-        """
-        Only used as inner function. When the pred_dict, target is unequivocal. Don't need users to pass key_map.
+        """Only used as an inner function. When pred_dict and target_dict are unequivocal, users don't need to pass a key_map,
         e.g. when pred_dict has one element and target_dict has one element.

         :param pred_dict:
         :param target_dict:
         :return: dict, if dict is not None, pass it to self.evaluate. Otherwise do mapping.
@@ -230,7 +249,7 @@ class AccuracyMetric(MetricBase):
             torch.Size([B,]), torch.Size([B,]), torch.Size([B, max_len]), torch.Size([B, max_len])
         :param seq_lens: List of (torch.Tensor, or numpy.ndarray). Elements can be:
             None, None, torch.Size([B]), torch.Size([B]). Ignored if masks are provided.
-        :return: dict({'acc': float})
         """
         # TODO: this error message needs changing, because users do not know what `pred` is; report the actual value to them
         if not isinstance(pred, torch.Tensor):
@@ -269,6 +288,11 @@ class AccuracyMetric(MetricBase):
         self.total += np.prod(list(pred.size()))

     def get_metric(self, reset=True):
+        """Return the computed metric.
+
+        :param bool reset: whether to recount next time.
+        :return evaluate_result: {"acc": float}
+        """
         evaluate_result = {'acc': round(self.acc_count / self.total, 6)}
         if reset:
             self.acc_count = 0
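
The new `get_metric` docstring can be exercised directly; `MetricBase.__call__` dispatches the two dicts to `evaluate`, exactly as the `Tester` loop further down does with `metric(pred_dict, batch_y)`:

```python
import torch
from fastNLP.core.metrics import AccuracyMetric

metric = AccuracyMetric(pred="pred", target="label")
metric({"pred": torch.tensor([1, 0, 1])}, {"label": torch.tensor([1, 1, 1])})
print(metric.get_metric())  # {'acc': 0.666667} -- round(2/3, 6), then counters reset
```
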
@@ -308,34 +332,31 @@ def _prepare_metrics(metrics):
 def accuracy_topk(y_true, y_prob, k=1):
-    """Compute accuracy of y_true matching top-k probable
-    labels in y_prob.
-
-    :param y_true: ndarray, true label, [n_samples]
-    :param y_prob: ndarray, label probabilities, [n_samples, n_classes]
-    :param k: int, k in top-k
-    :return :accuracy of top-k
-    """
+    """Compute accuracy of y_true matching top-k probable labels in y_prob.
+
+    :param y_true: ndarray, true label, [n_samples]
+    :param y_prob: ndarray, label probabilities, [n_samples, n_classes]
+    :param k: int, k in top-k
+    :returns acc: accuracy of top-k
+    """
     y_pred_topk = np.argsort(y_prob, axis=-1)[:, -1:-k - 1:-1]
     y_true_tile = np.tile(np.expand_dims(y_true, axis=1), (1, k))
     y_match = np.any(y_pred_topk == y_true_tile, axis=-1)
     acc = np.sum(y_match) / y_match.shape[0]
     return acc

 def pred_topk(y_prob, k=1):
     """Return top-k predicted labels and corresponding probabilities.

-    :param y_prob: ndarray, size [n_samples, n_classes], probabilities on labels
-    :param k: int, k of top-k
-    :returns
+    :param y_prob: ndarray, size [n_samples, n_classes], probabilities on labels
+    :param k: int, k of top-k
+    :returns (y_pred_topk, y_prob_topk):
         y_pred_topk: ndarray, size [n_samples, k], predicted top-k labels
         y_prob_topk: ndarray, size [n_samples, k], probabilities for top-k labels
-    """
+
+    """
     y_pred_topk = np.argsort(y_prob, axis=-1)[:, -1:-k - 1:-1]
     x_axis_index = np.tile(
         np.arange(len(y_prob))[:, np.newaxis],
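
A worked example for `accuracy_topk`, checkable by hand against the `argsort` slicing above:

```python
import numpy as np
from fastNLP.core.metrics import accuracy_topk

y_true = np.array([0, 0])
y_prob = np.array([[0.6, 0.3, 0.1],    # top-2 classes: 0, 1 -> hit
                   [0.2, 0.5, 0.3]])   # top-2 classes: 1, 2 -> miss
print(accuracy_topk(y_true, y_prob, k=2))  # 0.5
```
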
fastNLP/core/optimizer.py
@@ -2,6 +2,11 @@ import torch
 class Optimizer(object):
+    """
+    :param model_params: a generator. E.g. ``model.parameters()`` for PyTorch models.
+    :param kwargs: additional parameters.
+    """
+
     def __init__(self, model_params, **kwargs):
         if model_params is not None and not hasattr(model_params, "__next__"):
             raise RuntimeError("model parameters should be a generator, rather than {}.".format(type(model_params)))
@@ -10,13 +15,14 @@ class Optimizer(object):
 class SGD(Optimizer):
-    def __init__(self, lr=0.001, momentum=0, model_params=None):
-        """
+    """
     :param float lr: learning rate. Default: 0.01
     :param float momentum: momentum. Default: 0
-        :param model_params: a generator. E.g. model.parameters() for PyTorch models.
-        """
+    :param model_params: a generator. E.g. ``model.parameters()`` for PyTorch models.
+    """
+
+    def __init__(self, lr=0.001, momentum=0, model_params=None):
         if not isinstance(lr, float):
             raise TypeError("learning rate has to be float.")
         super(SGD, self).__init__(model_params, lr=lr, momentum=momentum)
@@ -30,13 +36,14 @@ class SGD(Optimizer):
 class Adam(Optimizer):
-    def __init__(self, lr=0.001, weight_decay=0, betas=(0.9, 0.999), eps=1e-8, amsgrad=False, model_params=None):
-        """
+    """
     :param float lr: learning rate
     :param float weight_decay:
-        :param model_params: a generator. E.g. model.parameters() for PyTorch models.
-        """
+    :param model_params: a generator. E.g. ``model.parameters()`` for PyTorch models.
+    """
+
+    def __init__(self, lr=0.001, weight_decay=0, betas=(0.9, 0.999), eps=1e-8, amsgrad=False, model_params=None):
         if not isinstance(lr, float):
             raise TypeError("learning rate has to be float.")
         super(Adam, self).__init__(model_params, lr=lr, betas=betas, eps=eps, amsgrad=amsgrad,
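
Because `model_params` defaults to None and the `Trainer` supplies `model.parameters()` later (its own default is `optimizer=Adam(lr=0.01, weight_decay=0)`, visible in the trainer.py hunk below), both wrappers can be built up front; a sketch:

```python
from fastNLP.core.optimizer import SGD, Adam

sgd = SGD(lr=0.01, momentum=0.9)          # model_params filled in by the Trainer
adam = Adam(lr=0.001, weight_decay=1e-4)  # betas/eps/amsgrad keep their defaults
```
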
fastNLP/core/sampler.py
@@ -20,8 +20,8 @@ def convert_to_torch_tensor(data_list, use_cuda):
 class BaseSampler(object):
     """The base class of all samplers.

-    Sub-classes must implement the __call__ method.
-    __call__ takes a DataSet object and returns a list of int - the sampling indices.
+    Sub-classes must implement the ``__call__`` method.
+    ``__call__`` takes a DataSet object and returns a list of int - the sampling indices.
     """

     def __call__(self, *args, **kwargs):
@@ -32,8 +32,12 @@ class SequentialSampler(BaseSampler):
     """Sample data in the original order.
     """

     def __call__(self, data_set):
+        """
+        :param DataSet data_set:
+        :return result: a list of integers.
+        """
         return list(range(len(data_set)))
@@ -41,13 +45,23 @@ class RandomSampler(BaseSampler):
     """Sample data in random permutation order.
     """

     def __call__(self, data_set):
+        """
+        :param DataSet data_set:
+        :return result: a list of integers.
+        """
         return list(np.random.permutation(len(data_set)))

 class BucketSampler(BaseSampler):
+    """
+    :param int num_buckets: the number of buckets to use.
+    :param int batch_size: the number of instances in each batch.
+    :param str seq_lens_field_name: the name of the field that records sequence lengths.
+    """
+
     def __init__(self, num_buckets=10, batch_size=32, seq_lens_field_name='seq_lens'):
         self.num_buckets = num_buckets
         self.batch_size = batch_size
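
A sketch of the sampler contract these docstrings describe, reusing the hypothetical `ds` from earlier; `BucketSampler.__call__` lies outside this hunk, so only construction is shown:

```python
from fastNLP.core.sampler import RandomSampler, BucketSampler

indices = RandomSampler()(ds)  # a permuted list of integer indices into ds
# Buckets group instances of similar length (read from seq_lens_field_name),
# so each batch needs less padding.
sampler = BucketSampler(num_buckets=10, batch_size=32, seq_lens_field_name="seq_lens")
```
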
fastNLP/core/tester.py
@@ -1,5 +1,3 @@
-from collections import defaultdict
-
 import torch
 from torch import nn
@@ -15,7 +13,16 @@ from fastNLP.core.utils import get_func_signature
 class Tester(object):
-    """An collection of model inference and evaluation of performance, used over validation/dev set and test set. """
+    """A collection of model inference and evaluation of performance, used over validation/dev set and test set.
+
+    :param DataSet data: a validation/development set
+    :param torch.nn.modules.module model: a PyTorch model
+    :param MetricBase metrics: a metric object or a list of metrics (List[MetricBase])
+    :param int batch_size: batch size for validation
+    :param bool use_cuda: whether to use CUDA in validation.
+    :param int verbose: the number of steps after which information is printed.
+    """

     def __init__(self, data, model, metrics, batch_size=16, use_cuda=False, verbose=1):
         super(Tester, self).__init__()
@@ -49,6 +56,11 @@ class Tester(object):
         self._predict_func = self._model.forward

     def test(self):
+        """Start test or validation.
+
+        :return eval_results: a dictionary whose keys are the class names of the metrics
+            in use and whose values are their evaluation results.
+        """
         # turn on the testing mode; clean up the history
         network = self._model
         self._mode(network, is_test=True)
@@ -60,8 +72,8 @@ class Tester(object):
             _move_dict_value_to_device(batch_x, batch_y, device=self._model_device)
             pred_dict = self._data_forward(self._predict_func, batch_x)
             if not isinstance(pred_dict, dict):
-                raise TypeError(f"The return value of {get_func_signature(self._predict_func)} "
-                                f"must be `dict`, got {type(pred_dict)}.")
+                raise TypeError(f"The return value of {get_func_signature(self._predict_func)} "
+                                f"must be `dict`, got {type(pred_dict)}.")
             for metric in self.metrics:
                 metric(pred_dict, batch_y)
         for metric in self.metrics:
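
Putting the new `Tester` docstring to work; `dev_data` and `model` are placeholders for any `DataSet` with flagged fields and any model whose `forward()` returns a dict (the type check above enforces this):

```python
from fastNLP.core.metrics import AccuracyMetric
from fastNLP.core.tester import Tester

tester = Tester(data=dev_data, model=model,
                metrics=AccuracyMetric(pred="pred", target="label"),
                batch_size=32, use_cuda=False, verbose=1)
eval_results = tester.test()
print(eval_results)  # e.g. {'AccuracyMetric': {'acc': 0.91}}
```
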
fastNLP/core/trainer.py
@@ -27,39 +27,37 @@ from fastNLP.core.utils import get_func_signature
 class Trainer(object):
-    def __init__(self, train_data, model, loss=None, metrics=None, n_epochs=3, batch_size=32, print_every=50,
-                 validate_every=-1, dev_data=None, use_cuda=False, save_path=None,
-                 optimizer=Adam(lr=0.01, weight_decay=0), check_code_level=0,
-                 metric_key=None, sampler=RandomSampler(), use_tqdm=True):
-        """
+    """Main Training Loop
+
     :param DataSet train_data: the training data
     :param torch.nn.modules.module model: a PyTorch model
     :param LossBase loss: a loss object
-    :param MetricBase or List[MetricBase] metrics: a metric object or a list of metrics
+    :param MetricBase metrics: a metric object or a list of metrics (List[MetricBase])
     :param int n_epochs: the number of training epochs
     :param int batch_size: batch size for training and validation
     :param int print_every: step interval to print next training information. Default: -1 (no print).
     :param int validate_every: step interval to do next validation. Default: -1 (validate every epoch).
     :param DataSet dev_data: the validation data
-    :param use_cuda:
-    :param save_path: file path to save models
+    :param bool use_cuda: whether to use CUDA in training.
+    :param str save_path: file path to save models
     :param Optimizer optimizer: an optimizer object
-    :param int check_code_level: level of FastNLP code checker. -1: don't check, 0: ignore. 1: warning. 2: strict.
+    :param int check_code_level: level of FastNLP code checker. -1: don't check, 0: ignore. 1: warning. 2: strict.\\
         `ignore` will not check unused fields; `warning` will warn if some fields are not used; `strict` will
         raise an error if some fields are not used.
     :param str metric_key: a single indicator used to decide the best model based on metric results. It must be one
         of the keys returned by the FIRST metric in `metrics`. If the overall result gets better as the indicator gets
-        smaller, add a `-` character in front of the string. For example
-        ::
+        smaller, add "-" in front of the string. For example::

         metric_key="-PPL"   # language model gets better as perplexity gets smaller
-    :param sampler: method used to generate batch data.
-    :param use_tqdm: boolean, use tqdm to show train progress.
-        """
+    :param BaseSampler sampler: method used to generate batch data.
+    :param bool use_tqdm: whether to use tqdm to show train progress.
+    """
+
+    def __init__(self, train_data, model, loss=None, metrics=None, n_epochs=3, batch_size=32, print_every=50,
+                 validate_every=-1, dev_data=None, use_cuda=False, save_path=None,
+                 optimizer=Adam(lr=0.01, weight_decay=0), check_code_level=0,
+                 metric_key=None, sampler=RandomSampler(), use_tqdm=True):
         super(Trainer, self).__init__()

         if not isinstance(train_data, DataSet):
@@ -141,30 +139,30 @@ class Trainer(object):
     def train(self, load_best_model=True):
         """
-        Start the training process. It mainly runs the following steps
-
-        for epoch in range(num_epochs):
-            (1) use Batch to fetch mini-batches from the DataSet, automatically padding fields whose dtype is
-            float or int and converting them to Tensors. Fields of other dtypes are neither converted to
-            Tensors nor padded
-            for batch_x, batch_y in Batch(DataSet):
-                # batch_x holds the fields set as input
-                # batch_y holds the fields set as target
-            (2) feed batch_x into model.forward and collect the result
-            (3) feed batch_y together with the result of model.forward into loss to compute the loss
-            (4) after obtaining the loss, back-propagate and update the gradients
-            if dev_data is not None:
-                evaluate with metrics, and decide from save_path whether to save the model
-        :param load_best_model: this parameter only takes effect when dev_data was given at initialization; if True,
+        Start the training process. It mainly runs the following steps::
+
+            for each epoch
+                1. use Batch to fetch mini-batches from the DataSet, automatically padding fields whose
+                   dtype is float or int and converting them to Tensors. Fields of other dtypes are
+                   neither converted to Tensors nor padded.
+                   for batch_x, batch_y in Batch(DataSet)
+                       # batch_x holds the fields set as input
+                       # batch_y holds the fields set as target
+                2. feed batch_x into model.forward and collect the result
+                3. feed batch_y together with the result of model.forward into loss to compute the loss
+                4. after obtaining the loss, back-propagate and update the gradients
+            if the dev set is not empty
+                evaluate with metrics, and decide from save_path whether to save the model
+
+        :param bool load_best_model: this parameter only takes effect when dev_data was given at initialization; if True,
             the trainer reloads the parameters that performed best on dev before returning.
-        The method returns a dict with the following entries:
+        :return results: a dict with the following entries::

             seconds: float, the training time
             (the following three entries exist only when dev_data is provided)
             best_eval: Dict of Dict, the evaluation results
             best_epoch: int, the epoch in which the best value was obtained
             best_step: int, the step (batch update) at which the best value was obtained
-        return dict:
         """
         results = {}
         try:
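
A sketch tying the relocated `Trainer` docstring to the `train()` steps above; `train_set`, `dev_set` and `model` are placeholders, and the example result dict simply mirrors the documented keys:

```python
from fastNLP.core.metrics import AccuracyMetric
from fastNLP.core.optimizer import Adam
from fastNLP.core.trainer import Trainer

trainer = Trainer(train_data=train_set, model=model,
                  metrics=AccuracyMetric(pred="pred", target="label"),
                  dev_data=dev_set, n_epochs=10, batch_size=32,
                  optimizer=Adam(lr=0.01, weight_decay=0),
                  metric_key="acc", save_path="./models", use_tqdm=True)
results = trainer.train(load_best_model=True)
# e.g. {'seconds': 12.3, 'best_eval': {'AccuracyMetric': {'acc': 0.91}},
#       'best_epoch': 7, 'best_step': 1400}
```
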
fastNLP/core/vocabulary.py
@@ -41,13 +41,13 @@ class Vocabulary(object):
         vocab.update(word_list)
         vocab["word"]
         vocab.to_word(5)

+    :param int max_size: set the max number of words in Vocabulary. Default: None
+    :param int min_freq: set the min occur frequency of words in Vocabulary. Default: None
     """

     def __init__(self, max_size=None, min_freq=None, unknown='<unk>', padding='<pad>'):
-        """
-        :param int max_size: set the max number of words in Vocabulary. Default: None
-        :param int min_freq: set the min occur frequency of words in Vocabulary. Default: None
-        """
         self.max_size = max_size
         self.min_freq = min_freq
         self.word_count = Counter()
@@ -78,6 +78,7 @@ class Vocabulary(object):
         """Add a single word into the vocabulary.
+
         :param str word: a word or token.
         """
         self.add(word)
@@ -86,11 +87,12 @@ class Vocabulary(object):
         """Add a list of words into the vocabulary.
+
         :param list word_lst: a list of strings
         """
         self.update(word_lst)

     def build_vocab(self):
-        """Build 'word to index' dict, and filter the word using `max_size` and `min_freq`.
+        """Build a mapping from word to index, and filter words using ``max_size`` and ``min_freq``.
         """
         self.word2idx = {}
@@ -111,7 +113,7 @@ class Vocabulary(object):
         self.rebuild = False

     def build_reverse_vocab(self):
-        """Build 'index to word' dict based on 'word to index' dict.
+        """Build "index to word" dict based on "word to index" dict.
         """
         self.idx2word = {i: w for w, i in self.word2idx.items()}
@@ -146,10 +148,9 @@ class Vocabulary(object):
             raise ValueError("word {} not in vocabulary".format(w))

     def to_index(self, w):
-        """ Turn a word to an index.
-        If w is not in Vocabulary, return the unknown label.
+        """Turn a word into an index. If w is not in Vocabulary, return the unknown label.

-        :param str w:
+        :param str w: a word
         """
         return self.__getitem__(w)
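
Finally, the `Vocabulary` class docstring's example, fleshed out into runnable form; `build_vocab` is called explicitly here before any lookup:

```python
from fastNLP.core.vocabulary import Vocabulary

vocab = Vocabulary(max_size=10000, min_freq=1)
vocab.update(["the", "quick", "fox", "the"])
vocab.build_vocab()                           # word -> index, filtered by max_size/min_freq
print(vocab["the"], vocab.to_index("the"))    # identical: to_index delegates to __getitem__
print(vocab.to_word(vocab.to_index("fox")))   # 'fox'
print(vocab.to_index("never-seen-word"))      # falls back to the index of '<unk>'
```
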