diff --git a/fastNLP/core/__init__.py b/fastNLP/core/__init__.py index 052bed5b..300a342f 100644 --- a/fastNLP/core/__init__.py +++ b/fastNLP/core/__init__.py @@ -46,9 +46,11 @@ __all__ = [ 'TorchDataLoader', 'PaddleDataLoader', 'JittorDataLoader', + 'OneflowDataLoader', 'prepare_jittor_dataloader', 'prepare_paddle_dataloader', 'prepare_torch_dataloader', + 'prepare_oneflow_dataloader', "prepare_dataloader", # dataset @@ -64,6 +66,8 @@ __all__ = [ "PaddleFleetDriver", "JittorSingleDriver", "JittorMPIDriver", + "OneflowSingleDriver", + "OneflowDDPDriver", # log "logger", diff --git a/fastNLP/core/collators/collator.py b/fastNLP/core/collators/collator.py index dab5028c..5fbdacb9 100644 --- a/fastNLP/core/collators/collator.py +++ b/fastNLP/core/collators/collator.py @@ -18,7 +18,7 @@ from .packer_unpacker import SequencePackerUnpacker, SinglePackerUnpacker, Mappi NestedMappingPackerUnpacker sequence_idx_str = re.compile(r'^_\d+$') # 形如_0, _1 -SUPPORTED_BACKENDS = ['torch', 'jittor', 'paddle', 'numpy', 'raw', 'auto', None] +SUPPORTED_BACKENDS = ['torch', 'jittor', 'paddle', 'oneflow', 'numpy', 'raw', 'auto', None] # 由于 jittor DataLoader 存在自动的 to_jittor 的转换,所以只需要 collate 成为 numpy 就行 AUTO_BACKEND_MAPPING = {'jittor': 'numpy'} @@ -103,7 +103,7 @@ class Collator: Collator 在第一次进行 pad 的时候自动根据设置以及数据情况,为每个 field 获取一个 padder ,在之后的每次调用中,都将使用对应 的 Padder 给对应的 field 。 - :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ['torch','jittor','paddle','numpy','raw', auto, None]。 + :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ['torch','jittor','paddle','oneflow','numpy','raw', auto, None]。 若为 'auto' ,则在进行 pad 的时候会根据调用的环境决定其 backend 。该参数对不能进行 pad 的数据没有影响,不能 pad 的数据返回一定是 list 。 """ @@ -200,8 +200,8 @@ class Collator: field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 无意义。 :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'auto'],分别代表,输出为 list, numpy.ndarray, - torch.Tensor, paddle.Tensor, jittor.Var 类型。若 pad_val 为 None ,该值无意义 。 + :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto'],分别代表,输出为 list, numpy.ndarray, + torch.Tensor, paddle.Tensor, jittor.Var, oneflow.Tensor 类型。若 pad_val 为 None ,该值无意义 。 :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch 形式,输出将被直接作为结果输出。 @@ -275,7 +275,7 @@ class Collator: """ 设置可以 pad 的 field 默认 pad 为什么类型的 tensor - :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ['torch','jittor','paddle','numpy','raw', 'auto', None], + :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ['torch','jittor','paddle','oneflow','numpy','raw', 'auto', None], 若为 auto ,则在进行 pad 的时候会自动根据调用的环境决定其 backend 。 :return: """
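For reference, a minimal sketch of the new backend value in use (it assumes oneflow is installed and this patch applied; the field name 'words' is a made-up example):

    from fastNLP.core.collators import Collator

    collator = Collator(backend='oneflow')   # pad every padable field into oneflow.Tensor
    collator.set_pad('words', pad_val=-1)    # optional per-field override, same backend
    batch = collator([{'words': [1, 2, 3]}, {'words': [4, 5]}])
    print(batch['words'])                    # oneflow.Tensor of shape [2, 3], padded with -1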
diff --git a/fastNLP/core/collators/padders/get_padder.py b/fastNLP/core/collators/padders/get_padder.py index b0a82849..6416a978 100644 --- a/fastNLP/core/collators/padders/get_padder.py +++ b/fastNLP/core/collators/padders/get_padder.py @@ -10,6 +10,7 @@ from .torch_padder import TorchNumberPadder, TorchSequencePadder, TorchTensorPad from .raw_padder import RawNumberPadder, RawSequencePadder, RawTensorPadder from .paddle_padder import PaddleTensorPadder, PaddleSequencePadder, PaddleNumberPadder from .jittor_padder import JittorTensorPadder, JittorSequencePadder, JittorNumberPadder +from .oneflow_padder import OneflowTensorPadder, OneflowSequencePadder, OneflowNumberPadder from .exceptions import * @@ -91,6 +92,8 @@ def get_padder(batch_field:Sequence[Any], pad_val, dtype, backend, field_name)-> return PaddleNumberPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) elif backend == 'jittor': return JittorNumberPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) + elif backend == 'oneflow': + return OneflowNumberPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) else: raise ValueError(f"backend={backend} is not supported for list(Field:{field_name}).") @@ -105,6 +108,8 @@ def get_padder(batch_field:Sequence[Any], pad_val, dtype, backend, field_name)-> return PaddleSequencePadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) elif backend == 'jittor': return JittorSequencePadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) + elif backend == 'oneflow': + return OneflowSequencePadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) else: raise ValueError(f"backend={backend} is not supported for nested list(Field:{field_name}).") @@ -121,6 +126,8 @@ def get_padder(batch_field:Sequence[Any], pad_val, dtype, backend, field_name)-> return PaddleTensorPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) elif backend == 'jittor': return JittorTensorPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) + elif backend == 'oneflow': + return OneflowTensorPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) else: raise ValueError(f"backend={backend} is not supported for tensors(Field:{field_name}).") diff --git a/fastNLP/core/collators/padders/numpy_padder.py b/fastNLP/core/collators/padders/numpy_padder.py index b6edba04..499fdb8b 100644 --- a/fastNLP/core/collators/padders/numpy_padder.py +++ b/fastNLP/core/collators/padders/numpy_padder.py @@ -18,9 +18,9 @@ def _get_dtype(ele_dtype, dtype, class_name): """ 用于检测数据的 dtype 类型, 根据内部和外部数据判断。 - :param ele_dtype 内部数据的类型 - :param dtype 数据外部类型 - :param class_name 类的名称 + :param ele_dtype: 内部数据的类型 + :param dtype: 数据外部类型 + :param class_name: 类的名称 """ if ele_dtype is not None and not is_number_or_numpy_number(ele_dtype): raise EleDtypeUnsupportedError(f"`{class_name}` only supports padding python numbers " diff --git a/fastNLP/core/collators/padders/oneflow_padder.py b/fastNLP/core/collators/padders/oneflow_padder.py new file mode 100644 index 00000000..c218bcca --- /dev/null +++ b/fastNLP/core/collators/padders/oneflow_padder.py @@ -0,0 +1,204 @@ +__all__ = [ + 'OneflowNumberPadder', + 'OneflowSequencePadder', + 'OneflowTensorPadder' +] +from inspect import isclass +import numpy as np + +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW + +if _NEED_IMPORT_ONEFLOW: + import oneflow + numpy_to_oneflow_dtype_dict = { + np.bool_: oneflow.bool, + np.uint8: oneflow.uint8, + np.int8: oneflow.int8, + np.int32: oneflow.int32, + np.int64: oneflow.int64, + np.float16: oneflow.float16, + np.float32: oneflow.float32, + np.float64: oneflow.float32, # 这里都统一转为 float32 ,这是由于 numpy 大部分时候都默认 float64 了 + } + number_to_oneflow_dtype_dict = { + float: oneflow.float32, # 因为 oneflow.tensor([1], dtype=float)是oneflow.float64 + int: oneflow.int64, + bool: oneflow.bool + } + +from .padder import Padder +from .utils import is_number_or_numpy_number, is_number, is_numpy_number_dtype, get_shape, is_numpy_generic_class +from .exceptions import * + + +def is_oneflow_tensor(dtype): + """ + 判断是否为 oneflow 的 tensor + + :param dtype: 数据的 dtype 类型 + """ + if not isclass(dtype) and isinstance(dtype, oneflow.dtype): + return True + return False + + +def _get_dtype(ele_dtype, dtype, class_name): + """ + 用于检测数据的 dtype 类型, 根据内部和外部数据判断。 + + :param ele_dtype: 内部数据的类型
 + :param dtype: 数据外部类型 + :param class_name: 类的名称 + """ + if not (ele_dtype is None or (is_number_or_numpy_number(ele_dtype) or is_oneflow_tensor(ele_dtype))): + raise EleDtypeUnsupportedError(f"`{class_name}` only supports padding python numbers " + f"or numpy numbers or oneflow.Tensor but get `{ele_dtype}`.") + + if dtype is not None: + if not (is_oneflow_tensor(dtype) or is_number(dtype)): + raise DtypeUnsupportedError(f"The dtype of `{class_name}` only supports python numbers " + f"or oneflow.dtype but get `{dtype}`.") + dtype = number_to_oneflow_dtype_dict.get(dtype, dtype) + else: + if ele_dtype is not None: + if (is_number(ele_dtype) or is_oneflow_tensor(ele_dtype)): + ele_dtype = number_to_oneflow_dtype_dict.get(ele_dtype, ele_dtype) + dtype = ele_dtype + elif is_numpy_number_dtype(ele_dtype): # 存在一个转换的问题了 + dtype = numpy_to_oneflow_dtype_dict.get(ele_dtype.type) + elif is_numpy_generic_class(ele_dtype): + dtype = numpy_to_oneflow_dtype_dict.get(ele_dtype) + + return dtype + + +class OneflowNumberPadder(Padder): + """ + 可以将形如 [1, 2, 3] 这类的数据转为 oneflow.Tensor([1, 2, 3]) + + :param pad_val: 该值无意义 + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 oneflow.tensor 类型。 + :param dtype: 输出的数据的 dtype 是什么。如 oneflow.long, oneflow.float32, int, float 等 + """ + def __init__(self, pad_val=0, ele_dtype=None, dtype=None): + dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) + super().__init__(pad_val=pad_val, dtype=dtype) + + @staticmethod + def pad(batch_field, pad_val=0, dtype=None): + return oneflow.tensor(batch_field, dtype=dtype) + + +class OneflowSequencePadder(Padder): + """ + 将类似于 [[1], [1, 2]] 的内容 pad 为 oneflow.Tensor([[1, 0], [1, 2]]) 可以 pad 多重嵌套的数据。 + + :param pad_val: 需要 pad 的值。 + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 oneflow.tensor 类型。 + :param dtype: 输出的数据的 dtype 是什么。如 oneflow.long, oneflow.float32, int, float 等 + """ + def __init__(self, pad_val=0, ele_dtype=None, dtype=None): + dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) + super().__init__(pad_val=pad_val, dtype=dtype) + + @staticmethod + def pad(batch_field, pad_val=0, dtype=None): + tensor = get_padded_oneflow_tensor(batch_field, dtype=dtype, pad_val=pad_val) + return tensor + + +class OneflowTensorPadder(Padder): + """ + 目前支持 [oneflow.tensor([3, 2]), oneflow.tensor([1])] 类似的。若内部元素不为 oneflow.tensor ,则必须含有 tolist() 方法。 + + >>> OneflowTensorPadder.pad([np.array([3, 4]), np.array([1])], pad_val=-100) + [[ 3. 4.] + [ 1. -100.]]
 + >>> OneflowTensorPadder.pad([oneflow.LongTensor([3, 4]), oneflow.LongTensor([1])], pad_val=-100) + tensor([[ 3, 4], + [ 1, -100]]) + + :param pad_val: 需要 pad 的值。 + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 oneflow.tensor 类型。 + :param dtype: 输出的数据的 dtype 是什么。如 oneflow.long, oneflow.float32, int, float 等 + """ + def __init__(self, pad_val=0, ele_dtype=None, dtype=None): + dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) + super().__init__(pad_val=pad_val, dtype=dtype) + + @staticmethod + def pad(batch_field, pad_val=0, dtype=None): + device = None + try: + if not isinstance(batch_field[0], oneflow.Tensor): + batch_field = [oneflow.tensor(field.tolist(), dtype=dtype) for field in batch_field] + else: + batch_field = [field.to(dtype) for field in batch_field] + device = batch_field[0].device + if dtype is None: + dtype = batch_field[0].dtype + except AttributeError: + raise RuntimeError(f"If the field is not a oneflow.Tensor (it is {type(batch_field[0])}), " + f"it must have tolist() method.") + + shapes = [field.shape for field in batch_field] + if len(batch_field) < 2: + max_shape = [len(batch_field)] + list(shapes[0]) + else: + max_shape = [len(batch_field)] + [max(*_) for _ in zip(*shapes)] + + tensor = oneflow.full(max_shape, value=pad_val, dtype=dtype, device=device) + for i, field in enumerate(batch_field): + slices = (i, ) + tuple(slice(0, s) for s in shapes[i]) + tensor[slices] = field + return tensor + + +def fill_tensor(batch_field, padded_batch, dtype): + """ + 将 batch_field 中的值填入到 tensor 中。 + + :param batch_field: 需要填充进入 array 中的内容 + :param padded_batch: 待填充的 tensor + :param dtype: 数据的类别 + + :return: + """ + if padded_batch.ndim == 2: + for i, content_i in enumerate(batch_field): + padded_batch[i, :len(content_i)] = oneflow.tensor(content_i, dtype=dtype) + elif padded_batch.ndim == 3: + for i, content_i in enumerate(batch_field): + for j, content_ii in enumerate(content_i): + padded_batch[i, j, :len(content_ii)] = oneflow.tensor(content_ii, dtype=dtype) + elif padded_batch.ndim == 4: + try: # 应该是图像,所以直接转换应该就 ok 了。 + padded_batch = oneflow.tensor(batch_field) + except Exception: + for i, content_i in enumerate(batch_field): + for j, content_ii in enumerate(content_i): + for k, content_iii in enumerate(content_ii): + padded_batch[i, j, k, :len(content_iii)] = oneflow.tensor(content_iii, dtype=dtype) + elif padded_batch.ndim == 1: + padded_batch[:] = oneflow.tensor(batch_field, dtype=dtype) + else: + raise RuntimeError("fastNLP does not support padding for more than 3 dimensions. If you need this, please " + "report.") + return padded_batch + + +def get_padded_oneflow_tensor(batch_field, dtype=None, pad_val=0): + """ + 例如: + [[1,2], [3]] -> oneflow.LongTensor([[1, 2], [3, 0]]) + + :param batch_field: 需要 pad 的对象。需要保证应该是可以进行 pad 的。支持 1d(多为句子长度)/2d(多为文本序列)/3d(多为字符序列) + /4d(多为图片)。 + :param dtype: 目标类别是什么 + :param pad_val: pad 的 value + :return: + """ + shapes = get_shape(batch_field) + tensor = oneflow.full(shapes, dtype=dtype, value=pad_val) + tensor = fill_tensor(batch_field, tensor, dtype=dtype) + return tensor
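A small usage sketch of the helpers above, mirroring the doctest (assumes oneflow is installed; the outputs in comments are indicative):

    import oneflow
    from fastNLP.core.collators.padders.oneflow_padder import (
        OneflowSequencePadder, get_padded_oneflow_tensor)

    # nested python lists are padded on every ragged dimension
    print(get_padded_oneflow_tensor([[1, 2], [3]], dtype=oneflow.int64, pad_val=0))
    # tensor([[1, 2],
    #         [3, 0]], dtype=oneflow.int64)

    # the padder classes expose the same behaviour through a static pad()
    print(OneflowSequencePadder.pad([[1], [2, 3, 4]], pad_val=-1, dtype=oneflow.int64))
    # tensor([[ 1, -1, -1],
    #         [ 2,  3,  4]], dtype=oneflow.int64)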
diff --git a/fastNLP/core/collators/padders/raw_padder.py b/fastNLP/core/collators/padders/raw_padder.py index 645c145c..3828b2c0 100644 --- a/fastNLP/core/collators/padders/raw_padder.py +++ b/fastNLP/core/collators/padders/raw_padder.py @@ -13,9 +13,9 @@ def _get_dtype(ele_dtype, dtype, class_name): """ 用于检测数据的 dtype 类型, 根据内部和外部数据判断。 - :param ele_dtype 内部数据的类型 - :param dtype 数据外部类型 - :param class_name 类的名称 + :param ele_dtype: 内部数据的类型 + :param dtype: 数据外部类型 + :param class_name: 类的名称 """ if ele_dtype is not None and not is_number_or_numpy_number(ele_dtype): raise EleDtypeUnsupportedError(f"`{class_name}` only supports padding python numbers " diff --git a/fastNLP/core/collators/padders/torch_padder.py b/fastNLP/core/collators/padders/torch_padder.py index 911c7d8c..91f58af4 100644 --- a/fastNLP/core/collators/padders/torch_padder.py +++ b/fastNLP/core/collators/padders/torch_padder.py @@ -38,7 +38,7 @@ def is_torch_tensor(dtype): """ 判断是否为 torch 的 tensor - :param dtype 数据的 dtype 类型 + :param dtype: 数据的 dtype 类型 """ if not isclass(dtype) and isinstance(dtype, torch.dtype): return True @@ -49,9 +49,9 @@ def _get_dtype(ele_dtype, dtype, class_name): """ 用于检测数据的 dtype 类型, 根据内部和外部数据判断。 - :param ele_dtype 内部数据的类型 - :param dtype 数据外部类型 - :param class_name 类的名称 + :param ele_dtype: 内部数据的类型 + :param dtype: 数据外部类型 + :param class_name: 类的名称 """ if not (ele_dtype is None or (is_number_or_numpy_number(ele_dtype) or is_torch_tensor(ele_dtype))): raise EleDtypeUnsupportedError(f"`{class_name}` only supports padding python numbers " diff --git a/fastNLP/core/controllers/evaluator.py b/fastNLP/core/controllers/evaluator.py index 84ca03bd..ac5b7c05 100644 --- a/fastNLP/core/controllers/evaluator.py +++ b/fastNLP/core/controllers/evaluator.py @@ -122,7 +122,7 @@ class Evaluator: _evaluate_batch_loop: Loop def __init__(self, model, dataloaders, metrics: Optional[Dict] = None, - driver: Union[str, Driver] = 'torch', device: Optional[Union[int, List[int], str]] = None, + driver: Union[str, Driver] = 'auto', device: Optional[Union[int, List[int], str]] = None, evaluate_batch_step_fn: Optional[callable] = None, evaluate_fn: Optional[str] = None, input_mapping: Optional[Union[Callable, Dict]] = None, output_mapping: Optional[Union[Callable, Dict]] = None, model_wo_auto_param_call: bool = False, @@ -279,8 +279,8 @@ raise e finally: self.finally_progress_bar() + metric_results = flat_nest_dict(metric_results, separator=self.separator, compress_none_key=True, top_down=False) if len(metric_results) > 0: # 如果 metric 不为 None 需要 print 。 - metric_results = flat_nest_dict(metric_results, separator=self.separator, compress_none_key=True, top_down=False) if self.verbose: if self.progress_bar == 'rich': f_rich_progress.print(metric_results)
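With the default switched from 'torch' to 'auto', an Evaluator no longer needs the backend spelled out. A hedged sketch (model, eval_dataloader and the metric are placeholders, not part of this patch):

    from fastNLP import Evaluator, Accuracy

    evaluator = Evaluator(
        model=model,                  # driver='auto' (the new default) infers the
        dataloaders=eval_dataloader,  # backend from the model type, e.g. oneflow.nn.Module
        metrics={'acc': Accuracy()},
    )
    results = evaluator.run()         # e.g. {'acc#acc': 0.93}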
diff --git a/fastNLP/core/controllers/trainer.py b/fastNLP/core/controllers/trainer.py index 0f22e63c..c1e64636 100644 --- a/fastNLP/core/controllers/trainer.py +++ b/fastNLP/core/controllers/trainer.py @@ -55,9 +55,10 @@ class Trainer(TrainerEventTrigger): 您应当使用 ``TorchDDPDriver``,意味着您需要通过 ``python -m torch.distributed.launch`` 的方式来启动训练,此时参数 ``device`` 应当设置为 None(此时我们会忽略该参数),具体见下面对于参数 ``device`` 的更详细的解释。 - :param driver: 训练模型所使用的具体的驱动模式,应当为以下选择中的一个:["torch"],之后我们会加入 jittor、paddle 等 - 国产框架的训练模式;其中 "torch" 表示使用 ``TorchSingleDriver`` 或者 ``TorchDDPDriver``,具体使用哪一种取决于参数 ``device`` - 的设置; + :param driver: 训练模型所使用的具体的驱动模式,应当为以下选择中的一个:["auto", "torch", "paddle", "jittor", "fairscale"]。其值为 ``"auto"`` 时, + **FastNLP** 会根据传入模型的类型自行判断使用哪一种模式;其值为 "torch" 时,表示使用 ``TorchSingleDriver`` 或者 ``TorchDDPDriver``; + 其值为 "paddle" 时,表示使用 ``PaddleSingleDriver`` 或者 ``PaddleFleetDriver``;其值为 "jittor" 时,表示使用 ``JittorSingleDriver`` + 或者 ``JittorMPIDriver``;其值为 "fairscale" 时,表示使用 ``FairScaleDriver``。在指定了框架的情况下,具体使用哪一种取决于参数 ``device`` 的设置; .. warning:: @@ -81,7 +82,7 @@ class Trainer(TrainerEventTrigger): device 的可选输入如下所示: - * *str*: 例如 'cpu', 'cuda', 'cuda:0', 'cuda:1' 等; + * *str*: 例如 'cpu', 'cuda', 'cuda:0', 'cuda:1', 'gpu:0' 等; * *torch.device*: 例如 'torch.device("cuda:0")'; * *int*: 将使用 ``device_id`` 为该值的 ``gpu`` 进行训练;如果值为 -1,那么默认使用全部的显卡,此时使用的 driver 实例是 `TorchDDPDriver`; * *list(int)*: 如果多于 1 个device,应当通过该种方式进行设定;注意此时我们一定会使用 ``TorchDDPDriver``,不管您传入的列表的长度是 1 还是其它值; @@ -285,20 +286,11 @@ class Trainer(TrainerEventTrigger): 第一个 ``Trainer`` 实例,即使该 ``Trainer`` 实例的 marker 不为 None;这一点详见 :meth:`~fastNLP.core.controllers.Trainer.on` :kwargs: - * *torch_kwargs* -- 用于在指定 ``driver`` 为 'torch' 时设定具体 driver 实例的一些参数: - - * ddp_kwargs -- 用于在使用 ``TorchDDPDriver`` 时指定 ``DistributedDataParallel`` 初始化时的参数;例如传入 - {'find_unused_parameters': True} 来解决有参数不参与前向运算导致的报错等; - * set_grad_to_none -- 是否在训练过程中在每一次 optimizer 更新后将 grad 置为 None; - * non_blocking -- 表示用于 pytorch 的 tensor 的 to 方法的参数 non_blocking; - * gradscaler_kwargs -- 用于 fp16=True 时,提供给 ``torch.amp.cuda.GradScaler`` 的参数。 - * *paddle_kwargs* -- 用于在指定 ``driver`` 为 'paddle' 时设定具体 driver 实例的一些参数: - - * fleet_kwargs -- 用于在使用 ``PaddleFleetDriver`` 时指定 ``DataParallel`` 和 ``fleet`` 初始化时的参数,包括: - - * is_collective -- 是否使用 paddle 集群式的分布式训练方法,目前仅支持为 ``True`` 的情况; - * role_maker -- 初始化 ``fleet`` 分布式训练 API 时使用的 ``RoleMaker`` - * 其它用于初始化 ``DataParallel`` 的参数; + * *torch_kwargs* -- ``TorchDriver`` 所需的其它参数,详见 :class:`~fastNLP.core.drivers.torch_driver.TorchSingleDriver` 和 + :class:`~fastNLP.core.drivers.torch_driver.TorchDDPDriver`; + * *paddle_kwargs* -- ``PaddleDriver`` 所需的其它参数,详见 :class:`~fastNLP.core.drivers.paddle_driver.PaddleSingleDriver` 和 + :class:`~fastNLP.core.drivers.paddle_driver.PaddleFleetDriver`; + * *fairscale_kwargs* -- ``FairScaleDriver`` 所需的其它参数,详见 :class:`~fastNLP.core.drivers.torch_driver.FairScaleDriver`; * *data_device* -- 一个具体的 driver 实例中,有 ``model_device`` 和 ``data_device``,前者表示模型所在的设备,后者表示 当 ``model_device`` 为 None 时应当将数据迁移到哪个设备; @@ -312,23 +304,23 @@ 3. 对于 paddle,该参数无效;
 * *use_dist_sampler* -- 表示是否使用分布式的 ``sampler``。在多卡时,分布式 ``sampler`` 将自动决定每张卡上读取的 sample ,使得一个 epoch - 内所有卡的 sample 加起来为一整个数据集的 sample,同时为了保证所有卡上拥有相同数量的 sample ,有的卡上可能会有重复的 sample ,例如 - 8卡训练,只有9个sample,如果batch_size为1,那么第二个batch时,有7张卡将没有 sample 可用,因此只有重复使用 sample 来 pad 到第二个 - batch 中。如果不希望 fastNLP 对 dataloader 的sampler 做特殊设置,请将该值设置为 False ,若确实需要分布式的训练,请在 Trainer 外 - 对 train_dataloader 做的数据做特殊处理使得其在不同的卡之间 sample 是 + 内所有卡的 sample 加起来为一整个数据集的 sample,同时为了保证所有卡上拥有相同数量的 sample ,有的卡上可能会有重复的 sample ,例如 + 8卡训练,只有9个sample,如果batch_size为1,那么第二个batch时,有7张卡将没有 sample 可用,因此只有重复使用 sample 来 pad 到第二个 + batch 中。如果不希望 fastNLP 对 dataloader 的sampler 做特殊设置,请将该值设置为 False ,若确实需要分布式的训练,请在 Trainer 外 + 对 train_dataloader 的数据做特殊处理,使得其在不同的卡之间 sample 是不同的。 * *evaluate_use_dist_sampler* -- 表示在 ``Evaluator`` 中在使用分布式的时候是否将保证 dataloader 的 ``sampler`` 替换为 - evaluate 时使用的分布式的 ``sampler``,其特点是每个卡上的数据之间不重叠,所有卡上数据的加起来是整个数据集。若传入的 dataloader - 的 sampler 为 (a) 深度学习框架自带的默认 sampler ; (b) fastNLP 的 Sampler 等,则将替换为 + evaluate 时使用的分布式的 ``sampler``,其特点是每个卡上的数据之间不重叠,所有卡上数据的加起来是整个数据集。若传入的 dataloader + 的 sampler 为 (a) 深度学习框架自带的默认 sampler ; (b) fastNLP 的 Sampler 等,则将替换为 :class:`~fastNLP.UnrepeatedSequentialSampler`,如果这个行为不是期待的,请将本参数设置为 ``False``,并针对每个卡控制其可以 用到的数据。 * *output_from_new_proc* -- 应当为一个字符串,表示在多进程的 driver 中其它进程的输出流应当被做如何处理;其值应当为以下之一: - ["all", "ignore", "only_error"];当该参数的值不是以上值时,该值应当表示一个文件夹的名字,我们会将其他 rank 的输出流重定向到 - log 文件中,然后将 log 文件保存在通过该参数值设定的文件夹中;默认为 "only_error"; + ["all", "ignore", "only_error"];当该参数的值不是以上值时,该值应当表示一个文件夹的名字,我们会将其他 rank 的输出流重定向到 + log 文件中,然后将 log 文件保存在通过该参数值设定的文件夹中;默认为 "only_error"; 注意该参数仅当使用分布式的 ``driver`` 时才有效,例如 ``TorchDDPDriver``; * *progress_bar* -- 以哪种方式显示 progress ,目前支持[None, 'raw', 'rich', 'auto', 'tqdm'] 或者 :class:`~fastNLP.RichCallback`, :class:`~fastNLP.RawTextCallback`等对象, - 默认为 auto , auto 表示如果检测到当前 terminal 为交互型则使用 :class:`~fastNLP.RichCallback`,否则使用 :class:`~fastNLP.RawTextCallback` 对象。如果 - 需要定制 progress bar 的参数,例如打印频率等,可以传入 :class:`~fastNLP.RichCallback`, :class:`~fastNLP.RawTextCallback` 等对象。 + 默认为 auto , auto 表示如果检测到当前 terminal 为交互型则使用 :class:`~fastNLP.RichCallback`,否则使用 :class:`~fastNLP.RawTextCallback` 对象。如果 + 需要定制 progress bar 的参数,例如打印频率等,可以传入 :class:`~fastNLP.RichCallback`, :class:`~fastNLP.RawTextCallback` 等对象。 * *train_input_mapping* -- 与 input_mapping 一致,但是只用于 ``Trainer`` 中。与 input_mapping 互斥。 * *train_output_mapping* -- 与 output_mapping 一致,但是只用于 ``Trainer`` 中。与 output_mapping 互斥。 * *evaluate_input_mapping* -- 与 input_mapping 一致,但是只用于 ``Evaluator`` 中。与 input_mapping 互斥。 @@ -365,9 +357,9 @@ def __init__( self, model, - driver, train_dataloader, optimizers, + driver: str = "auto", device: Optional[Union[int, List[int], str]] = "cpu", n_epochs: int = 20, evaluate_dataloaders=None,
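Because driver now defaults to "auto" and no longer sits between model and train_dataloader, a typical call looks like the sketch below (model, train_dl and optimizer are placeholders):

    from fastNLP import Trainer

    trainer = Trainer(
        model=model,
        train_dataloader=train_dl,
        optimizers=optimizer,
        # driver="auto": fastNLP inspects `model` and picks the matching
        # torch / paddle / jittor / oneflow driver; `device` then selects
        # the single-device or distributed variant.
        device=0,
        n_epochs=3,
    )
    trainer.run()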
diff --git a/fastNLP/core/dataloaders/__init__.py b/fastNLP/core/dataloaders/__init__.py index b18e371c..06d3f5a8 100644 --- a/fastNLP/core/dataloaders/__init__.py +++ b/fastNLP/core/dataloaders/__init__.py @@ -3,9 +3,11 @@ __all__ = [ 'TorchDataLoader', 'PaddleDataLoader', 'JittorDataLoader', + 'OneflowDataLoader', 'prepare_jittor_dataloader', 'prepare_paddle_dataloader', 'prepare_torch_dataloader', + 'prepare_oneflow_dataloader', "prepare_dataloader", @@ -15,5 +17,6 @@ __all__ = [ from .jittor_dataloader import JittorDataLoader, prepare_jittor_dataloader from .torch_dataloader import TorchDataLoader, prepare_torch_dataloader, MixDataLoader from .paddle_dataloader import PaddleDataLoader, prepare_paddle_dataloader +from .oneflow_dataloader import OneflowDataLoader, prepare_oneflow_dataloader from .prepare_dataloader import prepare_dataloader from .utils import OverfitDataLoader \ No newline at end of file diff --git a/fastNLP/core/dataloaders/oneflow_dataloader/__init__.py b/fastNLP/core/dataloaders/oneflow_dataloader/__init__.py new file mode 100644 index 00000000..d17ce91c --- /dev/null +++ b/fastNLP/core/dataloaders/oneflow_dataloader/__init__.py @@ -0,0 +1,6 @@ +__all__ = [ + "OneflowDataLoader", + "prepare_oneflow_dataloader", +] + +from .fdl import OneflowDataLoader, prepare_oneflow_dataloader diff --git a/fastNLP/core/dataloaders/oneflow_dataloader/fdl.py b/fastNLP/core/dataloaders/oneflow_dataloader/fdl.py new file mode 100644 index 00000000..e68402ea --- /dev/null +++ b/fastNLP/core/dataloaders/oneflow_dataloader/fdl.py @@ -0,0 +1,353 @@ +__all__ = [ + 'OneflowDataLoader', + 'prepare_oneflow_dataloader' +] + +from typing import Optional, Callable, Sequence, Union, Tuple, Dict, Mapping, List, Any +from abc import ABC +from copy import deepcopy + +from fastNLP.core.dataset import DataSet +from fastNLP.core.collators import Collator +from fastNLP.core.dataloaders.utils import indice_collate_wrapper +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW +from fastNLP.core.samplers import ReproducibleBatchSampler, ReproducibleSampler, UnrepeatedSampler, RandomSampler +from ..utils import _match_param +from ..utils import HasLenGetitemType + +if _NEED_IMPORT_ONEFLOW: + from oneflow.utils.data import DataLoader, Sampler, Dataset +else: + from fastNLP.core.utils.dummy_class import DummyClass as DataLoader + + +class _FDataSet: + """ + 提供给 ``OneflowDataLoader`` 使用的 wrap 类,其功能是对 dataset 进行封装,wrap 修改 dataset 的 __getitem__ 函数,增加返回 + 数据的下标 idx 。 + + .. note:: + + 需要注意的是传入 ``__init__`` 的 dataset 需要实现 __getattribute__ 方法才能在 _FDataset 实例化对象中调用 dataset 的方法 + + """ + + def __init__(self, dataset) -> None: + self.dataset = dataset + + def __getitem__(self, item: Union[int, list]) -> Tuple: + return (item, self.dataset[item]) + + def __getattr__(self, item): + try: + return self.dataset.__getattribute__(item) + except AttributeError as e: + raise e + + def __len__(self) -> int: + return len(self.dataset) + + +class OneflowDataLoader(DataLoader): + """ + 提供给 ``oneflow`` 框架使用的 ``DataLoader`` 函数,``OneflowDataLoader`` 提供了 ``Collator`` 来自动检测 dataset 的每个 field 是否可 pad, + 若是可 pad 的 field 则自动 pad 到相同长度,否则只会将相同 field 的数据收集组成一个 batch 返回。 + 具体详见 :class:`~fastNLP.core.collators.Collator`;用户通过 collate_fn 来控制是否使用该功能, collate_fn 只能为 ``['auto', None, Callable]`` + 三种取值。 + + * collate_fn 为 ``'auto'`` 时,``OneflowDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的取值。 + 此时可以配套使用 ``OneflowDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 + * collate_fn 为 ``None`` 时, ``OneflowDataLoader`` 默认使用 oneflow DataLoader 自带的 collate_fn + * collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + + """ + + def __init__(self, dataset, batch_size: int = 16, + shuffle: bool = False, sampler = None, batch_sampler = None, + num_workers: int = 0, collate_fn: Union[Callable, str, None] = 'auto', + pin_memory: bool = False, drop_last: bool = False, + timeout: float = 0, worker_init_fn: Optional[Callable] = None, + multiprocessing_context=None, generator=None, prefetch_factor: int = 2, + persistent_workers: bool = False, **kwargs) -> None: + """ + + :param dataset: 实现了 __getitem__() 和 __len__() 的对象。 + :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。
 + :param shuffle: 是否打乱数据集, 默认为 ``False``。 + :param sampler: 实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , + 默认为 None, 当其不为 None 时, shuffle 参数无效。 + :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回一个 List 对象, List中的值为 + dataset 的下标 index ;默认为 None,当其不为 None 时,batch_size, sampler, shuffle 参数均失效。 + :param num_workers: 当 ``num_workers > 0`` 时, ``OneflowDataLoader`` 会开启 num_workers 个子进程来处理数据, 可以加快 + 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 + :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. + + * collate_fn 为 ``None`` 时,需要注意的是此时传进来的 dataset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, + ``OneflowDataLoader`` 调用默认的 oneflow 框架的 ``DataLoader`` 自带的 ``default_collate_fn`` 作为 collate_fn 的默认值, 其无法处理 + :class:`~fastNLP.core.dataset.DataSet` 的 dataset 对象。 + * collate_fn 为 ``'auto'`` 时,``OneflowDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 + 此时可以配套使用 ``OneflowDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 + * collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + + :param pin_memory: 如果其为 ``True``, 那么 ``OneflowDataLoader`` 会在返回数据张量之前将其 copy 到 cuda 的 pin memory 中。 + :param drop_last: 当 ``drop_last=True`` 时,``OneflowDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; + 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 + :param timeout: 子进程的输出队列获取数据的超时值 + :param worker_init_fn: init 函数,如果不设置为 None ,则将会在每个子进程初始化时调用该函数。 + :param multiprocessing_context: 多进程的上下文环境 + :param generator: 如果其不为 ``None``, 将会使用 RandomSampler 去生成随机的 index 且会为每个子进程生成一个 ``base_seed`` + :param prefetch_factor: 每个 worker 提前装载的 samples 数量。``2``意味着在所有的进程中会有 2*num_workers 的数据被预取。默认值为 ``2`` .
+ :param persistent_workers: 如果其为 ``True``, ``OneflowDataLoader`` 在迭代完一次 dataset 后不会关闭所有进程。默认为 ``False`` + + """ + if isinstance(dataset, DataSet) and collate_fn is None: + raise ValueError("When use FastNLP DataSet, collate_fn must be not None") + + if not isinstance(dataset, _FDataSet): + dataset = _FDataSet(dataset) + + if num_workers>0 and multiprocessing_context is None: + multiprocessing_context = 'fork' # 这里默认使用fork的方式来启动多进程 + + if batch_sampler is not None: + batch_size = 1 + shuffle = False + sampler = None + elif sampler is None: + sampler = RandomSampler(dataset, shuffle=shuffle) + shuffle = False + + if isinstance(collate_fn, str): + if collate_fn == 'auto': + if isinstance(dataset.dataset, DataSet): # 使用了 fastnlp dataset + collate_fn = deepcopy(dataset.dataset.collator) + collate_fn.set_backend(backend="oneflow") + else: + collate_fn = Collator(backend="oneflow") + else: + raise ValueError(f"collate_fn: {collate_fn} must be 'auto'") + + dl_kwargs = _match_param(OneflowDataLoader.__init__, DataLoader.__init__, fn_name=DataLoader.__name__) + if dl_kwargs is None: + super().__init__(dataset=dataset, batch_size=batch_size, shuffle=shuffle, sampler=sampler, + batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn, + pin_memory=pin_memory, drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, + multiprocessing_context=multiprocessing_context, generator=generator, + prefetch_factor=prefetch_factor, + persistent_workers=persistent_workers) + else: + super().__init__(**dl_kwargs) + + self.cur_batch_indices = None + + def __iter__(self): + self.collate_fn = indice_collate_wrapper(self.collate_fn) + for indices, data in super().__iter__(): + self.cur_batch_indices = indices + yield data + + def set_pad(self, field_name: Union[str, tuple], pad_val: Union[int, float, None] = 0, dtype=None, backend=None, + pad_fn: Callable = None) -> Collator: + """ + 如果需要对某个 field 的内容进行特殊的调整,请使用这个函数。 + + :param field_name: 需要调整的 field 的名称。如果 Dataset 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 + field 的 key 来表示,如果是 nested 的 dict,可以使用元组表示多层次的 key,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); + 如果 __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。如果该 field 在数据中没 + 有找到,则报错;如果 __getitem__ 返回的是就是整体内容,请使用 "_single" 。 + :param pad_val: 这个 field 的默认 pad 值。如果设置为 None,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 + field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 + 无意义。 + :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 + :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto'],分别代表,输出为 list, numpy.ndarray, + torch.Tensor, paddle.Tensor, jittor.Var, oneflow.Tensor 类型。若 pad_val 为 None ,该值无意义 。 + :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 + batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch + 形式,输出将被直接作为结果输出。 + :return: 返回 Collator + """ + collator = self._get_collator() + if isinstance(collator, Collator): + collator.set_pad(field_name=field_name, pad_val=pad_val, dtype=dtype, pad_fn=pad_fn, backend=backend) + return collator + else: + raise ValueError(f"Only when the collate_fn is a fastNLP Collator, set_pad() is allowed.") + + def _get_collator(self): + """ + 如果 collate_fn 是 Collator 对象,得到该对象。如果没有的话,返回 None + + :return: + """ + collator = None + if hasattr(self.collate_fn, '__wrapped__') and isinstance(self.collate_fn.__wrapped__, Collator): + collator = self.collate_fn.__wrapped__ + elif isinstance(self.collate_fn, Collator): + 
collator = self.collate_fn + return collator + + def set_ignore(self, *field_names) -> Collator: + """ + 如果有的内容不希望输出,可以在此处进行设置,被设置的 field 将在 batch 的输出中被忽略。 + Example:: + + collator.set_ignore('field1', 'field2') + + :param field_names: 需要忽略的 field 的名称。如果 Dataset 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 + field 的 key 来表示,如果是 nested 的 dict,可以使用元组来表示,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); 如果 + __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。 + :return: 返回 Collator 自身 + """ + collator = self._get_collator() + if isinstance(collator, Collator): + collator.set_ignore(*field_names) + return collator + else: + raise ValueError(f"Only when the collate_fn is a fastNLP Collator, set_ignore() is allowed.") + + def get_batch_indices(self) -> List[int]: + """ + 获取当前 ``batch`` 中每条数据对应的索引。 + + :return: 当前 ``batch`` 数据的索引; + """ + return self.cur_batch_indices + + +def prepare_oneflow_dataloader(ds_or_db, + batch_size: int = 16, + shuffle: bool = None, + sampler: Union["Sampler[int]", ReproducibleSampler, UnrepeatedSampler] = None, + batch_sampler: Union["Sampler[Sequence[int]]", ReproducibleBatchSampler] = None, + num_workers: int = 0, collate_fn: Union[Callable, str, None] = 'auto', + pin_memory: bool = False, drop_last: bool = False, + timeout: float = 0, worker_init_fn: Optional[Callable] = None, + multiprocessing_context=None, generator=None, prefetch_factor: int = 2, + persistent_workers: bool = False, + non_train_sampler: Union["Sampler[int]", ReproducibleSampler, UnrepeatedSampler] = None, + non_train_batch_size: int = None) \ + -> Union[OneflowDataLoader, Dict[str, OneflowDataLoader]]: + """ + ``prepare_oneflow_dataloader`` 的功能是将输入的单个或多个 dataset 同时转为 ``OneflowDataLoader`` 对象, 详见 :class:`~fastNLP.OneflowDataLoader`。 + 根据 ds_or_db 的类型 ``[DataSet, DataBundle, Dict[name, Dataset]]`` 不同而有不同返回结果, 具体如下: + + * 当 ds_or_db 为 ``DataSet``时,``prepare_oneflow_dataloader`` 会使用除了 non_train_batch_size 和 non_train_sampler 以外的参数来 + 帮你实例化一个 ``OneflowDataLoader`` 对象并返回该对象。 详见 :class:`~fastNLP.core.dataloaders.OneflowDataLoader`。 + * 当 ds_or_db 为 :class:`~fastNLP.io.DataBundle` 时,``prepare_oneflow_dataloader`` 会遍历 ``DataBundle`` 的数据集的 key-value + 来创建不同的 ``OneflowDataLoader`` 对象;当 key 中包含 'train' 字符串时,``prepare_oneflow_dataloader`` 默认该 value 为 train 数据集, + 会将 batch_size 和 sampler 作为参数,其他 key 不包含 'train' 字符串的数据集则使用 non_train_batch_size 和 non_train_sampler 作为参数。 + 最终根据 ``key: OneflowDataLoader`` 组成 ``Dict[key, OneflowDataLoader]`` 的字典返回。 + * 当 ds_or_db 为 ``Dict[str, DataSet]`` 字典类型时, ``prepare_oneflow_dataloader`` 会遍历 该 dict 的 key-value 来创建不同的 + ``OneflowDataLoader`` 对象;当 key 中包含 'train' 字符串时,``prepare_oneflow_dataloader`` 默认该 value 为 train 数据集,会将 batch_size 和 sampler 作为参数, + 其他 key 不包含 'train' 字符串的数据集则使用 non_train_batch_size 和 non_train_sampler 作为参数。最终根据 ``key: OneflowDataLoader`` 组成 + ``Dict[key, OneflowDataLoader]`` 的字典返回。 + + :param ds_or_db: 可以有以下三种取值, + + * ds_or_db 为 :class:`~fastNLP.io.DataBundle`, 返回值为 ``Dict[str, OneflowDataLoader]`` 的字典 + * ds_or_db 为 ``Dict[str, DataSet]`` 字典, 返回值为 ``Dict[str, OneflowDataLoader]`` 的字典 + * ds_or_db 为实现了 __getitem__() 和 __len__() 的对象 ,返回值为 :class:`~fastNLP.OneflowDataLoader` + + :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 + :param non_train_batch_size: 非 'train' 数据集的 ``OneflowDataLoader`` 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 'train' 则设置其 shuffle 为 True , + 其它的为 False 。 + :param sampler: 实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , + 默认为 None, 当其不为 None 时, shuffle 参数无效。
 + :param non_train_sampler: 非 'train' 数据集的实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , + 默认为 None, 当其不为 None 时, shuffle 参数无效。 + :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回一个 List 对象, List中的值为 + dataset 的下标 index ;默认为 None,当其不为 None 时,batch_size, sampler, shuffle 参数均失效。 + :param num_workers: 当 ``num_workers > 0`` 时, ``OneflowDataLoader`` 会开启 num_workers 个子进程来处理数据, 可以加快 + 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 + :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. + + * collate_fn 为 ``None`` 时,需要注意的是此时传进来的 dataset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, + ``OneflowDataLoader`` 调用默认的 oneflow 框架的 ``DataLoader`` 自带的 ``default_collate_fn`` 作为 collate_fn 的默认值, 其无法处理 + :class:`~fastNLP.core.dataset.DataSet` 的 dataset 对象。 + * collate_fn 为 ``'auto'`` 时,``OneflowDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 + 此时可以配套使用 ``OneflowDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 + * collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + + :param pin_memory: 如果其为 ``True``, 那么 ``OneflowDataLoader`` 会在返回数据张量之前将其 copy 到 cuda 的 pin memory 中。 + :param drop_last: 当 ``drop_last=True`` 时,``OneflowDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; + 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 + :param timeout: 子进程的输出队列获取数据的超时值 + :param worker_init_fn: init 函数,如果不设置为 None ,则将会在每个子进程初始化时调用该函数。 + :param multiprocessing_context: 多进程的上下文环境 + :param generator: 如果其不为 ``None``, 将会使用 RandomSampler 去生成随机的 index 且会为每个子进程生成一个 ``base_seed`` + :param prefetch_factor: 每个 worker 提前装载的 samples 数量。``2``意味着在所有的进程中会有 2*num_workers 的数据被预取。默认值为 ``2`` .
 + :param persistent_workers: 如果其为 ``True``, ``OneflowDataLoader`` 在迭代完一次 dataset 后不会关闭所有进程。默认为 ``False`` + + """ + + from fastNLP.io import DataBundle + + if isinstance(ds_or_db, DataBundle): + dl_bundle = {} + for name, ds in ds_or_db.iter_datasets(): + if 'train' in name: + dl_bundle[name] = OneflowDataLoader(dataset=ds, batch_size=batch_size, + shuffle=True if shuffle is None else shuffle, sampler=sampler, batch_sampler=batch_sampler, + num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, + drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, + multiprocessing_context=multiprocessing_context, generator=generator, + prefetch_factor=prefetch_factor, + persistent_workers=persistent_workers, + ) + else: + dl_bundle[name] = OneflowDataLoader(dataset=ds, + batch_size=non_train_batch_size if non_train_batch_size else batch_size, + shuffle=False if shuffle is None else shuffle, + sampler=non_train_sampler if non_train_sampler else sampler, + batch_sampler=batch_sampler, + num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, + drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, + multiprocessing_context=multiprocessing_context, generator=generator, + prefetch_factor=prefetch_factor, + persistent_workers=persistent_workers, + ) + return dl_bundle + + elif isinstance(ds_or_db, Mapping): + dl_bundle = {} + for name, ds in ds_or_db.items(): + if 'train' in name: + dl_bundle[name] = OneflowDataLoader(dataset=ds, batch_size=batch_size, + shuffle=True if shuffle is None else shuffle, sampler=sampler, batch_sampler=batch_sampler, + num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, + drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, + multiprocessing_context=multiprocessing_context, generator=generator, + prefetch_factor=prefetch_factor, + persistent_workers=persistent_workers, + ) + else: + dl_bundle[name] = OneflowDataLoader(dataset=ds, + batch_size=non_train_batch_size if non_train_batch_size else batch_size, + shuffle=False if shuffle is None else shuffle, + sampler=non_train_sampler if non_train_sampler else sampler, + batch_sampler=batch_sampler, + num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, + drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, + multiprocessing_context=multiprocessing_context, generator=generator, + prefetch_factor=prefetch_factor, + persistent_workers=persistent_workers, + ) + + return dl_bundle + + elif isinstance(ds_or_db, HasLenGetitemType): + dl = OneflowDataLoader(dataset=ds_or_db, batch_size=batch_size, + shuffle=False if shuffle is None else shuffle, sampler=sampler, batch_sampler=batch_sampler, + num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, + drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, + multiprocessing_context=multiprocessing_context, generator=generator, + prefetch_factor=prefetch_factor, persistent_workers=persistent_workers, + ) + return dl + + else: + raise ValueError(f"ds_or_db: {ds_or_db} must be fastnlp dataset or data_bundle or mapping!")
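A short, runnable sketch of the helper defined above (assumes oneflow is installed; the field names are invented):

    from fastNLP import DataSet
    from fastNLP.core.dataloaders import prepare_oneflow_dataloader

    ds = DataSet({'x': [[1, 2], [2, 3, 4], [4, 5, 6, 7]], 'y': [1, 0, 1]})
    dl = prepare_oneflow_dataloader(ds, batch_size=2, shuffle=True)
    dl.set_pad('x', pad_val=0)    # field 'x' is padded into oneflow.Tensor
    dl.set_ignore('y')            # field 'y' is dropped from the emitted batches
    for batch in dl:
        print(batch['x'].shape)   # e.g. oneflow.Size([2, 3])

    # A DataBundle or Dict[str, DataSet] input returns Dict[str, OneflowDataLoader]
    # instead, with the 'train' split shuffled by default and other splits in order.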
diff --git a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py index 8999322b..9eec6e8f 100644 --- a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py @@ -39,7 +39,7 @@ class _PaddleDataset(Dataset): def __getattr__(self, item): try: - self.dataset.__getattribute__(item) + return self.dataset.__getattribute__(item) except Exception as e: raise e @@ -194,8 +194,8 @@ class PaddleDataLoader(DataLoader): field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 无意义。 :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选['raw', 'numpy', 'Paddle', 'paddle', 'paddle', 'auto'],分别代表,输出为 list, numpy.ndarray, - Paddle.Tensor, paddle.Tensor, paddle.Var 类型。若 pad_val 为 None ,该值无意义 。 + :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto'],分别代表,输出为 list, numpy.ndarray, + torch.Tensor, paddle.Tensor, jittor.Var, oneflow.Tensor 类型。若 pad_val 为 None ,该值无意义 。 :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch 形式,输出将被直接作为结果输出。 diff --git a/fastNLP/core/dataloaders/prepare_dataloader.py b/fastNLP/core/dataloaders/prepare_dataloader.py index 9cda2bd3..1bac3257 100644 --- a/fastNLP/core/dataloaders/prepare_dataloader.py +++ b/fastNLP/core/dataloaders/prepare_dataloader.py @@ -9,6 +9,7 @@ import sys from .torch_dataloader import prepare_torch_dataloader from .paddle_dataloader import prepare_paddle_dataloader from .jittor_dataloader import prepare_jittor_dataloader +from .oneflow_dataloader import prepare_oneflow_dataloader from ...envs import FASTNLP_BACKEND, SUPPORT_BACKENDS from ..log import logger @@ -37,7 +38,7 @@ def prepare_dataloader(dataset, batch_size: int = 16, shuffle: bool = None, drop * 为 ``Callable`` 时,应当接受一个 ``batch`` 的数据作为参数,同时输出一个对象 。 * 为 ``None`` 时,使用各个框架的 DataLoader 的默认 ``collate_fn`` 。 :param num_workers: 使用多少进程进行数据的 fetch 。 - :param backend: 当前支持 ``["auto", "torch", "paddle", "jittor"]`` 四种类型。 + :param backend: 当前支持 ``["auto", "torch", "paddle", "jittor", "oneflow"]`` 五种类型。 * 为 ``auto`` 时,首先(1) 根据环境变量 "FASTNLP_BACKEND" 进行判断;如果没有设置则,(2)通过当前 ``sys.modules`` 中已经 import 的 ``backend`` 进行判定。如果以上均无法判定,则报错。如果找到了 * 为 ``torch`` 时,使用 :func:`~fastNLP.prepare_torch_dataloader` 。 * 为 ``paddle`` 时,使用 :func:`~fastNLP.prepare_paddle_dataloader` 。 * 为 ``jittor`` 时,使用 :func:`~fastNLP.prepare_jittor_dataloader` 。 + * 为 ``oneflow`` 时,使用 :func:`~fastNLP.prepare_oneflow_dataloader` 。 :return """ @@ -61,6 +63,10 @@ def prepare_dataloader(dataset, batch_size: int = 16, shuffle: bool = None, drop prepare_jittor_dataloader(ds_or_db=dataset, sampler=None, collate_fn=collate_fn, num_workers=num_workers, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last) + elif backend == 'oneflow': + return prepare_oneflow_dataloader(ds_or_db=dataset, batch_sampler=None, collate_fn=collate_fn, + num_workers=num_workers, shuffle=shuffle, sampler=None, + batch_size=batch_size) else: raise ValueError(f"Currently we do not support backend:{backend}.") diff --git a/fastNLP/core/dataloaders/torch_dataloader/fdl.py b/fastNLP/core/dataloaders/torch_dataloader/fdl.py index 9b0ab8d3..09211f71 100644 --- a/fastNLP/core/dataloaders/torch_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/torch_dataloader/fdl.py @@ -161,8 +161,8 @@ class TorchDataLoader(DataLoader): field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 无意义。 :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选['raw', 'numpy', 'torch', 'torch', 'jittor', 'auto'],分别代表,输出为 list, numpy.ndarray, - torch.Tensor, torch.Tensor, jittor.Var 类型。若 pad_val 为 None ,该值无意义 。 + :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 
'auto'],分别代表,输出为 list, numpy.ndarray, + torch.Tensor, paddle.Tensor, jittor.Var, oneflow.Tensor 类型。若 pad_val 为 None ,该值无意义 。 :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch 形式,输出将被直接作为结果输出。 diff --git a/fastNLP/core/dataloaders/utils.py b/fastNLP/core/dataloaders/utils.py index 06f09da3..4f8fa743 100644 --- a/fastNLP/core/dataloaders/utils.py +++ b/fastNLP/core/dataloaders/utils.py @@ -118,17 +118,22 @@ class OverfitDataLoader: 实现一个简单的迭代器来模拟实际的 dataloader,从给定的 dataloader 中取出部分数据,来让 Trainer 实现 overfit 的功能; """ - def __init__(self, dataloader, overfit_batches: int): + def __init__(self, dataloader, overfit_batches: int, batches=None): + # batches 参数是给重新初始化dataloader使用的 self.dataloader = dataloader # 需要将实际的 dataloader 挂载到该对象上,从而应付一些对于实际的 dataloader 的操作; - self.batches = [] - self.overfit_batches = int(overfit_batches) - - if self.overfit_batches > len(dataloader): - logger.warning("Parameter 'overfit_batches' is bigger than the length of 'train_dataloader'.") - - for idx, batch in enumerate(dataloader): - if idx < self.overfit_batches or self.overfit_batches <= -1: - self.batches.append(batch) + if batches is None: + self.batches = [] + self.overfit_batches = int(overfit_batches) + + if self.overfit_batches > len(dataloader): + logger.warning("Parameter 'overfit_batches' is bigger than the length of 'train_dataloader'.") + + for idx, batch in enumerate(dataloader): + if idx < self.overfit_batches or self.overfit_batches <= -1: + self.batches.append(batch) + else: + assert isinstance(batches, list) + self.batches = batches def __len__(self): return len(self.batches) diff --git a/fastNLP/core/dataset/dataset.py b/fastNLP/core/dataset/dataset.py index fff8b5c2..0238a65d 100644 --- a/fastNLP/core/dataset/dataset.py +++ b/fastNLP/core/dataset/dataset.py @@ -445,7 +445,7 @@ class DataSet: "DataSet object has {} fields, but attempt to append an Instance object with {} fields." 
.format(len(self.field_arrays), len(instance.fields))) for name, field in instance.items(): - assert name in self.field_arrays + assert name in self.field_arrays, f'Field:`{name}` is not found in {self.field_arrays.keys()}' try: self.field_arrays[name].append(field) except Exception as e: raise e diff --git a/fastNLP/core/drivers/__init__.py b/fastNLP/core/drivers/__init__.py index 127e723a..84e0e9b5 100644 --- a/fastNLP/core/drivers/__init__.py +++ b/fastNLP/core/drivers/__init__.py @@ -10,14 +10,18 @@ __all__ = [ "JittorDriver", "JittorSingleDriver", "JittorMPIDriver", + "OneflowDriver", + "OneflowSingleDriver", + "OneflowDDPDriver", 'torch_seed_everything', 'paddle_seed_everything', + 'oneflow_seed_everything', 'optimizer_state_to_device' ] from .torch_driver import TorchDriver, TorchSingleDriver, TorchDDPDriver, DeepSpeedDriver, torch_seed_everything, optimizer_state_to_device from .jittor_driver import JittorDriver, JittorMPIDriver, JittorSingleDriver from .paddle_driver import PaddleDriver, PaddleFleetDriver, PaddleSingleDriver, paddle_seed_everything +from .oneflow_driver import OneflowDriver, OneflowSingleDriver, OneflowDDPDriver, oneflow_seed_everything from .driver import Driver diff --git a/fastNLP/core/drivers/choose_driver.py b/fastNLP/core/drivers/choose_driver.py index 56d30e6f..0f173b1c 100644 --- a/fastNLP/core/drivers/choose_driver.py +++ b/fastNLP/core/drivers/choose_driver.py @@ -1,6 +1,7 @@ from typing import Union, Optional, List from .driver import Driver +from ..utils import is_torch_module, is_paddle_module, is_jittor_module, is_oneflow_module def choose_driver(model, driver: Union[str, Driver], device: Optional[Union[int, List[int], str]], **kwargs) -> Driver: @@ -17,6 +18,18 @@ def choose_driver(model, driver: Union[str, Driver], device: Optional[Union[int, if isinstance(driver, Driver): return driver + if driver == "auto": + if is_torch_module(model): + driver = "torch" + elif is_paddle_module(model): + driver = "paddle" + elif is_jittor_module(model): + driver = "jittor" + elif is_oneflow_module(model): + driver = "oneflow" + else: + raise ValueError(f"Cannot choose driver automatically based on model, please set `driver` specifically.") + if driver in {"torch", "fairscale", "deepspeed"}: from fastNLP.core.drivers.torch_driver.initialize_torch_driver import initialize_torch_driver return initialize_torch_driver(driver, device, model, **kwargs) @@ -26,6 +39,9 @@ def choose_driver(model, driver: Union[str, Driver], device: Optional[Union[int, elif driver in {"paddle"}: from fastNLP.core.drivers.paddle_driver.initialize_paddle_driver import initialize_paddle_driver return initialize_paddle_driver(driver, device, model, **kwargs) + elif driver in {"oneflow"}: + from fastNLP.core.drivers.oneflow_driver.initialize_oneflow_driver import initialize_oneflow_driver + return initialize_oneflow_driver(driver, device, model, **kwargs) else: raise ValueError("Parameter `driver` can only be one of these values: ['torch', 'fairscale', " - "'jittor', 'paddle'].") \ No newline at end of file + "'jittor', 'paddle', 'oneflow'].") \ No newline at end of file
diff --git a/fastNLP/core/drivers/jittor_driver/jittor_driver.py b/fastNLP/core/drivers/jittor_driver/jittor_driver.py index 312f0d83..542b39f9 100644 --- a/fastNLP/core/drivers/jittor_driver/jittor_driver.py +++ b/fastNLP/core/drivers/jittor_driver/jittor_driver.py @@ -40,20 +40,22 @@ __all__ = [ class JittorDriver(Driver): r""" - ``Jittor`` 框架的 ``Driver`` + ``Jittor`` 框架的 ``Driver``,是 ``JittorSingleDriver`` 和 ``JittorMPIDriver`` 的父类。 - .. note:: + .. warning:: - 这是一个正在开发中的功能,敬请期待。 + 您不应当直接初始化该类,然后传入给 ``Trainer``,换句话说,您应当使用该类的子类 ``JittorSingleDriver`` 和 ``JittorMPIDriver``,而不是 + 该类本身; - .. todo:: + .. note:: - 实现 fp16 的设置,且支持 cpu 和 gpu 的切换; - 实现用于断点重训的 save 和 load 函数; + 您可以在使用 ``JittorSingleDriver`` 和 ``JittorMPIDriver`` 时使用 ``JittorDriver`` 提供的接口; + :param model: 训练时使用的 **jittor** 模型; + :param fp16: 是否开启混合精度训练; + :param jittor_kwargs: """ - - def __init__(self, model, fp16: bool = False, **kwargs): + def __init__(self, model, fp16: bool = False, jittor_kwargs: Dict = {}, **kwargs): if not isinstance(model, Module): raise ValueError(f"Parameter `model` can not be `{type(model)}` in `JittorDriver`, it should be exactly " f"`jittor.Module` type.") @@ -65,6 +67,7 @@ class JittorDriver(Driver): jt.flags.auto_mixed_precision_level = 0 self.fp16 = fp16 self._auto_cast = nullcontext + self._jittor_kwargs = jittor_kwargs # 用来设置是否关闭 auto_param_call 中的参数匹配问题; self.wo_auto_param_call = kwargs.get("model_wo_auto_param_call", False) diff --git a/fastNLP/core/drivers/jittor_driver/mpi.py b/fastNLP/core/drivers/jittor_driver/mpi.py index b072b83d..47e9279b 100644 --- a/fastNLP/core/drivers/jittor_driver/mpi.py +++ b/fastNLP/core/drivers/jittor_driver/mpi.py @@ -34,10 +34,11 @@ class JittorMPIDriver(JittorDriver): parallel_device: None, is_pull_by_jittor_run: bool = False, fp16: bool = False, + jittor_kwargs: Dict = {}, **kwargs ): - super(JittorMPIDriver, self).__init__(model, fp16=fp16, **kwargs) + super(JittorMPIDriver, self).__init__(model, fp16=fp16, jittor_kwargs=jittor_kwargs, **kwargs) raise NotImplementedError("MPI for Jittor is not supported right now.") self.is_pull_by_jittor_run = is_pull_by_jittor_run diff --git a/fastNLP/core/drivers/jittor_driver/single_device.py b/fastNLP/core/drivers/jittor_driver/single_device.py index 386f8694..be8ef1b9 100644 --- a/fastNLP/core/drivers/jittor_driver/single_device.py +++ b/fastNLP/core/drivers/jittor_driver/single_device.py @@ -25,15 +25,6 @@ class JittorSingleDriver(JittorDriver): r""" ``Jittor`` 框架下用于 ``cpu`` 和单卡 ``gpu`` 运算的 ``Driver``。 - .. note:: - - 这是一个正在开发中的功能,敬请期待。 - - .. 
todo:: - - 支持 cpu 和 gpu 的切换; - 实现断点重训中替换 dataloader 的 set_dist_repro_dataloader 函数 - :param model: 传入给 ``Trainer`` 的 ``model`` 参数; :param device: 训练和模型所在的设备,在 **Jittor** 中,应当为以下值之一:``[None, 'cpu', 'gpu', 'cuda']``; @@ -43,12 +34,13 @@ class JittorSingleDriver(JittorDriver): 表示在显卡设备上进行训练; :param fp16: 是否开启 fp16; + :param jittor_kwargs: """ - def __init__(self, model, device=None, fp16: bool = False, **kwargs): + def __init__(self, model, device=None, fp16: bool = False, jittor_kwargs: Dict = {}, **kwargs): if device not in [None, "cpu", "gpu", "cuda"]: raise RuntimeError("Parameter `device` should be one of [None, 'cpu', 'gpu', 'cuda'] .") - super(JittorSingleDriver, self).__init__(model, fp16) + super(JittorSingleDriver, self).__init__(model, fp16, jittor_kwargs=jittor_kwargs) self.model_device = device if device is not None else "cpu" diff --git a/fastNLP/core/drivers/oneflow_driver/__init__.py b/fastNLP/core/drivers/oneflow_driver/__init__.py new file mode 100644 index 00000000..12beffc0 --- /dev/null +++ b/fastNLP/core/drivers/oneflow_driver/__init__.py @@ -0,0 +1,18 @@ +__all__ = [ + "OneflowDDPDriver", + "OneflowSingleDriver", + "OneflowDriver", + "oneflow_seed_everything", + "optimizer_state_to_device" +] + +from .ddp import OneflowDDPDriver +from .single_device import OneflowSingleDriver +from .oneflow_driver import OneflowDriver +from .utils import oneflow_seed_everything, optimizer_state_to_device + + + + + + diff --git a/fastNLP/core/drivers/oneflow_driver/ddp.py b/fastNLP/core/drivers/oneflow_driver/ddp.py new file mode 100644 index 00000000..fb992bc8 --- /dev/null +++ b/fastNLP/core/drivers/oneflow_driver/ddp.py @@ -0,0 +1,323 @@ +import os +from typing import List, Optional, Union, Dict + +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW + +if _NEED_IMPORT_ONEFLOW: + import oneflow + import oneflow.comm as comm + import oneflow.env as dist_env + from oneflow.nn.parallel import DistributedDataParallel + from oneflow.utils.data import BatchSampler + +__all__ = [ + "OneflowDDPDriver" +] + +from .oneflow_driver import OneflowDriver +from fastNLP.core.drivers.oneflow_driver.utils import ( + replace_sampler, + replace_batch_sampler +) +from fastNLP.core.utils import check_user_specific_params +from fastNLP.core.samplers import ReproducibleSampler, RandomSampler, UnrepeatedSequentialSampler, \ + ReproducibleBatchSampler, \ + re_instantiate_sampler, UnrepeatedSampler, conversion_between_reproducible_and_unrepeated_sampler +from fastNLP.envs import FASTNLP_GLOBAL_SEED, FASTNLP_NO_SYNC +from fastNLP.core.log import logger +from fastNLP.core.drivers.oneflow_driver.dist_utils import fastnlp_oneflow_all_gather, fastnlp_oneflow_broadcast_object +from .utils import _check_dataloader_args_for_distributed + + +class OneflowDDPDriver(OneflowDriver): + r""" + ``OneflowDDPDriver`` 实现了动态图下使用 ``DistributedDataParallel`` 进行的数据并行分布式训练。 + + .. note:: + + 您在绝大多数情况下不需要自己使用到该类,通过向 ``Trainer`` 传入正确的参数,您可以方便快速地部署您的分布式训练; + + ``OneflowDDPDriver`` 目前支持两种启动方式: + + 1. 用户不做任何处理,通过运行 ``python -m oneflow.distributed.launch --nproc_per_node 2 train.py`` 启动; + 2. 
用户将模型通过 ``DistributedDataParallel`` 处理后,通过运行 ``python -m oneflow.distributed.launch --nproc_per_node 2 train.py`` 启动; + + 注意多机的启动强制要求用户在每一台机器上使用 ``python -m oneflow.distributed.launch`` 启动;因此我们不会在 ``OneflowDDPDriver`` 中保存 + 任何当前有多少台机器的信息; + + :param model: 传入给 ``Trainer`` 的 ``model`` 参数; + :param parallel_device: 该参数无效,**FastNLP** 会自动获取当前进程的设备; + :param fp16: 是否开启 fp16 训练;目前该参数无效; + :param oneflow_kwargs: + * *ddp_kwargs* -- 用于 ``DistributedDataParallel`` 的其它参数,详情可查阅 **oneflow** 的官方文档; + """ + + def __init__( + self, + model, + parallel_device: Optional["oneflow.device"], + fp16: bool = False, + oneflow_kwargs: Dict = {}, + **kwargs + ): + + super(OneflowDDPDriver, self).__init__(model, fp16=fp16, oneflow_kwargs=oneflow_kwargs, **kwargs) + + # oneflow 会自己初始化通信组,因此 parallel_device 实际上不起作用,可以通过 current_device 获取设备 + self.model_device = oneflow.device("cuda", oneflow.cuda.current_device()) + self._data_device = self.model_device + + self.global_rank = int(os.environ["RANK"]) + self.world_size = int(os.environ["WORLD_SIZE"]) + + self._ddp_kwargs = self._oneflow_kwargs.get("ddp_kwargs", {}) + check_user_specific_params(self._ddp_kwargs, DistributedDataParallel.__init__, DistributedDataParallel.__name__) + if len(self.model._buffers) != 0 and self._ddp_kwargs.get("broadcast_buffers", None) is None: + logger.info("Notice your model has buffers and you are using `OneflowDDPDriver`, but you do not set " + "'broadcast_buffers' in your trainer. Cause in most situations, this parameter can be set" + " to 'False' to avoid redundant data communication between different processes.") + + self.output_from_new_proc = kwargs.get("output_from_new_proc", "only_error") + assert isinstance(self.output_from_new_proc, str), "Parameter `output_from_new_proc` can only be `str` type." 
+ if self.output_from_new_proc not in {"all", "ignore", "only_error"}: + os.makedirs(name=self.output_from_new_proc, exist_ok=True) + self.output_from_new_proc = os.path.abspath(self.output_from_new_proc) + + self._has_setup = False # 设置这一参数是因为 evaluator 中也会进行 setup 操作,但是显然是不需要的也不应该的; + self._has_ddpwrapped = False + + def setup(self): + r""" + 将模型用 ``DistributedDataParallel`` 进行处理; + """ + if self._has_setup: + return + self._has_setup = True + + self.configure_ddp() + self.barrier() + # 初始化 self._pids,从而使得每一个进程都能接受到 rank0 的 send 操作; + # self._pids = [oneflow.tensor(0, dtype=oneflow.int).to(self.data_device) for _ in range(dist_env.get_world_size())] + # comm.all_gather(self._pids, oneflow.tensor(os.getpid(), dtype=oneflow.int).to(self.data_device)) + # local_world_size = int(os.environ.get("LOCAL_WORLD_SIZE")) if "LOCAL_WORLD_SIZE" in os.environ else None + # if local_world_size is None: + # local_world_size = oneflow.tensor(int(os.environ.get("LOCAL_RANK")), dtype=oneflow.int).to(self.data_device) + # comm.all_reduce(local_world_size, op=dist_env.ReduceOp.MAX) + # local_world_size = local_world_size.tolist() + 1 + + # node_rank = self.global_rank // local_world_size + # self._pids = self._pids[node_rank * local_world_size: (node_rank + 1) * local_world_size] + # self._pids = self.tensor_to_numeric(self._pids) + + def configure_ddp(self): + if not hasattr(self.model, "_ddp_state_for_reversed_params"): + self.model.to(self.model_device) + self.model = DistributedDataParallel( + # 注意这里的 self.model_device 是 `oneflow.device` type,因此 self.model_device.index; + self.model, + **self._ddp_kwargs + ) + self._has_ddpwrapped = True + + @property + def master_address(self) -> str: + return os.environ.get("MASTER_ADDR") + + @property + def master_port(self) -> str: + return os.environ.get("MASTER_PORT") + + @property + def world_size(self) -> int: + return self._world_size + + @world_size.setter + def world_size(self, size: int): + self._world_size = size + + @property + def global_rank(self) -> int: + return self._global_rank + + @global_rank.setter + def global_rank(self, rank: int) -> None: + self._global_rank = rank + + @property + def local_rank(self) -> int: # 这个不会受到 all_rank_call_context 的影响 + return int(os.environ.get("LOCAL_RANK", 0)) + + @property + def data_device(self): + return self._data_device + + def set_dist_repro_dataloader(self, dataloader, + dist: Optional[Union[str, ReproducibleSampler, ReproducibleBatchSampler]] = None, + reproducible: bool = False): + # 如果 dist 为 ReproducibleBatchSampler, ReproducibleSampler 说明是在断点重训时 driver.load_checkpoint 函数调用; + # 注意这里不需要调用 dist_sampler.set_distributed;因为如果用户使用的是 OneflowDDPDriver,那么其在 Trainer 初始化的时候就已经调用了该函数; + if isinstance(dist, ReproducibleBatchSampler): + dist.set_distributed( + num_replicas=self.world_size, + rank=self.global_rank, + pad=True + ) + return replace_batch_sampler(dataloader, dist) + if isinstance(dist, ReproducibleSampler): + dist.set_distributed( + num_replicas=self.world_size, + rank=self.global_rank, + pad=True + ) + return replace_sampler(dataloader, dist) + + # 如果 dist 为 str 或者 None,说明是在 trainer 初始化时调用; + # trainer, evaluator + if dist is None: + if reproducible: + raise RuntimeError("It is not allowed to save checkpoint if the sampler is not allowed to be replaced.") + else: + args = self.get_dataloader_args(dataloader) + if isinstance(args.batch_sampler, ReproducibleBatchSampler): + return replace_batch_sampler(dataloader, re_instantiate_sampler(args.batch_sampler)) + if isinstance(args.sampler, 
ReproducibleSampler):
+                    return replace_sampler(dataloader, re_instantiate_sampler(args.sampler))
+                return dataloader
+        # trainer
+        elif dist == "dist":
+            args = self.get_dataloader_args(dataloader)
+            # 如果用户的 trainer.use_dist_sampler 为 True,那么此时其是否进行断点重训,不影响这里的行为;
+            if isinstance(args.batch_sampler, ReproducibleBatchSampler):
+                batch_sampler = re_instantiate_sampler(args.batch_sampler)
+                batch_sampler.set_distributed(
+                    num_replicas=self.world_size,
+                    rank=self.global_rank,
+                    pad=True
+                )
+                return replace_batch_sampler(dataloader, batch_sampler)
+            elif isinstance(args.sampler, ReproducibleSampler):
+                sampler = re_instantiate_sampler(args.sampler)
+                sampler.set_distributed(
+                    num_replicas=self.world_size,
+                    rank=self.global_rank,
+                    pad=True
+                )
+                return replace_sampler(dataloader, sampler)
+            else:
+                _check_dataloader_args_for_distributed(args, controller="Trainer")
+                sampler = RandomSampler(
+                    dataset=args.dataset,
+                    shuffle=args.shuffle,
+                    seed=int(os.environ.get(FASTNLP_GLOBAL_SEED, 0))
+                )
+                sampler.set_distributed(
+                    num_replicas=self.world_size,
+                    rank=self.global_rank,
+                    pad=True
+                )
+                return replace_sampler(dataloader, sampler)
+        # evaluator
+        elif dist == "unrepeatdist":
+            args = self.get_dataloader_args(dataloader)
+            if isinstance(args.sampler, ReproducibleSampler):
+                sampler = conversion_between_reproducible_and_unrepeated_sampler(args.sampler)
+            elif not isinstance(args.sampler, UnrepeatedSampler):
+                _check_dataloader_args_for_distributed(args, controller="Evaluator")
+                sampler = UnrepeatedSequentialSampler(
+                    dataset=args.dataset
+                )
+            else:
+                sampler = re_instantiate_sampler(args.sampler)
+            sampler.set_distributed(
+                num_replicas=self.world_size,
+                rank=self.global_rank
+            )
+            batch_sampler = BatchSampler(sampler, args.batch_size, drop_last=False)
+            return replace_batch_sampler(dataloader, batch_sampler)
+        else:
+            raise ValueError(
+                "Parameter `dist_sampler` can only be one of three values: ('dist', 'unrepeatdist', None).")
+
+    def is_global_zero(self):
+        r"""
+        :return: 返回当前的进程是否在全局上是进程 0 ;
+        """
+        return self.global_rank == 0
+
+    def get_model_no_sync_context(self):
+        r"""
+        :return: 返回一个 ``context`` 上下文环境,用于关闭各个进程之间的同步;该功能暂时无效,返回一个空的上下文环境;
+        """
+        # TODO 暂时没有在 oneflow 中找到类似的功能;
+        from fastNLP.core.utils import nullcontext
+        return nullcontext
+
+    def unwrap_model(self):
+        r"""
+        :return: 返回原始模型;
+        """
+        return self.model
+
+    def get_local_rank(self) -> int:
+        r"""
+        :return: 返回当前进程局部的进程编号;
+        """
+        return self.local_rank
+
+    def barrier(self):
+        r"""
+        通过使用该函数来使得各个进程之间同步操作;
+        """
+        if int(os.environ.get(FASTNLP_NO_SYNC, 0)) < 1:  # 当 FASTNLP_NO_SYNC 小于 1 时实际执行
+            comm.barrier()
+
+    def is_distributed(self):
+        r"""
+        :return: 返回当前使用的 driver 是否是分布式的 driver,对于 ``OneflowDDPDriver`` 来说,该函数一定返回 ``True``;
+        """
+        return True
+
+    def broadcast_object(self, obj, src: int = 0, **kwargs):
+        r"""
+        将 src 端的 obj 对象(可能是 tensor,也可能是 object)广播到其它所有进程。如果是非 tensor 的对象,会尝试使用 pickle 进行打包
+        传输,到达目标进程后再加载回来。仅在分布式的 driver 中有实际意义。
+
+        :param obj: obj,可能是 Tensor 或嵌套类型的数据
+        :param src: source 端的 global rank 。
+        :return: 各个进程上均返回 src 端广播的 obj 对象。
+        """
+        if int(os.environ.get(FASTNLP_NO_SYNC, 0)) == 2:  # 如果 FASTNLP_NO_SYNC == 2 直接返回。
+            return
+        return fastnlp_oneflow_broadcast_object(obj, src, device=self.data_device)
+
+    def all_gather(self, obj) -> List:
+        r"""
+        将 obj 互相传送到其它所有的 rank 上,其中
obj 可能是 Tensor,也可能是嵌套结构的 object 。如果不是基础类型的数据,尝试通过
+        pickle 进行序列化,接收到之后再反序列化。
+
+        example::
+
+            obj = {
+                'a': [1, 1],
+                'b': [[1, 2], [1, 2]],
+                'c': {
+                    'd': [1, 2]
+                }
+            }
+            ->
+            [
+                {'a': 1, 'b': [1, 2], 'c': {'d': 1}},
+                {'a': 1, 'b': [1, 2], 'c': {'d': 2}}
+            ]
+
+        :param obj: 需要传输的对象,在每个 rank 上都应该保持相同的结构。
+        :return: 各个 rank 上的 obj 组成的 list 。
+        """
+        if int(os.environ.get(FASTNLP_NO_SYNC, 0)) == 2:  # FASTNLP_NO_SYNC 为 2 表示不执行任何分布式同步,直接返回
+            return [obj]
+        return fastnlp_oneflow_all_gather(obj)
diff --git a/fastNLP/core/drivers/oneflow_driver/dist_utils.py b/fastNLP/core/drivers/oneflow_driver/dist_utils.py
new file mode 100644
index 00000000..e84df213
--- /dev/null
+++ b/fastNLP/core/drivers/oneflow_driver/dist_utils.py
@@ -0,0 +1,306 @@
+import pickle
+import os
+from typing import Any, List
+
+from fastNLP.core.utils import apply_to_collection, get_oneflow_device
+from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW
+from fastNLP.envs.env import FASTNLP_NO_SYNC
+if _NEED_IMPORT_ONEFLOW:
+    import oneflow
+    import oneflow.comm as comm
+    import oneflow.env as dist_env
+
+PROTOCOL_VERSION = 1
+
+def _validate_output_list_for_rank(my_rank, dst, gather_list):
+    if dst == my_rank:
+        if not gather_list:
+            raise ValueError(
+                "Argument ``gather_list`` must be specified on destination rank."
+            )
+    elif gather_list:
+        raise ValueError(
+            "Argument ``gather_list`` must NOT be specified "
+            "on non-destination ranks."
+        )
+
+
+def fastnlp_oneflow_gather_object(obj, dst=0):
+    """
+    从其它 rank 将 obj 收集(gather)到 dst rank 。
+
+    Example::
+
+        >>> # Assumes world_size of 3.
+        >>> gather_objects = ["foo", 12, {1: 2}]  # any picklable object
+        >>> output = fastnlp_oneflow_gather_object(
+        ...     gather_objects[dist_env.get_rank()],
+        ...     dst=0
+        ... )
+        >>> # On rank 0
+        >>> output
+        ['foo', 12, {1: 2}]
+
+    :param obj: 需要发送的 obj 对象,需要是可以被 pickle 的对象
+    :param dst: 目标的 rank 。
+    :return: 在 dst 上返回长度为 world_size 的 list,依次为 rank 0、rank 1 …… 上的 obj;其它 rank 上返回 None 。
+    """
+    if int(os.environ.get(FASTNLP_NO_SYNC, '0')) == 2:
+        return [obj]
+
+    if dist_env.get_rank() == dst:
+        object_gather_list = [None for _ in range(dist_env.get_world_size())]
+    else:
+        object_gather_list = None
+
+    # Ensure object_gather_list is specified appropriately.
+    my_rank = dist_env.get_rank()
+    _validate_output_list_for_rank(my_rank, dst, object_gather_list)
+    # 防止 unpickle 的时候出现在了发送的 gpu 上。
+    obj = apply_to_collection(obj, oneflow.Tensor, _to_device, device=oneflow.device("cpu"))
+    input_tensor, local_size = _object_to_tensor(obj)
+    current_device = oneflow.device("cuda")
+    input_tensor = input_tensor.to(current_device)
+    local_size = local_size.to(current_device)
+    # Gather all local sizes. This is so that we can find the max size, and index
+    # until the correct size when deserializing the tensors.
+    group_size = dist_env.get_world_size()
+    object_sizes_tensor = oneflow.zeros(group_size, dtype=oneflow.long, device=current_device)
+    object_size_list = [
+        object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size)
+    ]
+    # Allgather tensor sizes. An all-gather is needed here despite this being a
+    # gather, since each rank needs to broadcast a tensor of the same (maximal)
+    # size.
+    comm.all_gather(object_size_list, local_size)
+    max_object_size = int(max(object_size_list).item())  # type: ignore[type-var]
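The pickle-to-byte-tensor scheme used throughout this file is the backbone of every collective here: each rank serializes its object, the ranks exchange byte counts, and every buffer is padded to the common maximum before the collective call. A standalone sketch of that round trip (single process, the ``comm.*`` collectives elided; the 8-byte pad is an arbitrary stand-in for "max size across ranks"):

```python
# Illustrative only; mirrors _object_to_tensor / _tensor_to_object below.
import pickle
import oneflow

def to_byte_tensor(obj):
    data = pickle.dumps({"protocol_version": 1, "data": obj})
    tensor = oneflow.ByteTensor(list(data))
    return tensor, tensor.numel()

def from_byte_tensor(tensor, size):
    payload = pickle.loads(tensor.detach().cpu().numpy().tobytes()[:size])
    assert payload["protocol_version"] == 1
    return payload["data"]

obj = {"rank": 0, "score": [0.9, 0.8]}
tensor, size = to_byte_tensor(obj)
# every rank pads its buffer to the maximum size before the collective call,
# so the trailing bytes are garbage and must be cut off via `size` on decode
padded = oneflow.cat([tensor, oneflow.zeros(8, dtype=oneflow.uint8)])
assert from_byte_tensor(padded, size) == obj
```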
+    # Resize tensor to max size across all ranks (pad with zeros; reshape cannot grow a tensor).
+    pad_size = max_object_size - input_tensor.numel()
+    if pad_size > 0:
+        input_tensor = oneflow.cat(
+            [input_tensor, oneflow.zeros(pad_size, dtype=oneflow.uint8, device=current_device)]
+        )
+    # Avoid populating output tensors if the result won't be gathered on this rank.
+    if my_rank == dst:
+        coalesced_output_tensor = oneflow.empty(
+            max_object_size * group_size, dtype=oneflow.uint8, device=current_device
+        )
+        # Output tensors are nonoverlapping views of coalesced_output_tensor
+        output_tensors = [
+            coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)]
+            for i in range(group_size)
+        ]
+    # All ranks call gather with equal-sized tensors.
+    comm.gather(
+        input_tensor,
+        gather_list=output_tensors if my_rank == dst else None,
+        dst=dst,
+    )
+    if my_rank != dst:
+        return
+    for i, tensor in enumerate(output_tensors):
+        tensor = tensor.type(oneflow.uint8)  # type: ignore[call-overload]
+        tensor_size = object_size_list[i]
+        object_gather_list[i] = _tensor_to_object(tensor, tensor_size)
+    return object_gather_list
+
+
+def _object_to_tensor(obj, device=None):
+    obj = {"protocol_version": PROTOCOL_VERSION, "data": obj}
+    pickled_bytes = pickle.dumps(obj)
+
+    byte_tensor = oneflow.ByteTensor(list(pickled_bytes))
+    local_size = oneflow.LongTensor([byte_tensor.numel()])
+    if device is not None:
+        byte_tensor = byte_tensor.to(device)
+        local_size = local_size.to(device)
+    return byte_tensor, local_size
+
+def _tensor_to_object(tensor, tensor_size):
+    buf = tensor.detach().cpu().numpy().tobytes()[:tensor_size]
+    res = pickle.loads(buf)
+    assert res["protocol_version"] == PROTOCOL_VERSION
+    return res["data"]
+
+def send_recv_object(obj, src, cur_rank, device):
+    r"""
+    oneflow 中的单点对多点的分发函数;
+
+    例如将进程 0 上的对象 object 分发到其它进程上;
+
+    Example::
+
+        cur_rank = int(os.environ.get('LOCAL_RANK', 0))
+
+        # 拿到 local_device
+
+        send_recv_object(object, 0, cur_rank, local_device)
+
+    :param obj: 一个可以序列化的 python 对象;
+    :param src: 从哪一个 rank 上发送到其它 rank;
+    :param cur_rank: 当前的进程的 rank 序号;
+    :param device: 当前的进程所在的设备;
+    :return: 各个进程上均返回 src 端发送的 obj 对象;
+    """
+    # src rank send to all other ranks
+    size = oneflow.LongTensor([0]).to(device)
+
+    if cur_rank == src:
+        world_size = dist_env.get_world_size()
+        tensor, size = _object_to_tensor(obj)
+        tensor = tensor.to(device)
+        size = size.to(device)
+
+        # 首先同步 obj 的 size 的信息;
+        comm.broadcast(size, src)
+        for subrank in range(world_size):
+            if subrank != src:
+                comm.send(tensor=tensor, dst=subrank)
+    else:
+        comm.broadcast(size, src)
+        tensor = oneflow.ByteTensor([0] * size.item()).to(device)
+        comm.recv(tensor=tensor, src=src)
+
+    return _tensor_to_object(tensor.cpu(), size)
+
+
+def _to_device(tensor, device):
+    return tensor.contiguous().to(device)
+
+
+def fastnlp_oneflow_all_gather(obj: Any, device=None) -> List:
+    """
+    实现任何类型的数据都使用该接口可以进行 all_gather 操作。对于非 tensor 类型的数据,通过 pickle 序列化再反序列化的方式进行传输。
+
+    example::
+
+        obj = {
+            'a': [1, 1],
+            'b': [[1, 2], [1, 2]],
+            'c': {
+                'd': [1, 2]
+            }
+        }
+        ->
+        [
+            {'a': 1, 'b': [1, 2], 'c': {'d': 1}},
+            {'a': 1, 'b': [1, 2], 'c': {'d': 2}}
+        ]
+
+    :param obj: 任意结构的数据,如果为 tensor ,需要保证每个显卡上的 tensor 的形状是一样的。如果传入的是非 tensor 对象都将直接进行
+        序列化之后进行传输。
+    :param device: 当前该参数无意义。
+    :return: 返回的结果是 [obj0, obj1, ...],其中 obj_i 即为第 i 个 rank 上的 obj 。
+    """
+    if int(os.environ.get(FASTNLP_NO_SYNC, "0")) == 2:
+        return [obj]
+
+    if isinstance(obj, oneflow.Tensor):
+        objs = [oneflow.zeros_like(obj) for _ in range(dist_env.get_world_size())]
+        comm.all_gather(objs, obj)
+    else:
+        objs = [None for _ in range(dist_env.get_world_size())]
+        # 防止 unpickle 的时候弄到发送的 gpu 上了
+        obj = apply_to_collection(obj, oneflow.Tensor, _to_device,
device=oneflow.device("cpu"))
+        all_gather_object(objs, obj)
+    return objs
+
+
+def fastnlp_oneflow_broadcast_object(obj, src, device=None):
+    """
+    将 src 上的 obj 对象广播到其它 rank 上。
+
+    :param obj: 需要发送的对象
+    :param src: 从哪里发出。
+    :param device: 用于中转的设备,默认为当前 cuda 设备;
+    :return: 各个 rank 上均返回 src 端广播的 obj 对象。
+    """
+    if int(os.environ.get(FASTNLP_NO_SYNC, "0")) == 2:
+        if src == dist_env.get_rank():
+            return obj
+        else:
+            return None
+
+    cur_rank = dist_env.get_rank()
+    if cur_rank == src:
+        # 如果有 tensor 全部移动到 cpu 上,方便 pickle ,不然 unpickle 的时候可能会 pickle 到发送过来的卡那里
+        obj = apply_to_collection(obj, oneflow.Tensor, _to_device, device=oneflow.device("cpu"))
+    if device is None:
+        device = oneflow.cuda.current_device()
+    device = get_oneflow_device(device)
+
+    if cur_rank == src:
+        tensor, size = _object_to_tensor(obj, device=device)
+    else:
+        size = oneflow.LongTensor([0]).to(device)
+
+    comm.broadcast(size, src=src)
+    if cur_rank != src:
+        tensor = oneflow.empty(
+            size.int().item(),  # type: ignore[arg-type]
+            dtype=oneflow.uint8,
+            device=device
+        )
+    comm.broadcast(tensor, src=src)
+
+    return _tensor_to_object(tensor, tensor_size=size.item())
+
+def all_gather_object(object_list, obj):
+    """
+    将各个 rank 上的 obj 收集到所有 rank 的 object_list 中。
+
+    Example::
+
+        >>> # Note: Process group initialization omitted on each rank.
+        >>> # Assumes world_size of 3.
+        >>> gather_objects = ["foo", 12, {1: 2}]  # any picklable object
+        >>> output = [None for _ in gather_objects]
+        >>> all_gather_object(output, gather_objects[dist_env.get_rank()])
+        >>> output
+        ['foo', 12, {1: 2}]
+
+    :param object_list: 用于存放收集结果的 list,长度应等于 world_size;
+    :param obj: 当前 rank 需要贡献的对象,需要可以被 pickle;
+    :return: 填充完毕的 object_list;
+    """
+    if int(os.environ.get(FASTNLP_NO_SYNC, "0")) == 2:
+        return [obj]
+
+    current_device = get_oneflow_device(oneflow.cuda.current_device())
+
+    input_tensor, local_size = _object_to_tensor(obj, device=current_device)
+
+    # Gather all local sizes. This is so that we can find the max size, and index
+    # until the correct size when deserializing the tensors.
+    group_size = dist_env.get_world_size()
+    object_sizes_tensor = oneflow.zeros(
+        group_size, dtype=oneflow.long, device=current_device
+    )
+    object_size_list = [
+        object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size)
+    ]
+    # Allgather tensor sizes
+    comm.all_gather(object_size_list, local_size)
+    max_object_size = int(max(object_size_list).item())  # type: ignore[type-var]
+    # Resize tensor to max size across all ranks (pad with zeros; reshape cannot grow a tensor).
+    pad_size = max_object_size - input_tensor.numel()
+    if pad_size > 0:
+        input_tensor = oneflow.cat(
+            [input_tensor, oneflow.zeros(pad_size, dtype=oneflow.uint8, device=current_device)]
+        )
+    coalesced_output_tensor = oneflow.empty(
+        max_object_size * group_size, dtype=oneflow.uint8, device=current_device
+    )
+    # Output tensors are nonoverlapping views of coalesced_output_tensor
+    output_tensors = [
+        coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)]
+        for i in range(group_size)
+    ]
+    comm.all_gather(output_tensors, input_tensor)
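For orientation, this is how the helpers in this file are typically driven. A hypothetical two-process script (launched with ``python -m oneflow.distributed.launch --nproc_per_node 2 demo.py``; the file name and values are placeholders):

```python
# demo.py -- illustrative only; assumes a 2-process oneflow launch.
import oneflow.env as dist_env
from fastNLP.core.drivers.oneflow_driver.dist_utils import (
    fastnlp_oneflow_all_gather,
    fastnlp_oneflow_broadcast_object,
)

rank = dist_env.get_rank()

# Every rank contributes one nested object; every rank gets the full list back.
gathered = fastnlp_oneflow_all_gather({"rank": rank, "loss": 0.1 * rank})
assert len(gathered) == dist_env.get_world_size()

# Rank 0 decides a value; every other rank receives it.
vocab = fastnlp_oneflow_broadcast_object({"pad": 0, "unk": 1} if rank == 0 else None, src=0)
```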
+    # Deserialize outputs back to object.
+    for i, tensor in enumerate(output_tensors):
+        tensor = tensor.type(oneflow.uint8)
+        if tensor.device != oneflow.device("cpu"):
+            tensor = tensor.cpu()
+        tensor_size = object_size_list[i]
+        object_list[i] = _tensor_to_object(tensor, tensor_size)
+    return object_list
diff --git a/fastNLP/core/drivers/oneflow_driver/initialize_oneflow_driver.py b/fastNLP/core/drivers/oneflow_driver/initialize_oneflow_driver.py
new file mode 100644
index 00000000..2dab1729
--- /dev/null
+++ b/fastNLP/core/drivers/oneflow_driver/initialize_oneflow_driver.py
@@ -0,0 +1,70 @@
+import os
+from typing import Optional, Union, List, Sequence
+from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW
+if _NEED_IMPORT_ONEFLOW:
+    import oneflow
+
+from .oneflow_driver import OneflowDriver
+from .single_device import OneflowSingleDriver
+from .ddp import OneflowDDPDriver
+from fastNLP.core.log import logger
+from fastNLP.envs import FASTNLP_BACKEND_LAUNCH
+
+__all__ = []
+
+
+def initialize_oneflow_driver(driver: str, device: Optional[Union[str, "oneflow.device", int, List[int]]],
+                              model: "oneflow.nn.Module", **kwargs) -> OneflowDriver:
+    r"""
+    用来根据参数 ``driver`` 和 ``device`` 来确定并初始化一个具体的 ``Driver`` 实例,然后返回;
+
+    :param driver: 该参数的值应为以下之一:``["oneflow"]``;
+    :param device: 该参数的格式与 ``Trainer`` 对参数 ``device`` 的要求一致;
+    :param model: 训练或者评测的具体的模型;
+
+    :return: 返回一个 :class:`~fastNLP.core.OneflowSingleDriver` 或 :class:`~fastNLP.core.OneflowDDPDriver` 实例;
+    """
+    # 通过 launch 启动时,world_size 和 rank 由环境变量确定
+    if FASTNLP_BACKEND_LAUNCH in os.environ:
+        if device is not None:
+            logger.rank_zero_warning("Parameter `device` would be ignored when you are using `oneflow.distributed.launch` to pull "
+                                     "up your script.", once=True)
+        return OneflowDDPDriver(model, None, **kwargs)
+
+    if driver not in {"oneflow"}:
+        raise ValueError("Parameter `driver` can only be one of these values: ['oneflow'].")
+
+    _could_use_device_num = oneflow.cuda.device_count()
+    if isinstance(device, str):
+        device = oneflow.device(device)
+    elif isinstance(device, int):
+        if device < 0:
+            if device != -1:
+                raise ValueError("Parameter `device` can only be '-1' when it is smaller than 0.")
+            device = [oneflow.device(f"cuda:{w}") for w in range(_could_use_device_num)]
+        elif device >= _could_use_device_num:
+            raise ValueError("The gpu device that parameter `device` specifies does not exist.")
+        else:
+            device = oneflow.device(f"cuda:{device}")
+    elif isinstance(device, Sequence):
+        device = list(set(device))
+        for each in device:
+            if not isinstance(each, int):
+                raise ValueError("When parameter `device` is 'Sequence' type, the value in it should be 'int' type.")
+            elif each < 0:
+                raise ValueError("When parameter `device` is 'Sequence' type, the value in it should not be smaller than 0.")
+            elif each >= _could_use_device_num:
+                raise ValueError(f"When parameter `device` is 'Sequence' type, the value in it should not be bigger than"
+                                 f" the available gpu number:{_could_use_device_num}.")
+        device = [oneflow.device(f"cuda:{w}") for w in device]
+    elif device is not None and not isinstance(device, oneflow.device):
+        raise ValueError("Parameter `device` is wrong type, please check our documentation for the right use.")
+
+    if driver == "oneflow":  # single, ddp, 直接启动。
+        if not isinstance(device, List):
+            return OneflowSingleDriver(model, device, **kwargs)
+        else:
+            raise RuntimeError("If you want to run distributed training, please use "
+                               "'python -m oneflow.distributed.launch xxx.py'.")
\ No newline
at end of file diff --git a/fastNLP/core/drivers/oneflow_driver/oneflow_driver.py b/fastNLP/core/drivers/oneflow_driver/oneflow_driver.py new file mode 100644 index 00000000..17777358 --- /dev/null +++ b/fastNLP/core/drivers/oneflow_driver/oneflow_driver.py @@ -0,0 +1,445 @@ +import os +from typing import Union, Dict, Optional, Callable, Tuple +from functools import partial +import numpy as np +import random +from dataclasses import dataclass +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW +from pathlib import Path +if _NEED_IMPORT_ONEFLOW: + import oneflow + from oneflow.utils.data import DataLoader, Sampler, BatchSampler, Dataset + from oneflow.optim import Optimizer + from oneflow.utils.data import RandomSampler as OneflowRandomSampler + _reduces = { + "sum": oneflow.sum, + "min": oneflow.min, + "max": oneflow.max, + "mean": oneflow.mean + } + + +__all__ = [ + "OneflowDriver" +] + +from .utils import optimizer_state_to_device, DummyGradScaler +from fastNLP.core.drivers.driver import Driver +from fastNLP.core.utils.utils import _get_fun_msg, nullcontext +from fastNLP.core.utils import apply_to_collection, oneflow_move_data_to_device, auto_param_call +from fastNLP.envs import rank_zero_call +from fastNLP.envs import FASTNLP_GLOBAL_RANK, FASTNLP_MODEL_FILENAME, FASTNLP_CHECKPOINT_FILENAME +from fastNLP.core.log import logger +from fastNLP.core.samplers import ReproducibleBatchSampler, ReproducibleSampler, ReproduceBatchSampler, RandomSampler +from fastNLP.core.dataloaders import OverfitDataLoader + + +class OneflowDriver(Driver): + r""" + 专属于 ``oneflow`` 的 ``driver``,是 ``OneflowSingleDriver`` 和 ``OneflowDDPDriver`` 的父类; + + .. warning:: + + 您不应当直接初始化该类,然后传入给 ``Trainer``,换句话说,您应当使用该类的子类 ``OneflowSingleDriver`` 和 ``OneflowDDPDriver``,而不是 + 该类本身; + + .. 
note::
+
+        您可以在使用 ``OneflowSingleDriver`` 和 ``OneflowDDPDriver`` 时使用 ``OneflowDriver`` 提供的接口;
+
+    """
+    def __init__(self, model, fp16: Optional[bool] = False, oneflow_kwargs: Dict = {}, **kwargs):
+        super(OneflowDriver, self).__init__(model)
+
+        """ 进行 fp16 的设置 """
+        self._oneflow_kwargs = oneflow_kwargs
+
+        self.fp16 = fp16
+        if fp16:
+            logger.warn("OneflowDriver of eager mode does not support fp16 now.")
+        # self.auto_cast, _grad_scaler = _build_fp16_env(dummy=not self.fp16)
+        # self.grad_scaler = _grad_scaler(**self._oneflow_kwargs.get("gradscaler_kwargs", {}))
+        self.auto_cast = nullcontext
+        self.grad_scaler = DummyGradScaler()
+        self.set_grad_to_none = self._oneflow_kwargs.get("set_grad_to_none")
+
+        self.wo_auto_param_call = kwargs.get("model_wo_auto_param_call", False)
+
+    def zero_grad(self):
+        for optimizer in self.optimizers:
+            optimizer.zero_grad(self.set_grad_to_none)
+
+    def backward(self, loss):
+        loss.backward()
+        # self.grad_scaler.scale(loss).backward()
+
+    def step(self):
+        for optimizer in self.optimizers:
+            self.grad_scaler.step(optimizer)
+            self.grad_scaler.update()
+
+    def check_dataloader_legality(self, dataloader):
+        if not isinstance(dataloader, DataLoader) and not isinstance(dataloader, OverfitDataLoader):
+            raise TypeError(f"{DataLoader} is expected, instead of `{type(dataloader)}`")
+        if len(dataloader) == 0:
+            logger.rank_zero_warning("Your dataloader is empty, which is not recommended because it "
+                                     "may cause some unexpected exceptions.", once=True)
+
+    @staticmethod
+    def _check_optimizer_legality(optimizers):
+        for each_optimizer in optimizers:
+            if not isinstance(each_optimizer, Optimizer):
+                raise TypeError(f"Each optimizer of parameter `optimizers` should be 'Optimizer' type, "
+                                f"not {type(each_optimizer)}.")
+
+    @staticmethod
+    def tensor_to_numeric(tensor, reduce: str = None):
+        r"""
+        将 ``oneflow.Tensor`` 转换成 python 中的数值类型;
+
+        :param tensor: ``oneflow.Tensor``;
+        :param reduce: 当 tensor 是一个多数值的张量时,应当使用何种归约操作来转换成单一数值,应当为以下类型之一:``['max', 'min', 'sum', 'mean']``;
+        :return: 返回一个单一数值,其数值类型是 python 中的基本的数值类型,例如 ``int,float`` 等;
+        """
+
+        if tensor is None:
+            return None
+
+        def _translate(_data):
+            if _data.numel() == 1:
+                return _data.item()
+            if reduce is None:
+                return _data.tolist()
+            return _reduces[reduce](_data).item()
+
+        return apply_to_collection(
+            data=tensor,
+            dtype=oneflow.Tensor,
+            function=_translate
+        )
+
+    def set_model_mode(self, mode: str):
+        r"""
+        设置模型的状态是 ``train`` 还是 ``eval``;
+
+        :param mode: ``'train'`` 或 ``'eval'``;
+        """
+        assert mode in {"train", "eval"}
+        getattr(self.model, mode)()
+
+    @rank_zero_call
+    def save_model(self, filepath: Union[str, Path], only_state_dict: bool = True, **kwargs):
+        """
+        将当前 driver 的模型保存到 filepath 。
+
+        :param filepath: 保存文件的路径;
+        :param only_state_dict: 是否只保存权重;如果使用 ``DistributedDataParallel`` 启动分布式训练的话,该参数只能为 ``True``;
+        :return:
+        """
+        model = self.unwrap_model()
+        if not only_state_dict and self.is_distributed():
+            logger.warn("Cannot save ddp model directly, we will save its state_dict for you.")
+            only_state_dict = True
+
+        if only_state_dict:
+            states = {name: param.cpu().detach().clone() for name, param in model.state_dict().items()}
+            oneflow.save(states, filepath)
+        else:
+            if self.model_device is not None:
+                if not self.is_distributed():
+                    self.move_model_to_device(model, oneflow.device("cpu"))
+                oneflow.save(model, filepath)
+                if not self.is_distributed():
+                    self.move_model_to_device(model, self.model_device)
+            else:
+                oneflow.save(model, filepath)
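``save_model`` above and ``load_model`` below form a symmetric pair: a file written with ``only_state_dict=True`` must be read back the same way. A hypothetical round trip at the driver level (in normal use you would call ``Trainer.save_model`` / ``Trainer.load_model``, which delegate here; ``driver`` and the path are placeholders):

```python
# illustrative only -- `driver` is an already-initialized OneflowSingleDriver
driver.save_model("model.of", only_state_dict=True)   # oneflow.save of a cpu state dict

# later, with a freshly constructed model of the same architecture:
driver.load_model("model.of", only_state_dict=True)   # oneflow.load + load_state_dict
```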
+    def load_model(self, filepath: Union[Path, str], only_state_dict: bool = True, **kwargs):
+        """
+        从 filepath 中加载权重并赋值到当前 driver 的模型上。
+
+        :param filepath: 加载权重或模型的路径;
+        :param only_state_dict: 保存的内容是否只是权重;
+        :param kwargs:
+        :return:
+        """
+        model = self.unwrap_model()
+        res = oneflow.load(filepath)
+        if isinstance(res, dict) and only_state_dict is False:
+            logger.rank_zero_warning(f"It seems that {filepath} only contains state, you may need to use "
+                                     f"`only_state_dict=True`")
+        elif not isinstance(res, dict) and only_state_dict is True:
+            logger.rank_zero_warning(f"It seems that {filepath} is not state, you may need to use "
+                                     f"`only_state_dict=False`")
+        if not isinstance(res, dict):
+            res = res.state_dict()
+        model.load_state_dict(res)
+
+    @rank_zero_call
+    def save_checkpoint(self, folder: Path, states: Dict, dataloader, only_state_dict: bool = True, should_save_model: bool = True, **kwargs):
+        # 传入的 dataloader 参数是 trainer 的 dataloader 属性,因为 driver 的所有 dataloader 我们是不会去改变它的,而是通过改变
+        # trainer.dataloader 来改变 dataloader 的状态,从而适配训练或者评测环境;
+
+        # 1. sampler 的状态;
+        num_consumed_batches = states.pop("num_consumed_batches")
+        states["sampler_states"] = self.get_sampler_state(dataloader, num_consumed_batches)
+
+        # 2. 保存模型的状态;
+        if should_save_model:
+            if not os.path.exists(folder):
+                os.mkdir(folder)
+            model_path = folder.joinpath(FASTNLP_MODEL_FILENAME)
+            self.save_model(model_path, only_state_dict=only_state_dict)
+
+        # 3. 保存 optimizers 的状态;
+        states["optimizers_state_dict"] = self.get_optimizer_state()
+        logger.debug("Save optimizer state dict.")
+
+        # # 4. 保存fp16的状态
+        # if not isinstance(self.grad_scaler, DummyGradScaler):
+        #     grad_scaler_state_dict = self.grad_scaler.state_dict()
+        #     states['grad_scaler_state_dict'] = grad_scaler_state_dict
+
+        oneflow.save(states, Path(folder).joinpath(FASTNLP_CHECKPOINT_FILENAME))
+
+    def get_sampler_state(self, dataloader, num_consumed_batches):
+        dataloader_args = self.get_dataloader_args(dataloader)
+        if isinstance(dataloader_args.batch_sampler, ReproducibleBatchSampler):
+            sampler = dataloader_args.batch_sampler
+        elif dataloader_args.sampler:
+            sampler = dataloader_args.sampler
+        else:
+            raise RuntimeError("This condition is not supposed to appear.
Please report a bug to us.") + + if hasattr(sampler, "state_dict") and callable(sampler.state_dict): + sampler_states = sampler.state_dict() + if dataloader_args.batch_size is not None: + sampler_states["num_consumed_samples"] = sampler.num_replicas * dataloader_args.batch_size \ + * num_consumed_batches + else: + logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on sampler's " + "`num_consumed_samples`, it may cause missing some samples when reload.") + else: + raise RuntimeError("The sampler has no `state_dict()` method, fastNLP cannot save the training " + "state.") + + return sampler_states + + def load_sampler_state(self, dataloader, sampler_states): + states = {} + dataloader_args = self.get_dataloader_args(dataloader) + if isinstance(dataloader_args.batch_sampler, ReproducibleBatchSampler): + sampler = dataloader_args.batch_sampler + elif isinstance(dataloader_args.sampler, ReproducibleSampler): + sampler = dataloader_args.sampler + elif isinstance(dataloader_args.sampler, OneflowRandomSampler): + sampler = RandomSampler(dataloader_args.sampler.data_source) + logger.debug("Replace oneflow RandomSampler into fastNLP RandomSampler.") + elif self.is_distributed(): + raise RuntimeError("It is not allowed to use checkpoint retraining when you do not use our" + "`ReproducibleSampler`.") + else: + sampler = ReproduceBatchSampler( + batch_sampler=dataloader_args.batch_sampler if dataloader_args.batch_sampler is not None else dataloader_args.sampler, + batch_size=dataloader_args.batch_size, + drop_last=dataloader_args.drop_last + ) + sampler.load_state_dict(sampler_states) + states["dataloader"] = self.set_dist_repro_dataloader(dataloader, sampler) + + # 修改 trainer_state.batch_idx_in_epoch + # sampler 是类似 RandomSampler 的sampler,不是 batch_sampler; + if not isinstance(sampler, ReproducibleBatchSampler): + if dataloader_args.drop_last: + batch_idx_in_epoch = len( + sampler) // dataloader_args.batch_size - sampler.num_left_samples // dataloader_args.batch_size + else: + batch_idx_in_epoch = (len(sampler) + dataloader_args.batch_size - 1) // dataloader_args.batch_size - \ + (sampler.num_left_samples + dataloader_args.batch_size - 1) // dataloader_args.batch_size + # sampler 是 batch_sampler; + else: + batch_idx_in_epoch = sampler.batch_idx_in_epoch + + states["batch_idx_in_epoch"] = batch_idx_in_epoch + return states + + def get_optimizer_state(self): + optimizers_state_dict = {} + for i in range(len(self.optimizers)): + optimizer: oneflow.optim.Optimizer = self.optimizers[i] + optimizer_state = optimizer.state_dict() + optimizer_state["state"] = optimizer_state_to_device(optimizer_state["state"], oneflow.device("cpu")) + optimizers_state_dict[f"optimizer{i}"] = optimizer_state # 注意这里没有使用 deepcopy,测试是不需要的; + return optimizers_state_dict + + def load_optimizer_state(self, states): + assert len(states) == len(self.optimizers), f"The number of optimizers is:{len(self.optimizers)}, while in " \ + f"checkpoint it is:{len(states)}" + for i in range(len(self.optimizers)): + optimizer: oneflow.optim.Optimizer = self.optimizers[i] + optimizer.load_state_dict(states[f"optimizer{i}"]) + logger.debug("Load optimizer state dict.") + + def load_checkpoint(self, folder: Path, dataloader, only_state_dict: bool = True, should_load_model: bool = True, **kwargs) -> Dict: + states = oneflow.load(folder.joinpath(FASTNLP_CHECKPOINT_FILENAME)) + + # 1. 加载 optimizers 的状态; + optimizers_state_dict = states.pop("optimizers_state_dict") + self.load_optimizer_state(optimizers_state_dict) + + # 2. 
加载模型状态; + if should_load_model: + self.load_model(filepath=folder.joinpath(FASTNLP_MODEL_FILENAME), only_state_dict=only_state_dict) + + # # 3. 加载 fp16 的状态 + # if "grad_scaler_state_dict" in states: + # grad_scaler_state_dict = states.pop("grad_scaler_state_dict") + # if not isinstance(self.grad_scaler, DummyGradScaler): + # self.grad_scaler.load_state_dict(grad_scaler_state_dict) + # logger.debug("Load grad_scaler state dict...") + # elif not isinstance(self.grad_scaler, DummyGradScaler): + # logger.rank_zero_warning(f"Checkpoint {folder} is not trained with fp16=True, while resume to a fp16=True training, " + # f"the training process may be unstable.") + + # 4. 恢复 sampler 的状态; + sampler_states = states.pop("sampler_states") + states_ret = self.load_sampler_state(dataloader, sampler_states) + states.update(states_ret) + + return states + + def get_evaluate_context(self): + r""" + :return: 返回 ``oneflow.no_grad`` 这个 context; + """ + return oneflow.no_grad + + def model_call(self, batch, fn: Callable, signature_fn: Optional[Callable]) -> Dict: + if isinstance(batch, Dict) and not self.wo_auto_param_call: + return auto_param_call(fn, batch, signature_fn=signature_fn) + else: + return fn(batch) + + def get_model_call_fn(self, fn: str) -> Tuple: + if hasattr(self.model, fn): + fn = getattr(self.model, fn) + if not callable(fn): + raise RuntimeError(f"The `{fn}` attribute is not `Callable`.") + logger.debug(f"Use {_get_fun_msg(fn, with_fp=False)}...") + return fn, None + elif fn in {"train_step", "evaluate_step"}: + logger.debug(f"Use {_get_fun_msg(self.model.forward, with_fp=False)}...") + return self.model, self.model.forward + else: + raise RuntimeError(f"There is no `{fn}` method in your {type(self.model)}.") + + @staticmethod + def move_model_to_device(model: "oneflow.nn.Module", device: "oneflow.device"): + r""" + 将模型迁移到对应的设备上; + """ + if device is not None: + model.to(device) + + def move_data_to_device(self, batch): + """ + 将一个 batch 的数据迁移到对应的设备上; + + :param batch: 一个 batch 的数据,可以是 ``list、dict`` 等; + :return: + """ + return oneflow_move_data_to_device(batch, self.data_device) + + @staticmethod + def worker_init_function(worker_id: int, rank: Optional[int] = None) -> None: # pragma: no cover + global_rank = rank if rank is not None else int(os.environ.get(FASTNLP_GLOBAL_RANK, 0)) + process_seed = oneflow.initial_seed() + + base_seed = process_seed - worker_id + ss = np.random.SeedSequence([base_seed, worker_id, global_rank]) + + np.random.seed(ss.generate_state(4)) + + oneflow_ss, stdlib_ss = ss.spawn(2) + oneflow.manual_seed(oneflow_ss.generate_state(1, dtype=np.uint64)[0]) + + stdlib_seed = (stdlib_ss.generate_state(2, dtype=np.uint64).astype(object) * [1 << 64, 1]).sum() + random.seed(stdlib_seed) + + def set_deterministic_dataloader(self, dataloader: "DataLoader"): + if dataloader.worker_init_fn is None: + dataloader.worker_init_fn = partial(self.worker_init_function, + rank=int(os.environ.get(FASTNLP_GLOBAL_RANK, 0))) + + def set_sampler_epoch(self, dataloader: "DataLoader", cur_epoch_idx: int): + # 保证 ddp 训练时的 shuffle=True 时的正确性,因为需要保证每一个进程上的 sampler 的shuffle 的随机数种子是一样的; + if callable(getattr(dataloader.sampler, "set_epoch", None)): + dataloader.sampler.set_epoch(cur_epoch_idx) + + @staticmethod + def get_dataloader_args(dataloader: "DataLoader"): + """ + 获取 dataloader 的 shuffle 和 drop_last 属性; + """ + + @dataclass + class Res: + dataset: Optional[Dataset] = None + batch_sampler: Optional[BatchSampler] = None + sampler: Optional[Sampler] = None + batch_size: Optional[int] = None + 
shuffle: Optional[bool] = None
+            drop_last: Optional[bool] = None
+
+        res = Res()
+
+        # oneflow 的 DataLoader 一定会有 dataset 属性;
+        res.dataset = dataloader.dataset
+
+        # dataloader 使用的是 sampler;
+        if dataloader.batch_sampler is None:
+            res.sampler = dataloader.sampler
+            res.batch_size = 1
+            res.shuffle = True if isinstance(dataloader.sampler, RandomSampler) else False
+            res.drop_last = False
+        # dataloader 使用的是 batch_sampler;
+        else:
+            res.batch_sampler = dataloader.batch_sampler
+            if hasattr(dataloader.batch_sampler, "batch_size"):
+                res.batch_size = getattr(dataloader.batch_sampler, "batch_size")
+            # 用户使用的是自己的 batch_sampler 并且其没有 "batch_size" 属性;
+            else:
+                dataloader_iter = iter(dataloader)
+                pre_sample = next(dataloader_iter)
+                res.batch_size = pre_sample.shape[0]
+
+            if hasattr(dataloader.batch_sampler, "sampler"):
+                res.sampler = dataloader.batch_sampler.sampler
+                if hasattr(dataloader.batch_sampler.sampler, "shuffle"):
+                    res.shuffle = dataloader.batch_sampler.sampler.shuffle
+                elif isinstance(dataloader.batch_sampler.sampler, OneflowRandomSampler):
+                    res.shuffle = True
+                else:
+                    res.shuffle = False
+            # ReproduceBatchSampler 的情况
+            elif hasattr(dataloader.batch_sampler, "batch_sampler"):
+                batch_sampler = dataloader.batch_sampler.batch_sampler
+                res.sampler = batch_sampler.sampler
+                if hasattr(batch_sampler.sampler, "shuffle"):
+                    res.shuffle = batch_sampler.sampler.shuffle
+                elif isinstance(batch_sampler.sampler, OneflowRandomSampler):
+                    res.shuffle = True
+                else:
+                    res.shuffle = False
+            else:
+                # 如果 dataloader.batch_sampler 没有 sampler 这个属性,那么说明其使用的是自己的 batch_sampler,且没有 "sampler" 属性;
+                # 这种情况下 DataLoader 会自己初始化一个 sampler;我们因此将这个默认初始化的 sampler 挂载到 res 上;
+                res.sampler = dataloader.sampler
+                res.shuffle = False
+
+            if hasattr(dataloader.batch_sampler, "drop_last"):
+                res.drop_last = getattr(dataloader.batch_sampler, "drop_last")
+            # 用户使用的是自己的 batch_sampler 并且其没有 "drop_last" 属性;
+            else:
+                res.drop_last = False
+
+        return res
diff --git a/fastNLP/core/drivers/oneflow_driver/single_device.py b/fastNLP/core/drivers/oneflow_driver/single_device.py
new file mode 100644
index 00000000..aec4d0e1
--- /dev/null
+++ b/fastNLP/core/drivers/oneflow_driver/single_device.py
@@ -0,0 +1,114 @@
+import os
+from typing import Dict, Union
+from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW
+
+if _NEED_IMPORT_ONEFLOW:
+    import oneflow
+    from oneflow.utils.data import SequentialSampler as OneflowSequentialSampler
+    from oneflow.utils.data import BatchSampler as OneflowBatchSampler
+
+__all__ = [
+    "OneflowSingleDriver"
+]
+
+from .oneflow_driver import OneflowDriver
+from fastNLP.core.drivers.oneflow_driver.utils import replace_sampler, replace_batch_sampler
+from fastNLP.core.samplers import ReproducibleBatchSampler, ReproducibleSampler, re_instantiate_sampler, \
+    ReproduceBatchSampler
+from fastNLP.core.samplers import RandomSampler
+from fastNLP.core.log import logger
+
+
+class OneflowSingleDriver(OneflowDriver):
+    r"""
+    用于执行 ``oneflow`` 动态图 cpu 和单卡 gpu 运算的 ``driver``;
+
+    :param model: 传入给 ``Trainer`` 的 ``model`` 参数;
+    :param device: oneflow.device,当前进程所使用的设备;
+    :param fp16: 是否开启 fp16;目前动态图的单卡下该参数无效;
+    :param oneflow_kwargs:
+    """
+
+    def __init__(self, model, device: "oneflow.device", fp16: bool = False, oneflow_kwargs: Dict = {}, **kwargs):
+        cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
+        if cuda_visible_devices == "":
+            device = oneflow.device("cpu")
+            logger.info("You have set `CUDA_VISIBLE_DEVICES` to '' in system environment variable, and we are going to"
"use `cpu` instead of `gpu` device.") + + super(OneflowSingleDriver, self).__init__(model, fp16=fp16, **kwargs) + + if device is None: + logger.debug("device is not set, fastNLP will try to automatically get it.") + try: + device = next(model.parameters()).device + assert isinstance(device, oneflow.device) + except: + raise ValueError("fastNLP cannot get device automatically, please set device explicitly.") + + self.model_device = device + + self.local_rank = 0 + self.global_rank = 0 + self.world_size = 1 + + def setup(self): + r""" + 将模型迁移到相应的设备上; + """ + if self.model_device is not None: + self.model.to(self.model_device) + + def set_dist_repro_dataloader(self, dataloader, + dist: Union[str, ReproducibleBatchSampler, ReproducibleSampler] = None, + reproducible: bool = False): + + # 如果 dist 为 ReproducibleBatchSampler, ReproducibleIterator 说明是在断点重训时 driver.load_checkpoint 函数调用; + if isinstance(dist, ReproducibleBatchSampler): + return replace_batch_sampler(dataloader, dist) + elif isinstance(dist, ReproducibleSampler): + return replace_sampler(dataloader, dist) + + # 如果 dist 为 str 或者 None,说明是在 trainer 初试化时调用; + args = self.get_dataloader_args(dataloader) + if isinstance(args.batch_sampler, ReproducibleBatchSampler): + batch_sampler = re_instantiate_sampler(args.batch_sampler) + return replace_batch_sampler(dataloader, batch_sampler) + elif isinstance(args.sampler, ReproducibleSampler): + sampler = re_instantiate_sampler(args.sampler) + return replace_sampler(dataloader, sampler) + + if reproducible: + if type(args.batch_sampler) is OneflowBatchSampler: + if type(args.sampler) is OneflowSequentialSampler: + # 需要替换为不要 shuffle 的。 + sampler = RandomSampler(args.sampler.data_source, shuffle=False) + logger.debug("Replace oneflow SequentialSampler into fastNLP RandomSampler.") + return replace_sampler(dataloader, sampler) + batch_sampler = ReproduceBatchSampler( + batch_sampler=args.batch_sampler, + batch_size=args.batch_size, + drop_last=args.drop_last + ) + return replace_batch_sampler(dataloader, batch_sampler) + else: + return dataloader + + def unwrap_model(self): + r""" + :return: 返回模型 + """ + return self.model + + @property + def data_device(self): + r""" + :return: 数据和模型所在的设备; + """ + return self.model_device + + def is_distributed(self): + r""" + :return: 返回当前使用的 driver 是否是分布式的 driver,在 ``OneflowSingleDriver`` 中返回 ``False``; + """ + return False diff --git a/fastNLP/core/drivers/oneflow_driver/utils.py b/fastNLP/core/drivers/oneflow_driver/utils.py new file mode 100644 index 00000000..33019883 --- /dev/null +++ b/fastNLP/core/drivers/oneflow_driver/utils.py @@ -0,0 +1,292 @@ +import os + +from typing import Any, Dict, Optional +from enum import IntEnum +import contextlib +import random +import numpy as np +import inspect + +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW +from fastNLP.envs.utils import get_global_seed +from fastNLP.envs import ( + get_global_rank, + FASTNLP_BACKEND_LAUNCH, + FASTNLP_GLOBAL_SEED, +) +from fastNLP.core.samplers import ReproducibleBatchSampler +from fastNLP.core.utils import auto_param_call +from fastNLP.core.log import logger + +if _NEED_IMPORT_ONEFLOW: + import oneflow + from oneflow.nn import Module + from oneflow.utils.data import DataLoader + from oneflow.utils.data import RandomSampler as oneflowRandomSampler + from oneflow.utils.data import SequentialSampler as oneflowSequentialSampler + from oneflow.utils.data import BatchSampler as oneflowBatchSampler +else: + from fastNLP.core.utils.dummy_class import DummyClass as Module + + +__all__ = [ + 
'oneflow_seed_everything', + 'optimizer_state_to_device' +] + +def oneflow_seed_everything(seed: int = None, add_global_rank_to_seed: bool = True) -> int: + r""" + 为 **oneflow**、**numpy**、**python.random** 伪随机数生成器设置种子。 + + :param seed: 全局随机状态的整数值种子。如果为 ``None`` 则会根据时间戳生成一个种子。 + :param add_global_rank_to_seed: 在分布式训练中,是否在不同 **rank** 中使用不同的随机数。 + 当设置为 ``True`` 时,**FastNLP** 会将种子加上当前的 ``global_rank``。 + """ + max_seed_value = np.iinfo(np.uint32).max + min_seed_value = np.iinfo(np.uint32).min + + if seed is None: + if os.getenv(FASTNLP_BACKEND_LAUNCH) == "1": + seed = 42 + else: + seed = get_global_seed() + logger.info(f"'FASTNLP_GLOBAL_SEED' is set to {seed} automatically.") + if not isinstance(seed, int): + seed = int(seed) + + if not (min_seed_value <= seed <= max_seed_value): + logger.rank_zero_warning("Your seed value is too big or too small for numpy, we will choose a random seed for you.") + seed %= max_seed_value + + os.environ[FASTNLP_GLOBAL_SEED] = f"{seed}" + if add_global_rank_to_seed: + seed += get_global_rank() + + random.seed(seed) + np.random.seed(seed) + oneflow.manual_seed(seed) + oneflow.cuda.manual_seed_all(seed) + return seed + + +class ForwardState(IntEnum): + TRAIN = 0 + VALIDATE = 1 + TEST = 2 + PREDICT = 3 + + +class _DDPWrappingModel(Module): + """ + 该函数用于 DDP 训练时处理用户自己定制的 train_step 等函数; + 之所以要使用这一额外的包裹模型,是因为在使用 DDP 时,必须使用 DistributedDataParallel 的 forward 函数才能实现正常的运行; + 另一方面,我们要求用户在使用我们的框架时,需要针对不用的模式实现不同的处理函数,例如 'train_step', 'evaluate_step' 等; + 然而,当使用 DistributedDataParallel 包裹 model 后,模型看不见其除了 forward 之外的方法;并且当我们尝试在训练过程中主动提取 + `model = model.module`,这同样会导致错误,会使得每一个gpu上的模型参数不同; + + 因此出于以上考虑,我们实现了这一函数; + 对于更详细的解释,可以参考 'pytorch_lightning' 的 ddp 的设计; + """ + + def __init__(self, model: Module): + super(_DDPWrappingModel, self).__init__() + self.model = model + + def forward(self, batch, **kwargs) -> Dict: + """ + pytorch lightning 实现了先 unwrapping_model 的操作,但是感觉对于我们来说没有什么必须要,先写个注释放这里,之后有需求了再看; + """ + fn = kwargs.pop("fastnlp_fn") + signature_fn = kwargs.pop("fastnlp_signature_fn") + wo_auto_param_call = kwargs.pop("wo_auto_param_call") + + if isinstance(batch, Dict) and not wo_auto_param_call: + return auto_param_call(fn, batch, signature_fn=signature_fn) + else: + return fn(batch) + + +class DummyGradScaler: + + def __init__(self, *args, **kwargs): + pass + + def get_scale(self): + return 1.0 + + def is_enabled(self): + return False + + def scale(self, outputs): + return outputs + + def step(self, optimizer, *args, **kwargs): + optimizer.step(*args, **kwargs) + + def update(self, new_scale=None): + pass + + def unscale_(self, optimizer): + pass + + def load_state_dict(self, state_dict): + pass + + def state_dict(self): + return {} + + +def _build_fp16_env(dummy=False): + return + if dummy: + autocast = contextlib.ExitStack + GradScaler = DummyGradScaler + else: + if not oneflow.cuda.is_available(): + raise RuntimeError("Oneflow is not installed in gpu version, please use device='cpu'.") + if oneflow.cuda.get_device_capability(0)[0] < 7: + logger.rank_zero_warning( + "NOTE: your device does NOT support faster training with fp16, " + "please switch to FP32 which is likely to be faster" + ) + try: + from oneflow.amp import GradScaler + from oneflow.cuda.amp import autocast, GradScaler + except ImportError: + raise RuntimeError("torch version too low (less than 1.6)") + return autocast, GradScaler + + +def replace_sampler(dataloader: "DataLoader", sampler): + r""" + 替换 sampler (初始化一个新的 dataloader 的逻辑在于): + + 用户可能继承了 dataloader,定制了自己的 dataloader 类,这也是我们为什么先 
`inspect.signature(dataloader)` 而不是直接
+    `inspect.signature(DataLoader)` 的原因,因此同时注意到我们在外层重新初始化一个 dataloader 时也是使用的用户传进来的 dataloader
+    的类,而不是直接的 DataLoader;
+
+    如果需要定制自己的 dataloader,保证以下两点:
+
+        1. 在 __init__ 方法中加入 **kwargs,这是为了方便我们将 sampler 插入到具体的 DataLoader 的构造中;
+        2. 在 __init__ 方法中出现的参数,请务必挂为同样名字的实例属性,例如 self.one_arg_name = one_arg_name,这是因为我们只能通过属性
+           来获取实际的参数的值;
+
+    """
+
+    # 拿到实例属性;
+    instance_attrs = {k: v for k, v in vars(dataloader).items() if not k.startswith('_')}
+
+    # 'multiprocessing_context' 是 user-defined function;
+    if getattr(dataloader, 'multiprocessing_context', None) is not None:
+        instance_attrs["multiprocessing_context"] = dataloader.multiprocessing_context
+
+    # 拿到 dataloader '__init__' 函数的默认函数签名;
+    init_params = dict(inspect.signature(dataloader.__init__).parameters)
+
+    # 防止用户的 DataLoader 是继承了 oneflow 的 DataLoader,然后还是使用了 **kwargs 的方式对父类传参数
+    has_variadic_kwargs = any(v.kind is v.VAR_KEYWORD for k, v in init_params.items())
+    if has_variadic_kwargs and isinstance(dataloader, DataLoader):
+        # 防止用户写入了 super().__init__(**kwargs)
+        for key, value in dict(inspect.signature(DataLoader.__init__).parameters).items():
+            if key not in init_params and key != 'self':
+                init_params[key] = value
+
+    # 如果初始化 dataloader 所使用的参数不是默认值,那么我们需要将其记录下来用于重新初始化时设置;
+    non_default_params = {name for name, p in init_params.items() if
+                          name in instance_attrs and p.default != instance_attrs[name]}
+    # add `dataset` as it might have been replaced with `*args`
+    non_default_params.add("dataset")
+
+    reconstruct_args = {k: v for k, v in instance_attrs.items() if k in non_default_params}
+    if isinstance(dataloader, DataLoader):
+        reconstruct_args.update({"sampler": sampler, "shuffle": False, "batch_sampler": None})
+
+    batch_sampler = getattr(dataloader, "batch_sampler")
+    if batch_sampler is not None and isinstance(batch_sampler, ReproducibleBatchSampler):
+        raise RuntimeError("It should not be running here, please report a bug to us.")
+
+    required_args = {
+        p.name
+        for p in init_params.values()
+        if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
+        and p.default is p.empty
+        and p.name not in reconstruct_args
+    }
+
+    # 在 attribute 中没有找到这些参数,导致了没有办法重新初始化
+    if required_args:
+        required_args = sorted(required_args)
+        dataloader_self_name = dataloader.__class__.__name__
+        raise Exception(
+            f"Need to inject arguments {required_args} into the __init__ of `{dataloader_self_name}`. "
+            f"But they are not found in the attribute of `{dataloader_self_name}`, fastNLP cannot determine its "
+            f"value when trying to reinitialize `{dataloader_self_name}`, please add `{required_args}` as attributes of "
+            f"`{dataloader_self_name}`."
+        )
+
+    # 这种错误针对的是传入的 dataloader 不是直接的 DataLoader,而是定制了 DataLoader,但是 __init__ 中没有 **kwargs;
+    if not has_variadic_kwargs:
+        # the dataloader signature does not allow keyword arguments that need to be passed
+        missing_kwargs = reconstruct_args.keys() - init_params.keys()
+        if missing_kwargs:
+            missing_kwargs = sorted(missing_kwargs)
+            dataloader_self_name = dataloader.__class__.__name__
+            raise Exception(
+                f"The parameters {missing_kwargs} needed to reinitialize `{dataloader_self_name}` are not found."
+            )
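The two requirements spelled out in the docstring of ``replace_sampler`` are easy to violate. A minimal custom DataLoader that survives this re-instantiation could look like the following (an illustrative sketch, not part of the patch):

```python
import oneflow
from oneflow.utils.data import DataLoader

class MyDataLoader(DataLoader):
    def __init__(self, dataset, my_arg="whatever", **kwargs):
        # requirement 2: mirror every extra __init__ parameter as a
        # same-named instance attribute, so replace_sampler can read it back
        self.my_arg = my_arg
        # requirement 1: accept and forward **kwargs, so the new sampler
        # can be injected when the dataloader is re-created
        super().__init__(dataset, **kwargs)
```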
+        # 如果没有 kwargs,则保证一下只传入需要的参数
+        if not isinstance(dataloader, DataLoader):
+            reconstruct_args = {key: value for key, value in reconstruct_args.items() if key in init_params}
+
+    return type(dataloader)(**reconstruct_args)
+
+
+def replace_batch_sampler(dataloader, new_batch_sampler):
+    r"""
+    替换一个 dataloader 的 batch_sampler;
+    """
+    params_keys = [k for k in dataloader.__dict__.keys() if not k.startswith("_")]
+    for k in ["batch_size", "sampler", "drop_last", "batch_sampler", "dataset_kind"]:
+        if k in params_keys:
+            params_keys.remove(k)
+    params = {k: getattr(dataloader, k) for k in params_keys}
+    params["batch_sampler"] = new_batch_sampler
+
+    if not isinstance(dataloader, DataLoader):
+        init_params = dict(inspect.signature(dataloader.__init__).parameters)
+        has_variadic_kwargs = any(v.kind is v.VAR_KEYWORD for k, v in init_params.items())
+        if not has_variadic_kwargs:
+            params = {key: value for key, value in params.items() if key in init_params}
+
+    return type(dataloader)(**params)
+
+
+def optimizer_state_to_device(state, device):
+    r"""
+    将一个 ``optimizer`` 的 ``state_dict`` 迁移到对应的设备;
+
+    :param state: ``optimizer.state_dict()``;
+    :param device: 要迁移到的目的设备;
+    :return: 返回迁移后的新的 state_dict;
+    """
+    new_state = {}
+    for name, param in state.items():
+        if isinstance(param, dict):
+            new_state[name] = optimizer_state_to_device(param, device)
+        elif isinstance(param, oneflow.Tensor):
+            new_state[name] = param.to(device).clone()
+        else:
+            new_state[name] = param
+    return new_state
+
+
+def _check_dataloader_args_for_distributed(args, controller='Trainer'):
+    if type(args.batch_sampler) is not oneflowBatchSampler or (type(args.sampler) not in {oneflowRandomSampler,
+                                                                                          oneflowSequentialSampler}):
+        mode = 'training' if controller == 'Trainer' else 'evaluation'
+        substitution = 'fastNLP.RandomSampler' if controller == 'Trainer' else 'fastNLP.UnrepeatedSequentialSampler'
+        raise TypeError(f"Using customized ``batch_sampler`` or ``sampler`` for distributed {mode} may cause "
+                        f"unpredictable problems, because fastNLP will substitute the dataloader's sampler into "
+                        f"``{substitution}``. 
The customized sampler should be set up for distributed running "
+                        f"before initializing ``{controller}``, and then the parameter ``use_dist_sampler`` of "
+                        f"``{controller}`` should be set to ``False``.")
diff --git a/fastNLP/core/drivers/paddle_driver/fleet.py b/fastNLP/core/drivers/paddle_driver/fleet.py
index 9344f515..6668d577 100644
--- a/fastNLP/core/drivers/paddle_driver/fleet.py
+++ b/fastNLP/core/drivers/paddle_driver/fleet.py
@@ -130,15 +130,15 @@ class PaddleFleetDriver(PaddleDriver):
     :param is_pull_by_paddle_run: 标记当前进程是否为通过 ``python -m paddle.distributed.launch`` 启动的。
         这个参数仅在 :class:`~fastNLP.core.Trainer` 中初始化 driver 时使用
     :param fp16: 是否开启混合精度训练;
+    :param paddle_kwargs:
+        * *fleet_kwargs* -- 用于在使用 ``PaddleFleetDriver`` 时指定 ``DataParallel`` 和 ``fleet`` 初始化时的参数,包括:
+
+            * *is_collective* -- 是否使用 paddle 集群式的分布式训练方法,目前仅支持为 ``True`` 的情况;
+            * *role_maker* -- 初始化 ``fleet`` 分布式训练 API 时使用的 ``RoleMaker``;
+            * 其它用于初始化 ``DataParallel`` 的参数;
+        * *gradscaler_kwargs* -- 用于 ``fp16=True`` 时,提供给 :class:`paddle.amp.GradScaler` 的参数;
+
     :kwargs:
-        * *paddle_kwargs* -- 用于在指定 ``driver`` 为 'paddle' 时设定具体 driver 实例的一些参数:
-
-            * fleet_kwargs -- 用于在使用 ``PaddleFleetDriver`` 时指定 ``DataParallel`` 和 ``fleet`` 初始化时的参数,包括:
-
-                * is_collective -- 是否使用 paddle 集群式的分布式训练方法,目前仅支持为 ``True`` 的情况;
-                * role_maker -- 初始化 ``fleet`` 分布式训练 API 时使用的 ``RoleMaker``
-                * 其它用于初始化 ``DataParallel`` 的参数;
-
         * wo_auto_param_call (``bool``) -- 是否关闭在训练时调用我们的 ``auto_param_call`` 函数来自动匹配 batch 和前向函数的参数的行为;
 
     .. note::
@@ -152,11 +152,12 @@ class PaddleFleetDriver(PaddleDriver):
         parallel_device: Optional[Union[List[str], str]],
         is_pull_by_paddle_run: bool = False,
         fp16: bool = False,
+        paddle_kwargs: Dict = {},
         **kwargs
     ):
         if USER_CUDA_VISIBLE_DEVICES not in os.environ:
             raise RuntimeError("To run paddle distributed training, please set `FASTNLP_BACKEND` to 'paddle' before using FastNLP.")
-        super(PaddleFleetDriver, self).__init__(model, fp16=fp16, **kwargs)
+        super(PaddleFleetDriver, self).__init__(model, fp16=fp16, paddle_kwargs=paddle_kwargs, **kwargs)
 
         # 如果不是通过 launch 启动,要求用户必须传入 parallel_device
         if not is_pull_by_paddle_run:
diff --git a/fastNLP/core/drivers/paddle_driver/paddle_driver.py b/fastNLP/core/drivers/paddle_driver/paddle_driver.py
index bfc26350..f604994e 100644
--- a/fastNLP/core/drivers/paddle_driver/paddle_driver.py
+++ b/fastNLP/core/drivers/paddle_driver/paddle_driver.py
@@ -56,17 +56,21 @@ class PaddleDriver(Driver):
     1. :class:`~fastNLP.core.drivers.PaddleSingleDriver`:实现了使用单卡和 ``cpu`` 训练的具体功能;
     2. :class:`~fastNLP.core.drivers.PaddleFleetDriver`:实现了使用 ``fleet`` 分布式训练 API 进行集群式分布式训练的具体功能;
 
-    :param model: 训练时使用的 **PaddlePaddle** 模型;
-    :param fp16: 是否开启混合精度训练;
-    :kwargs:
-        * wo_auto_param_call (``bool``) -- 是否关闭在训练时调用我们的 ``auto_param_call`` 函数来自动匹配 batch 和前向函数的参数的行为;
+    .. warning::
 
-    .. note::
+        您不应当直接初始化该类,然后传入给 ``Trainer``,换句话说,您应当使用该类的子类 ``PaddleSingleDriver`` 和 ``PaddleFleetDriver``,而不是
+        该类本身;
+
+    .. 
note::
 
-        关于该参数的详细说明,请参见 :class:`~fastNLP.core.controllers.Trainer` 中的描述;函数 ``auto_param_call`` 详见 :func:`fastNLP.core.utils.auto_param_call`。
+        您可以在使用 ``PaddleSingleDriver`` 和 ``PaddleFleetDriver`` 时使用 ``PaddleDriver`` 提供的接口;
+
+    :param model: 训练时使用的 **PaddlePaddle** 模型;
+    :param fp16: 是否开启混合精度训练;
+    :param paddle_kwargs:
     """
 
-    def __init__(self, model: "paddle.nn.Layer", fp16: Optional[bool] = False, **kwargs):
+    def __init__(self, model: "paddle.nn.Layer", fp16: Optional[bool] = False, paddle_kwargs: Dict = {}, **kwargs):
         if not isinstance(model, paddle.nn.Layer):
             raise ValueError(f"Parameter `model` can not be `{type(model)}` in `PaddleDriver`, it should be exactly "
                              f"`paddle.nn.Layer` type.")
@@ -76,7 +80,7 @@ class PaddleDriver(Driver):
 
         # scaler的参数
         self.auto_cast, _grad_scaler = _build_fp16_env(dummy=not fp16)
-        self.grad_scaler = _grad_scaler()
+        self.grad_scaler = _grad_scaler(**self._paddle_kwargs.get("gradscaler_kwargs", {}))
 
         # 用来设置是否关闭 auto_param_call 中的参数匹配问题;
         self.wo_auto_param_call = kwargs.get("model_wo_auto_param_call", False)
diff --git a/fastNLP/core/drivers/paddle_driver/single_device.py b/fastNLP/core/drivers/paddle_driver/single_device.py
index 4105bf20..267c10bd 100644
--- a/fastNLP/core/drivers/paddle_driver/single_device.py
+++ b/fastNLP/core/drivers/paddle_driver/single_device.py
@@ -43,6 +43,8 @@ class PaddleSingleDriver(PaddleDriver):
     :param model: 训练时使用的 **PaddlePaddle** 模型;
     :param device: 训练使用的设备;
     :param fp16: 是否开启混合精度训练;
+    :param paddle_kwargs:
+        * *gradscaler_kwargs* -- 用于 ``fp16=True`` 时,提供给 :class:`paddle.amp.GradScaler` 的参数;
     :kwargs:
         * wo_auto_param_call (``bool``) -- 是否关闭在训练时调用我们的 ``auto_param_call`` 函数来自动匹配 batch 和前向函数的参数的行为;
 
@@ -51,7 +53,7 @@ class PaddleSingleDriver(PaddleDriver):
         关于该参数的详细说明,请参见 :class:`~fastNLP.core.controllers.Trainer` 中的描述;函数 ``auto_param_call`` 详见 :func:`fastNLP.core.utils.auto_param_call`。
     """
 
-    def __init__(self, model: "paddle.nn.Layer", device: Union[str, int], fp16: Optional[bool] = False, **kwargs):
+    def __init__(self, model: "paddle.nn.Layer", device: Union[str, int], fp16: Optional[bool] = False, paddle_kwargs: Dict = {}, **kwargs):
         if isinstance(model, DataParallel):
             raise ValueError("`paddle.DataParallel` is not supported in `PaddleSingleDriver`")
 
@@ -61,7 +63,7 @@ class PaddleSingleDriver(PaddleDriver):
             logger.info("You have set `CUDA_VISIBLE_DEVICES` to '' in system environment variable, and we are gonna to"
                         "use `cpu` instead of `gpu` device.")
 
-        super(PaddleSingleDriver, self).__init__(model, fp16=fp16, **kwargs)
+        super(PaddleSingleDriver, self).__init__(model, fp16=fp16, paddle_kwargs=paddle_kwargs, **kwargs)
 
         if device is None:
             raise ValueError("Parameter `device` can not be None in `PaddleSingleDriver`.")
diff --git a/fastNLP/core/drivers/torch_driver/ddp.py b/fastNLP/core/drivers/torch_driver/ddp.py
index b5485d16..47d9cbb5 100644
--- a/fastNLP/core/drivers/torch_driver/ddp.py
+++ b/fastNLP/core/drivers/torch_driver/ddp.py
@@ -235,7 +235,12 @@ class TorchDDPDriver(TorchDriver):
     :param parallel_device: 用于分布式训练的 ``gpu`` 设备;
     :param is_pull_by_torch_run: 标志当前的脚本的启动是否由 ``python -m torch.distributed.launch`` 启动的;
     :param fp16: 是否开启 fp16 训练;
-    :param kwargs: 其余的一些用于设定 ddp 训练的参数;
+    :param torch_kwargs:
+        * *ddp_kwargs* -- 用于在使用 ``TorchDDPDriver`` 时指定 ``DistributedDataParallel`` 初始化时的参数;例如传入
+          {'find_unused_parameters': True} 来解决有参数不参与前向运算导致的报错等;
+        * *set_grad_to_none* -- 是否在训练过程中在每一次 optimizer 更新后将 grad 置为 None;
+        * *non_blocking* -- 表示用于 pytorch 的 tensor 的 to 方法的参数 non_blocking;
+        * *gradscaler_kwargs* -- 用于 fp16=True
时,提供给 ``torch.cuda.amp.GradScaler`` 的参数;
     """
 
     def __init__(
@@ -244,11 +249,12 @@
         parallel_device: Optional[Union[List["torch.device"], "torch.device"]],
         is_pull_by_torch_run: bool = False,
         fp16: bool = False,
+        torch_kwargs: Dict = {},
         **kwargs
     ):
 
         # 在加入很多东西后,需要注意这里调用 super 函数的位置;
-        super(TorchDDPDriver, self).__init__(model, fp16=fp16, **kwargs)
+        super(TorchDDPDriver, self).__init__(model, fp16=fp16, torch_kwargs=torch_kwargs, **kwargs)
 
         if isinstance(model, torch.nn.DataParallel):
             raise ValueError(f"Parameter `model` can not be `DataParallel` in `TorchDDPDriver`, it should be "
diff --git a/fastNLP/core/drivers/torch_driver/single_device.py b/fastNLP/core/drivers/torch_driver/single_device.py
index 263cf712..b59aba64 100644
--- a/fastNLP/core/drivers/torch_driver/single_device.py
+++ b/fastNLP/core/drivers/torch_driver/single_device.py
@@ -35,9 +35,13 @@ class TorchSingleDriver(TorchDriver):
     :param model: 传入给 ``Trainer`` 的 ``model`` 参数;
     :param device: torch.device,当前进程所使用的设备;
     :param fp16: 是否开启 fp16;
+    :param torch_kwargs:
+        * *set_grad_to_none* -- 是否在训练过程中在每一次 optimizer 更新后将 grad 置为 None;
+        * *non_blocking* -- 表示用于 pytorch 的 tensor 的 to 方法的参数 non_blocking;
+        * *gradscaler_kwargs* -- 用于 fp16=True 时,提供给 ``torch.cuda.amp.GradScaler`` 的参数;
     """
 
-    def __init__(self, model, device: "torch.device", fp16: bool = False, **kwargs):
+    def __init__(self, model, device: "torch.device", fp16: bool = False, torch_kwargs: Dict = {}, **kwargs):
         if isinstance(model, DistributedDataParallel):
             raise ValueError("`DistributedDataParallel` is not supported in `TorchSingleDriver`")
 
@@ -47,7 +51,7 @@
             logger.info("You have set `CUDA_VISIBLE_DEVICES` to '' in system environment variable, and we are gonna to"
                         "use `cpu` instead of `gpu` device.")
 
-        super(TorchSingleDriver, self).__init__(model, fp16=fp16, **kwargs)
+        super(TorchSingleDriver, self).__init__(model, fp16=fp16, torch_kwargs=torch_kwargs, **kwargs)
 
         if device is None:
             logger.debug("device is not set, fastNLP will try to automatically get it.")
diff --git a/fastNLP/core/drivers/torch_driver/torch_driver.py b/fastNLP/core/drivers/torch_driver/torch_driver.py
index 1594a903..60bd4147 100644
--- a/fastNLP/core/drivers/torch_driver/torch_driver.py
+++ b/fastNLP/core/drivers/torch_driver/torch_driver.py
@@ -47,12 +47,15 @@ class TorchDriver(Driver):
 
         您可以在使用 ``TorchSingleDriver`` 和 ``TorchDDPDriver`` 时使用 ``TorchDriver`` 提供的接口;
 
+    :param model: 训练时使用的 **pytorch** 模型;
+    :param fp16: 是否开启混合精度训练;
+    :param torch_kwargs:
     """
 
-    def __init__(self, model, fp16: Optional[bool] = False, **kwargs):
+    def __init__(self, model, fp16: Optional[bool] = False, torch_kwargs: Dict = {}, **kwargs):
         super(TorchDriver, self).__init__(model)
 
         """ 进行 fp16 的设置 """
-        self._torch_kwargs = kwargs.get("torch_kwargs", {})
+        self._torch_kwargs = torch_kwargs
 
         # 因为 ddp 和 single_device 的混合精度训练的设置是一样的,因此可以统一抽象到这里;
         self.fp16 = fp16
diff --git a/fastNLP/core/metrics/backend/auto_backend.py b/fastNLP/core/metrics/backend/auto_backend.py
index e2515313..f671ad2e 100644
--- a/fastNLP/core/metrics/backend/auto_backend.py
+++ b/fastNLP/core/metrics/backend/auto_backend.py
@@ -8,6 +8,7 @@
 from .backend import Backend
 from .torch_backend.backend import TorchBackend
 from .paddle_backend.backend import PaddleBackend
 from .jittor_backend.backend import JittorBackend
+from .oneflow_backend.backend import OneflowBackend
 
 
 class AutoBackend(Backend):
@@ -52,6 +53,8 @@
             self.__class__ = PaddleBackend
         elif backend ==
diff --git a/fastNLP/core/metrics/backend/auto_backend.py b/fastNLP/core/metrics/backend/auto_backend.py index e2515313..f671ad2e 100644 --- a/fastNLP/core/metrics/backend/auto_backend.py +++ b/fastNLP/core/metrics/backend/auto_backend.py @@ -8,6 +8,7 @@ from .backend import Backend from .torch_backend.backend import TorchBackend from .paddle_backend.backend import PaddleBackend from .jittor_backend.backend import JittorBackend +from .oneflow_backend.backend import OneflowBackend class AutoBackend(Backend): @@ -52,6 +53,8 @@ class AutoBackend(Backend): self.__class__ = PaddleBackend elif backend == 'jittor': self.__class__ = JittorBackend + elif backend == 'oneflow': + self.__class__ = OneflowBackend elif backend is None: # 不用做任何事情就可以初始化了 pass diff --git a/fastNLP/core/metrics/backend/oneflow_backend/__init__.py b/fastNLP/core/metrics/backend/oneflow_backend/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/fastNLP/core/metrics/backend/oneflow_backend/backend.py b/fastNLP/core/metrics/backend/oneflow_backend/backend.py new file mode 100644 index 00000000..6392b09d --- /dev/null +++ b/fastNLP/core/metrics/backend/oneflow_backend/backend.py @@ -0,0 +1,130 @@ +from typing import List + +import numpy as np + +from fastNLP.core.metrics.backend import Backend +from fastNLP.core.metrics.utils import AggregateMethodError +from fastNLP.core.utils import is_in_oneflow_dist +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW +from fastNLP.core.drivers.oneflow_driver.dist_utils import fastnlp_oneflow_all_gather + + +if _NEED_IMPORT_ONEFLOW: + import oneflow + import oneflow.comm as comm + +__all__ = [] + class OneflowBackend(Backend): + def __init__(self): + super().__init__() + self._specified = True + + def aggregate(self, tensor, method: str): + """ + 聚集结果,并根据 method 计算后,返回结果 + + :param tensor: 需要聚合的张量 + :param method: 聚合的方法, 目前支持 ``['sum', 'mean', 'max', 'min']``: + + * method 为 ``'sum'`` 时, 会将多张卡上聚合结果在维度为 `0` 上累加起来。 + * method 为 ``'mean'`` 时,会将多张卡上聚合结果在维度为 `0` 上取平均值。 + * method 为 ``'max'`` 时,会将多张卡上聚合结果在维度为 `0` 上取最大值。 + * method 为 ``'min'`` 时,会将多张卡上聚合结果在维度为 `0` 上取最小值。 + + """ + if isinstance(tensor, oneflow.Tensor): + # TODO 暂时没有找到 oneflow 中检测是否初始化了分布式环境的方法 + if is_in_oneflow_dist(): + if method is None: + raise AggregateMethodError(should_have_aggregate_method=True) + tensor = self.all_gather_object(tensor) + if isinstance(tensor[0], oneflow.Tensor): + tensor = oneflow.stack(tensor) + # 第一步, aggregate结果 + if method == 'sum': + tensor = oneflow.sum(tensor, dim=0) + elif method == 'mean': + tensor = oneflow.mean(tensor, dim=0) + elif method == 'max': + tensor, _ = oneflow.max(tensor, dim=0) + elif method == 'min': + tensor, _ = oneflow.min(tensor, dim=0) + else: + raise AggregateMethodError(should_have_aggregate_method=False) + + return tensor + + def create_tensor(self, value: float): + """ + 创建 tensor,并且填入 value 作为值 + + :param value: 创建张量的初始值 + """ + tensor = oneflow.ones(1).fill_(value) + return tensor + + def fill_value(self, tensor, value: float): + """ + 将 tensor 的值设置为 value + + :param tensor: 传入的张量 + :param value: 需要 fill 的值。 + """ + tensor.fill_(value) + return tensor + + def get_scalar(self, tensor) -> float: + """ + 获取 tensor 的 scalar 值 + + :param tensor: 传入的张量 + """ + return tensor.item() + + def tensor2numpy(self, tensor) -> np.ndarray: + """ + 将 tensor 转为 numpy 值, 主要是在 metric 计算中使用 + + :param tensor: 传入的张量 + """ + + if isinstance(tensor, oneflow.Tensor): + return tensor.cpu().detach().numpy() + elif isinstance(tensor, np.ndarray): + return tensor + elif isinstance(tensor, (float, int)): + return tensor + else: + raise ValueError(f"tensor: {tensor} can not convert to ndarray!") + + @staticmethod + def is_distributed() -> bool: + """ + 判断是否为 ddp 状态 + + :return: + """ + return is_in_oneflow_dist() + + def move_tensor_to_device(self, tensor, device): + """ + 将张量移到设备上 + + :param tensor: 需要移动的张量 + :param device: 设备名, 一般为 "cpu", "cuda:0"等字符串 + """ + return tensor.to(device) + + def all_gather_object(self, obj, group=None) -> List: + """ + 给定 obj 将各个 rank 上的 obj 汇总到每个 obj 上。返回一个 list 对象,里面依次为各个 rank 对应的 obj 。 + + :param obj: + :param group: + """ + if self.is_distributed(): + obj_list = fastnlp_oneflow_all_gather(obj) + return obj_list + return [obj] +
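On a single process the backend above degenerates to plain tensor ops, which makes it easy to sanity-check in isolation (editorial sketch; outside a distributed run ``aggregate`` skips the gather and only applies the reduction)::

    import oneflow
    from fastNLP.core.metrics.backend.oneflow_backend.backend import OneflowBackend

    backend = OneflowBackend()
    t = backend.create_tensor(3.0)              # tensor([3.])
    t = backend.fill_value(t, 5.0)              # tensor([5.])
    total = backend.aggregate(t, method="sum")  # no all_gather on a single process
    assert backend.get_scalar(total) == 5.0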
diff --git a/fastNLP/core/utils/__init__.py b/fastNLP/core/utils/__init__.py index 0857f450..d188bc37 100644 --- a/fastNLP/core/utils/__init__.py +++ b/fastNLP/core/utils/__init__.py @@ -1,5 +1,6 @@ __all__ = [ 'cache_results', + 'is_jittor_module', 'is_jittor_dataset', 'jittor_collate_wraps', 'paddle_to', @@ -9,8 +10,14 @@ __all__ = [ 'is_in_paddle_dist', 'is_in_fnlp_paddle_dist', 'is_in_paddle_launch_dist', + 'is_paddle_module', 'f_rich_progress', 'torch_move_data_to_device', + 'is_torch_module', + 'get_oneflow_device', + 'oneflow_move_data_to_device', + 'is_oneflow_module', + 'is_in_oneflow_dist', 'get_fn_arg_names', 'auto_param_call', 'check_user_specific_params', @@ -28,11 +35,12 @@ __all__ = [ ] from .cache_results import cache_results -from .jittor_utils import is_jittor_dataset, jittor_collate_wraps +from .jittor_utils import is_jittor_dataset, jittor_collate_wraps, is_jittor_module from .paddle_utils import paddle_to, paddle_move_data_to_device, get_paddle_device_id, get_paddle_gpu_str, is_in_paddle_dist, \ - is_in_fnlp_paddle_dist, is_in_paddle_launch_dist + is_in_fnlp_paddle_dist, is_in_paddle_launch_dist, is_paddle_module from .rich_progress import f_rich_progress -from .torch_utils import torch_move_data_to_device +from .torch_utils import torch_move_data_to_device, is_torch_module +from .oneflow_utils import oneflow_move_data_to_device, is_oneflow_module, is_in_oneflow_dist, get_oneflow_device from .utils import * from .tqdm_progress import f_tqdm_progress from .seq_len_to_mask import seq_len_to_mask diff --git a/fastNLP/core/utils/jittor_utils.py b/fastNLP/core/utils/jittor_utils.py index f29b1f46..ac00cd22 100644 --- a/fastNLP/core/utils/jittor_utils.py +++ b/fastNLP/core/utils/jittor_utils.py @@ -1,6 +1,7 @@ __all__ = [ + 'is_jittor_module', 'is_jittor_dataset', - 'jittor_collate_wraps' + 'jittor_collate_wraps', ] from collections.abc import Mapping, Callable @@ -13,6 +14,17 @@ if _NEED_IMPORT_JITTOR: from fastNLP.core.dataset import Instance +def is_jittor_module(model) -> bool: + """ + 判断传入的 ``model`` 是否是 :class:`jittor.Module` 类型 + + :param model: 模型; + :return: 当前模型是否为 ``jittor`` 的模型; + """ + try: + return isinstance(model, jt.Module) + except BaseException: + return False def is_jittor_dataset(dataset) -> bool: """ diff --git a/fastNLP/core/utils/oneflow_utils.py b/fastNLP/core/utils/oneflow_utils.py new file mode 100644 index 00000000..f9225466 --- /dev/null +++ b/fastNLP/core/utils/oneflow_utils.py @@ -0,0 +1,69 @@ +import os +from typing import Any, Union, Optional +from fastNLP.envs.env import FASTNLP_DISTRIBUTED_CHECK +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW + +if _NEED_IMPORT_ONEFLOW: + import oneflow + +__all__ = [ + 'get_oneflow_device', + 'oneflow_move_data_to_device', + 'is_oneflow_module', + 'is_in_oneflow_dist', +] + +from .utils import apply_to_collection + +def get_oneflow_device(device): + """ + 构造一个 :class:`oneflow.device` 实例并返回。 + + :param device: 字符串或 gpu 编号 + :return: :class:`oneflow.device` + """ + if isinstance(device, oneflow.device): + return device + if isinstance(device, int): + return oneflow.device("cuda", device) + if isinstance(device, str): + return oneflow.device(device) + raise RuntimeError(f"Cannot get `oneflow.device` from {device}.") + +def oneflow_move_data_to_device(batch: Any, device: Optional[Union[str, "oneflow.device"]] = None) -> Any: + r""" + 在 **oneflow** 中将数据集合 ``batch`` 传输到给定设备。任何定义方法 ``to(device)`` 的对象都将被移动并且集合中的所有其他对象将保持不变; + + :param batch: 需要迁移的数据; + :param device: 数据应当迁移到的设备;当该参数的值为 ``None`` 时则不执行任何操作; + :return: 迁移到新设备上的数据集合; + """ + if device is None: + return batch + + def batch_to(data: Any) -> Any: + data_output = data.to(device) + if data_output is not None: + return data_output + # user wrongly implemented the `TransferableDataType` and forgot to return `self`. + return data + + return apply_to_collection(batch, dtype=oneflow.Tensor, function=batch_to) + +def is_oneflow_module(model) -> bool: + """ + 判断传入的 ``model`` 是否是 :class:`oneflow.nn.Module` 类型 + + :param model: 模型; + :return: 当前模型是否为 ``oneflow`` 的模型; + """ + try: + return isinstance(model, oneflow.nn.Module) + except BaseException: + return False + +def is_in_oneflow_dist() -> bool: + """ + 判断是否处于 **oneflow** 分布式的进程下。 + """ + return "GLOG_log_dir" in os.environ \ No newline at end of file
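The three oneflow helpers just added compose as follows (editorial sketch; the ``cuda`` device assumes a GPU build of oneflow)::

    import oneflow
    from fastNLP.core.utils import (
        get_oneflow_device, oneflow_move_data_to_device, is_oneflow_module
    )

    device = get_oneflow_device(0)                      # -> oneflow.device("cuda", 0)
    batch = {"x": oneflow.ones(2, 3), "lens": [3, 2]}
    batch = oneflow_move_data_to_device(batch, device)  # only oneflow.Tensor values are moved;
                                                        # the plain list "lens" stays unchanged
    assert is_oneflow_module(oneflow.nn.Linear(3, 4))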
diff --git a/fastNLP/core/utils/paddle_utils.py b/fastNLP/core/utils/paddle_utils.py index 9e7e73a4..adcbcabd 100644 --- a/fastNLP/core/utils/paddle_utils.py +++ b/fastNLP/core/utils/paddle_utils.py @@ -6,6 +6,7 @@ __all__ = [ "is_in_paddle_dist", "is_in_fnlp_paddle_dist", "is_in_paddle_launch_dist", + "is_paddle_module", ] import os @@ -174,4 +175,16 @@ def is_in_paddle_launch_dist() -> bool: """ 判断是否处于 ``python -m paddle.distributed.launch`` 方法启动的 **paddle** 分布式进程中 """ - return FASTNLP_BACKEND_LAUNCH in os.environ \ No newline at end of file + return FASTNLP_BACKEND_LAUNCH in os.environ + +def is_paddle_module(model) -> bool: + """ + 判断传入的 ``model`` 是否是 :class:`paddle.nn.Layer` 类型 + + :param model: 模型; + :return: 当前模型是否为 ``paddle`` 的模型; + """ + try: + return isinstance(model, paddle.nn.Layer) + except BaseException: + return False \ No newline at end of file diff --git a/fastNLP/core/utils/torch_utils.py b/fastNLP/core/utils/torch_utils.py index 0cef2205..c58715b8 100644 --- a/fastNLP/core/utils/torch_utils.py +++ b/fastNLP/core/utils/torch_utils.py @@ -8,7 +8,8 @@ if _NEED_IMPORT_TORCH: DEFAULT_TORCH_GROUP = torch.distributed.distributed_c10d.group.WORLD __all__ = [ - 'torch_move_data_to_device' + 'torch_move_data_to_device', + 'is_torch_module', ] from .utils import apply_to_collection @@ -64,3 +65,15 @@ def torch_move_data_to_device(batch: Any, device: Optional[Union[str, "torch.dev dtype = TorchTransferableDataType return apply_to_collection(batch, dtype=dtype, function=batch_to) + +def is_torch_module(model) -> bool: + """ + 判断传入的 ``model`` 是否是 :class:`torch.nn.Module` 类型 + + :param model: 模型; + :return: 当前模型是否为 ``torch`` 的模型; + """ + try: + return isinstance(model, torch.nn.Module) + except BaseException: + return False \ No newline at end of file diff --git a/fastNLP/envs/imports.py b/fastNLP/envs/imports.py index 485a9dbf..c0ffffa3 100644 --- a/fastNLP/envs/imports.py +++ b/fastNLP/envs/imports.py @@ -22,6 +22,7 @@ _NEED_IMPORT_FAIRSCALE = not _IS_WINDOWS and _module_available("fairscale") and _NEED_IMPORT_TORCH = _module_available("torch") and 'torch' in need_import _NEED_IMPORT_JITTOR = _module_available("jittor") and 'jittor' in need_import _NEED_IMPORT_PADDLE = _module_available("paddle") and 'paddle' in need_import _NEED_IMPORT_DEEPSPEED = _module_available("deepspeed") and 'torch' in need_import +_NEED_IMPORT_ONEFLOW = _module_available("oneflow") and 'oneflow' in need_import _TORCH_GREATER_EQUAL_1_8 = _NEED_IMPORT_TORCH and _compare_version("torch", operator.ge, "1.8.0") diff --git a/fastNLP/envs/set_backend.py b/fastNLP/envs/set_backend.py index 1ef27ff6..45674794 100644
--- a/fastNLP/envs/set_backend.py +++ b/fastNLP/envs/set_backend.py @@ -8,7 +8,7 @@ from fastNLP.envs.env import FASTNLP_BACKEND, FASTNLP_GLOBAL_RANK, USER_CUDA_VIS from fastNLP.envs.utils import _module_available, get_gpu_count -SUPPORT_BACKENDS = ['torch', 'paddle', 'jittor'] +SUPPORT_BACKENDS = ['torch', 'paddle', 'jittor', 'oneflow'] def _set_backend(): @@ -145,6 +145,9 @@ def set_env(global_seed=None): if backend == 'torch': assert _module_available(backend), f"You must have {backend} available to use {backend} backend." + if backend == 'oneflow': + assert _module_available(backend), f"You must have {backend} available to use {backend} backend." + def dump_fastnlp_backend(default:bool = False, backend=None): """ diff --git a/fastNLP/envs/set_env_on_import.py b/fastNLP/envs/set_env_on_import.py index f35f8e54..27686ae3 100644 --- a/fastNLP/envs/set_env_on_import.py +++ b/fastNLP/envs/set_env_on_import.py @@ -50,6 +50,15 @@ def set_env_on_import_jittor(): if 'log_silent' not in os.environ: os.environ['log_silent'] = '1' +def set_env_on_import_oneflow(): + if 'GLOG_log_dir' in os.environ: + os.environ[FASTNLP_GLOBAL_RANK] = os.environ['RANK'] + if int(os.environ.get(FASTNLP_REMOVE_LOCAL_RANK, 1)): + remove_local_rank_in_argv() + + if 'GLOG_log_dir' in os.environ and FASTNLP_DISTRIBUTED_CHECK not in os.environ: + os.environ[FASTNLP_BACKEND_LAUNCH] = '1' + def set_env_on_import(): """ @@ -61,6 +70,7 @@ def set_env_on_import(): set_env_on_import_torch() set_env_on_import_paddle() set_env_on_import_jittor() + set_env_on_import_oneflow() # fastNLP 内部使用的一些变量 if FASTNLP_LAUNCH_TIME not in os.environ: diff --git a/tests/core/collators/padders/test_get_padder.py b/tests/core/collators/padders/test_get_padder.py index 5996f023..a0e2dfdc 100644 --- a/tests/core/collators/padders/test_get_padder.py +++ b/tests/core/collators/padders/test_get_padder.py @@ -3,7 +3,7 @@ import numpy as np from fastNLP.core.collators.padders.get_padder import get_padder, InconsistencyError, DtypeError, \ _get_element_shape_dtype -from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_PADDLE, _NEED_IMPORT_JITTOR +from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_PADDLE, _NEED_IMPORT_JITTOR, _NEED_IMPORT_ONEFLOW def test_get_element_shape_dtype(): @@ -14,10 +14,11 @@ def test_get_element_shape_dtype(): catalog = _get_element_shape_dtype([np.zeros(3), np.zeros((2, 1))]) -# @pytest.mark.parametrize('backend', ['raw', None, 'numpy', 'torch', 'jittor', 'paddle']) -@pytest.mark.parametrize('backend', ['raw', None, 'numpy', 'torch', 'paddle']) +@pytest.mark.parametrize('backend', ['raw', None, 'numpy', 'torch', 'paddle', 'jittor', 'oneflow']) @pytest.mark.torch @pytest.mark.paddle +@pytest.mark.jittor +@pytest.mark.oneflow def test_get_padder_run(backend): if not _NEED_IMPORT_TORCH and backend == 'torch': pytest.skip("No torch") @@ -25,6 +26,8 @@ def test_get_padder_run(backend): pytest.skip("No paddle") if not _NEED_IMPORT_JITTOR and backend == 'jittor': pytest.skip("No jittor") + if not _NEED_IMPORT_ONEFLOW and backend == 'oneflow': + pytest.skip("No oneflow") batch_field = [1, 2, 3] padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test') @@ -163,3 +166,57 @@ def test_torch_padder(): assert isinstance(pad_batch, np.ndarray) assert np.shape(pad_batch) == (3, 3, 3) assert (pad_batch == np.zeros(np.shape(pad_batch))).sum()==12 + +@pytest.mark.oneflow +def test_oneflow_padder(): + if not _NEED_IMPORT_ONEFLOW: + pytest.skip("No oneflow.") + import oneflow + backend = 
'oneflow' + target_type = oneflow.Tensor + batch_field = [1, 2, 3] + padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test') + pad_batch = padder(batch_field) + assert isinstance(pad_batch, target_type) + assert (pad_batch == oneflow.LongTensor(batch_field)).sum()==len(batch_field) + + batch_field = [[1], [2, 2], [3, 3, 3]] + padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test') + pad_batch = padder(batch_field) + assert isinstance(pad_batch, target_type) + assert pad_batch.shape == (3, 3) + assert (pad_batch == oneflow.zeros(pad_batch.shape)).sum()==3 + + batch_field = [oneflow.ones((3,3)), oneflow.ones((2,3)), oneflow.ones((1,3))] + padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test') + pad_batch = padder(batch_field) + assert isinstance(pad_batch, target_type) + assert pad_batch.shape == (3, 3, 3) + assert (pad_batch == oneflow.zeros(pad_batch.shape)).sum()==9 + + batch_field = [oneflow.ones((3,3)), oneflow.ones((2,3)), oneflow.ones((1,0))] + padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test') + pad_batch = padder(batch_field) + assert isinstance(pad_batch, target_type) + assert pad_batch.shape == (3, 3, 3) + assert (pad_batch == oneflow.zeros(pad_batch.shape)).sum()==12 + + batch_field = [oneflow.ones((3,3)), oneflow.ones((2,3)), oneflow.ones((1,))] + with pytest.raises(InconsistencyError): + padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test') + + # 可以是 numpy.ndarray + batch_field = [np.ones((3,3)), np.ones((2,3)), np.ones((1,0))] + padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test') + pad_batch = padder(batch_field) + assert isinstance(pad_batch, target_type) + assert pad_batch.shape == (3, 3, 3) + assert (pad_batch == oneflow.zeros(pad_batch.shape)).sum()==12 + + # 测试 to numpy + batch_field = [oneflow.ones((3,3)), oneflow.ones((2,3)), oneflow.ones((1,0))] + padder = get_padder(batch_field, pad_val=0, backend='numpy', dtype=int, field_name='test') + pad_batch = padder(batch_field) + assert isinstance(pad_batch, np.ndarray) + assert np.shape(pad_batch) == (3, 3, 3) + assert (pad_batch == np.zeros(np.shape(pad_batch))).sum()==12 diff --git a/tests/core/collators/padders/test_oneflow_padder.py b/tests/core/collators/padders/test_oneflow_padder.py new file mode 100644 index 00000000..9ad31816 --- /dev/null +++ b/tests/core/collators/padders/test_oneflow_padder.py @@ -0,0 +1,105 @@ +import numpy as np +import pytest + +from fastNLP.core.collators.padders.oneflow_padder import OneflowTensorPadder, OneflowSequencePadder, OneflowNumberPadder +from fastNLP.core.collators.padders.exceptions import DtypeError +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW + +if _NEED_IMPORT_ONEFLOW: + import oneflow + + +@pytest.mark.oneflow +class TestOneflowNumberPadder: + def test_run(self): + padder = OneflowNumberPadder(pad_val=-1, ele_dtype=int, dtype=int) + a = [1, 2, 3] + t_a = padder(a) + assert isinstance(t_a, oneflow.Tensor) + assert (t_a == oneflow.LongTensor(a)).sum() == 3 + + +@pytest.mark.oneflow +class TestOneflowSequencePadder: + def test_run(self): + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=int, dtype=int) + a = [[1, 2, 3], [3]] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (2, 3) + b = oneflow.LongTensor([[1, 2, 3], [3, -1, -1]]) + assert (a == b).sum().item() == shape[0]*shape[1] + + def 
test_dtype_check(self): + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=np.zeros(3, dtype=np.int8).dtype, dtype=int) + with pytest.raises(DtypeError): + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=str, dtype=int) + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=oneflow.long, dtype=int) + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=np.int8, dtype=None) + a = padder([[1], [2, 322]]) + assert (a>67).sum()==0 # 因为 int8 的范围为 -128~127,322 溢出后变为 66 + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=np.zeros(2).dtype, dtype=None) + + +@pytest.mark.oneflow +class TestOneflowTensorPadder: + def test_run(self): + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=oneflow.zeros(3).dtype, dtype=int) + a = [oneflow.zeros(3), oneflow.zeros(2), oneflow.zeros(0)] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (3, 3) + b = oneflow.LongTensor([[0, 0, 0], [0, 0, -1], [-1, -1, -1]]) + assert (a == b).sum().item() == shape[0]*shape[1] + + a = [oneflow.zeros((3, 2)), oneflow.zeros((2, 2)), oneflow.zeros((1, 2))] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (3, 3, 2) + b = oneflow.LongTensor([[[0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [-1, -1]], + [[0, 0], [-1, -1], [-1, -1]]]) + assert (a == b).sum().item() == shape[0]*shape[1]*shape[2] + + a = [oneflow.zeros((3, 2)), oneflow.zeros((2, 2)), oneflow.zeros((1, 1))] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (3, 3, 2) + b = oneflow.LongTensor([[[0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [-1, -1]], + [[0, -1], [-1, -1], [-1, -1]]]) + assert (a == b).sum().item() == shape[0]*shape[1]*shape[2] + + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=oneflow.zeros(3).dtype, dtype=int) + a = [oneflow.zeros((3, 2)), oneflow.zeros((2, 2)), oneflow.zeros((1, 0))] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (3, 3, 2) + b = oneflow.LongTensor([[[0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [-1, -1]], + [[-1, -1], [-1, -1], [-1, -1]]]) + assert (a == b).sum().item() == shape[0]*shape[1]*shape[2] + + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=oneflow.zeros(3).dtype, dtype=None) + a = [np.zeros((3, 2)), np.zeros((2, 2)), np.zeros((1, 0))] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (3, 3, 2) + b = oneflow.FloatTensor([[[0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [-1, -1]], + [[-1, -1], [-1, -1], [-1, -1]]]) + assert (a == b).sum().item() == shape[0]*shape[1]*shape[2] + + def test_dtype_check(self): + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=np.zeros(3, dtype=np.int8).dtype, dtype=int) + with pytest.raises(DtypeError): + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=str, dtype=int) + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=oneflow.long, dtype=int) + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=int, dtype=oneflow.long) +
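The padders exercised above also work standalone, independent of any DataLoader (editorial sketch mirroring the sequence-padder tests)::

    from fastNLP.core.collators.padders.oneflow_padder import OneflowSequencePadder

    padder = OneflowSequencePadder(pad_val=-1, ele_dtype=int, dtype=int)
    batch = padder([[1, 2, 3], [3]])   # -> oneflow tensor [[1, 2, 3], [3, -1, -1]]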
diff --git a/tests/core/collators/test_collator.py b/tests/core/collators/test_collator.py index 8443ef92..d00cbe05 100644 --- a/tests/core/collators/test_collator.py +++ b/tests/core/collators/test_collator.py @@ -2,7 +2,7 @@ import numpy as np import pytest -from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_PADDLE, _NEED_IMPORT_JITTOR +from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_PADDLE, _NEED_IMPORT_JITTOR, _NEED_IMPORT_ONEFLOW from fastNLP.core.collators.collator import Collator from ...helpers.utils import Capturing @@ -14,6 +14,10 @@ def _assert_equal(d1, d2): if 'float64' in str(d2.dtype): print(d2.dtype) assert (d1 == d2).all().item() + elif 'oneflow' in str(type(d1)): + if 'float64' in str(d2.dtype): + print(d2.dtype) + assert (d1 == d2).all().item() else: assert all(d1 == d2) except TypeError: @@ -43,9 +47,9 @@ def findListDiff(d1, d2): class TestCollator: - @pytest.mark.torch - def test_run(self): - dict_batch = [{ + @classmethod + def setup_class(cls): + cls.dict_batch = [{ 'str': '1', 'lst_str': ['1'], 'int': 1, @@ -75,17 +79,21 @@ class TestCollator: } ] - list_batch = [['1', ['1'], 1, [1], [[1]], 1.1, [1.1], True, np.ones(1), {'1': '1'}, {'1'}], - ['2', ['2', '2'], 2, [2, 2], [[1], [1, 2]], 2.1, [2.1], False, np.ones(2), {'2': '2'}, {'2'}]] + cls.list_batch = [['1', ['1'], 1, [1], [[1]], 1.1, [1.1], True, np.ones(1), {'1': '1'}, {'1'}], + ['2', ['2', '2'], 2, [2, 2], [[1], [1, 2]], 2.1, [2.1], False, np.ones(2), {'2': '2'}, {'2'}]] + + def test_run_raw(self): raw_pad_batch = {'str': ['1', '2'], 'lst_str': [['1'], ['2', '2']], 'int': [1, 2], 'lst_int': [[1, 0], [1, 2]], 'nest_lst_int': [[[1, 0], [0, 0]], [[1, 0], [1, 2]]], 'float': [1.1, 2.1], 'lst_float': [[1.1], [2.1]], 'bool': [True, False], 'numpy': [np.array([1.]), np.array([0.])], 'dict': {'1': ['1', '2']}, 'set': [{'1'}, {'2'}], 'nested_dict': {'a': [1, 2], 'b': [[1, 2], [1, 2]]}} collator = Collator(backend='raw') - assert raw_pad_batch == collator(dict_batch) + assert raw_pad_batch == collator(self.dict_batch) collator = Collator(backend='raw') raw_pad_lst = [['1', '2'], [['1'], ['2', '2']], [1, 2], [[1, 0], [2, 2]], [[[1, 0], [0, 0]], [[1, 0], [1, 2]]], [1.1, 2.1], [[1.1], [2.1]], [True, False], [[1, 0], [1, 1]], [{'1': '1'}, {'2': '2'}], [{'1'}, {'2'}]] - findListDiff(raw_pad_lst, collator(list_batch)) + findListDiff(raw_pad_lst, collator(self.list_batch)) + + def test_run_numpy(self): collator = Collator(backend='numpy') numpy_pad_batch = {'str': ['1', '2'], 'lst_str': [['1'], ['2', '2']], 'int': np.array([1, 2]), 'lst_int': np.array([[1, 0], [1, 2]]), @@ -94,36 +102,60 @@ class TestCollator: 'dict': {'1': ['1', '2']}, 'set': [{'1'}, {'2'}], 'nested_dict': {'a': np.array([1, 2]), 'b': np.array([[1, 2], [1, 2]])}} - findDictDiff(numpy_pad_batch, collator(dict_batch)) + findDictDiff(numpy_pad_batch, collator(self.dict_batch)) collator = Collator(backend='numpy') numpy_pad_lst = [['1', '2'], [['1'], ['2', '2']], np.array([1, 2]), np.array([[1, 0], [2, 2]]), np.array([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), np.array([1.1, 2.1]), np.array([[1.1], [2.1]]), np.array([True, False]), np.array([[1, 0], [1, 1]]), [{'1': '1'}, {'2': '2'}], [{'1'}, {'2'}]] - findListDiff(numpy_pad_lst, collator(list_batch)) - - if _NEED_IMPORT_TORCH: - import torch - collator = Collator(backend='torch') - numpy_pad_batch = {'str': ['1', '2'], 'lst_str': [['1'], ['2', '2']], 'int': torch.LongTensor([1, 2]), - 'lst_int': torch.LongTensor([[1, 0], [1, 2]]), - 'nest_lst_int': torch.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), - 'float': torch.FloatTensor([1.1, 2.1]), - 'lst_float': torch.FloatTensor([[1.1], [2.1]]), 'bool': torch.BoolTensor([True, False]), - 'numpy': torch.FloatTensor([[1], [0]]), - 'dict': {'1': ['1', '2']}, 'set': [{'1'}, {'2'}], 'nested_dict': {'a': torch.LongTensor([1, 2]), - 'b': torch.LongTensor( - [[1, 2], [1, 2]])}} - - findDictDiff(numpy_pad_batch, collator(dict_batch)) - collator = Collator(backend='torch') - torch_pad_lst = [['1', '2'], [['1'], ['2', '2']], torch.LongTensor([1, 2]), torch.LongTensor([[1, 0], [2,
2]]), - torch.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), - torch.FloatTensor([1.1, 2.1]), torch.FloatTensor([[1.1], [2.1]]), torch.BoolTensor([True, False]), - torch.LongTensor([[1, 0], [1, 1]]), [{'1': '1'}, {'2': '2'}], - [{'1'}, {'2'}]] - findListDiff(torch_pad_lst, collator(list_batch)) + findListDiff(numpy_pad_lst, collator(self.list_batch)) + + @pytest.mark.torch + def test_run_torch(self): + import torch + collator = Collator(backend='torch') + numpy_pad_batch = {'str': ['1', '2'], 'lst_str': [['1'], ['2', '2']], 'int': torch.LongTensor([1, 2]), + 'lst_int': torch.LongTensor([[1, 0], [1, 2]]), + 'nest_lst_int': torch.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), + 'float': torch.FloatTensor([1.1, 2.1]), + 'lst_float': torch.FloatTensor([[1.1], [2.1]]), 'bool': torch.BoolTensor([True, False]), + 'numpy': torch.FloatTensor([[1], [0]]), + 'dict': {'1': ['1', '2']}, 'set': [{'1'}, {'2'}], 'nested_dict': {'a': torch.LongTensor([1, 2]), + 'b': torch.LongTensor( + [[1, 2], [1, 2]])}} + + findDictDiff(numpy_pad_batch, collator(self.dict_batch)) + collator = Collator(backend='torch') + torch_pad_lst = [['1', '2'], [['1'], ['2', '2']], torch.LongTensor([1, 2]), torch.LongTensor([[1, 0], [2, 2]]), + torch.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), + torch.FloatTensor([1.1, 2.1]), torch.FloatTensor([[1.1], [2.1]]), torch.BoolTensor([True, False]), + torch.LongTensor([[1, 0], [1, 1]]), [{'1': '1'}, {'2': '2'}], + [{'1'}, {'2'}]] + findListDiff(torch_pad_lst, collator(self.list_batch)) + + @pytest.mark.oneflow + def test_run_oneflow(self): + import oneflow + collator = Collator(backend='oneflow') + numpy_pad_batch = {'str': ['1', '2'], 'lst_str': [['1'], ['2', '2']], 'int': oneflow.LongTensor([1, 2]), + 'lst_int': oneflow.LongTensor([[1, 0], [1, 2]]), + 'nest_lst_int': oneflow.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), + 'float': oneflow.FloatTensor([1.1, 2.1]), + 'lst_float': oneflow.FloatTensor([[1.1], [2.1]]), 'bool': oneflow.BoolTensor([True, False]), + 'numpy': oneflow.FloatTensor([[1], [0]]), + 'dict': {'1': ['1', '2']}, 'set': [{'1'}, {'2'}], 'nested_dict': {'a': oneflow.LongTensor([1, 2]), + 'b': oneflow.LongTensor( + [[1, 2], [1, 2]])}} + + findDictDiff(numpy_pad_batch, collator(self.dict_batch)) + collator = Collator(backend='oneflow') + oneflow_pad_lst = [['1', '2'], [['1'], ['2', '2']], oneflow.LongTensor([1, 2]), oneflow.LongTensor([[1, 0], [2, 2]]), + oneflow.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), + oneflow.FloatTensor([1.1, 2.1]), oneflow.FloatTensor([[1.1], [2.1]]), oneflow.BoolTensor([True, False]), + oneflow.LongTensor([[1, 0], [1, 1]]), [{'1': '1'}, {'2': '2'}], + [{'1'}, {'2'}]] + findListDiff(oneflow_pad_lst, collator(self.list_batch)) def test_pad(self): dict_batch = [{ @@ -366,6 +398,46 @@ def test_torch_dl(): with pytest.raises(KeyError): dl.set_pad('i', pad_val=None) +@pytest.mark.oneflow +def test_oneflow_dl(): + from fastNLP import OneflowDataLoader + from fastNLP import DataSet + import numpy as np + import oneflow + + ds = DataSet({ + 'x': [1, 2], 'y': [[1,2], [3]], 'z':[np.ones((1, 2)), np.ones((2, 3))], + 'i': [{'j': [1, 2]}, {'j': [3]}], 'j': ['a', 'b'] + }) + + dl = OneflowDataLoader(ds, batch_size=2) + batch = next(iter(dl)) + assert 'x' in batch and 'y' in batch and 'z' in batch and 'i' in batch and 'j' in batch + assert batch['z'].dtype == oneflow.float32 + assert isinstance(batch['j'], list) + assert batch['i']['j'].dtype == oneflow.long + + dl.set_ignore('x') + batch = next(iter(dl)) + assert 'x' not in batch and 'y' in batch and 'z' in batch + + dl.set_pad('y', pad_val=None) + batch = next(iter(dl)) + assert 'x' not in batch and 'y' in batch and 'z' in batch + assert isinstance(batch['y'], list) + assert len(batch['y'][0])!=len(batch['y'][1]) # 没有 pad + + dl.set_pad(('i', 'j'), pad_val=None) + batch = next(iter(dl)) + assert 'x' not in batch and 'y' in batch and 'z' in batch + assert isinstance(batch['y'], list) + assert len(batch['y'][0])!=len(batch['y'][1]) # 没有 pad + assert isinstance(batch['i']['j'], list) + assert len(batch['i']['j'][0])!=len(batch['i']['j'][1]) # 没有 pad + + with pytest.raises(KeyError): + dl.set_pad('i', pad_val=None) + def test_compare_tuple(): from fastNLP.core.collators.collator import _compare_tuple
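The ``test_oneflow_dl`` checks above reduce to this usage pattern (editorial sketch)::

    from fastNLP import DataSet, OneflowDataLoader

    ds = DataSet({"x": [[1, 2], [3]], "y": [0, 1]})
    dl = OneflowDataLoader(ds, batch_size=2)
    dl.set_pad("x", pad_val=-1)    # pad "x" with -1 instead of the default 0
    dl.set_ignore("y")             # drop "y" from the produced batch
    batch = next(iter(dl))         # {"x": oneflow.LongTensor([[1, 2], [3, -1]])}

diff --git a/tests/core/controllers/_test_trainer_oneflow.py b/tests/core/controllers/_test_trainer_oneflow.py new file mode 100644 index 00000000..385aded0 --- /dev/null +++ b/tests/core/controllers/_test_trainer_oneflow.py @@ -0,0 +1,94 @@ +""" +测试 oneflow 动态图的多卡训练:: + + >>> # 不使用 DistributedDataParallel 包裹的情况 + >>> python -m oneflow.distributed.launch --nproc_per_node 2 _test_trainer_oneflow.py + >>> # 使用 DistributedDataParallel 包裹的情况 + >>> python -m oneflow.distributed.launch --nproc_per_node 2 _test_trainer_oneflow.py -w +""" +import sys +sys.path.append("../../../") +import os +from dataclasses import dataclass + +from fastNLP.core.controllers.trainer import Trainer +from fastNLP.core.metrics.accuracy import Accuracy +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW + +if _NEED_IMPORT_ONEFLOW: + import oneflow + from oneflow.nn.parallel import DistributedDataParallel + from oneflow.optim import Adam + from oneflow.utils.data import DataLoader + +from tests.helpers.models.oneflow_model import OneflowNormalModel_Classification_1 +from tests.helpers.datasets.oneflow_data import OneflowArgMaxDataset + +@dataclass +class TrainOneflowConfig: + num_labels: int = 3 + feature_dimension: int = 3 + + batch_size: int = 2 + shuffle: bool = True + evaluate_every = 2 + +def test_trainer_oneflow( + callbacks, + wrapped=False, + n_epochs=2, +): + model = OneflowNormalModel_Classification_1( + num_labels=TrainOneflowConfig.num_labels, + feature_dimension=TrainOneflowConfig.feature_dimension + ) + optimizers = Adam(params=model.parameters(), lr=0.0001) + train_dataloader = DataLoader( + dataset=OneflowArgMaxDataset(20, TrainOneflowConfig.feature_dimension), + batch_size=TrainOneflowConfig.batch_size, + shuffle=True + ) + val_dataloader = DataLoader( + dataset=OneflowArgMaxDataset(12, TrainOneflowConfig.feature_dimension), + batch_size=TrainOneflowConfig.batch_size, + shuffle=True + ) + evaluate_dataloaders = val_dataloader + evaluate_every = TrainOneflowConfig.evaluate_every + metrics = {"acc": Accuracy()} + + if wrapped: + model.to(int(os.environ["LOCAL_RANK"])) + model = DistributedDataParallel(model) + + trainer = Trainer( + model=model, + driver="oneflow", + device=0, + optimizers=optimizers, + train_dataloader=train_dataloader, + evaluate_dataloaders=evaluate_dataloaders, + evaluate_every=evaluate_every, + input_mapping=None, + output_mapping=None, + metrics=metrics, + + n_epochs=n_epochs, + callbacks=callbacks, + ) + trainer.run() + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument( + "-w", + "--wrapped", + default=False, + action="store_true", + help="Use DistributedDataParallel to wrap model first.", + ) + args = parser.parse_args() + + callbacks = [] + test_trainer_oneflow(callbacks, args.wrapped)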
diff --git a/tests/core/controllers/test_trainer_oneflow.py b/tests/core/controllers/test_trainer_oneflow.py new file mode 100644 index 00000000..e5e2433a --- /dev/null +++ b/tests/core/controllers/test_trainer_oneflow.py @@ -0,0 +1,69 @@ +import os +import pytest +from dataclasses import dataclass + +from fastNLP.core.controllers.trainer import Trainer +from fastNLP.core.metrics.accuracy import Accuracy +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW + +if _NEED_IMPORT_ONEFLOW: + from oneflow.optim import Adam + from oneflow.utils.data import DataLoader + + +from tests.helpers.models.oneflow_model import OneflowNormalModel_Classification_1 +from tests.helpers.datasets.oneflow_data import OneflowArgMaxDataset +from tests.helpers.utils import magic_argv_env_context + +@dataclass +class TrainOneflowConfig: + num_labels: int = 3 + feature_dimension: int = 3 + + batch_size: int = 2 + shuffle: bool = True + evaluate_every = 2 + +@pytest.mark.parametrize("device", ["cpu", 1]) +@pytest.mark.parametrize("callbacks", [[]]) +@pytest.mark.oneflow +@magic_argv_env_context +def test_trainer_oneflow( + device, + callbacks, + n_epochs=2, +): + model = OneflowNormalModel_Classification_1( + num_labels=TrainOneflowConfig.num_labels, + feature_dimension=TrainOneflowConfig.feature_dimension + ) + optimizers = Adam(params=model.parameters(), lr=0.0001) + train_dataloader = DataLoader( + dataset=OneflowArgMaxDataset(20, TrainOneflowConfig.feature_dimension), + batch_size=TrainOneflowConfig.batch_size, + shuffle=True + ) + val_dataloader = DataLoader( + dataset=OneflowArgMaxDataset(12, TrainOneflowConfig.feature_dimension), + batch_size=TrainOneflowConfig.batch_size, + shuffle=True + ) + evaluate_dataloaders = val_dataloader + evaluate_every = TrainOneflowConfig.evaluate_every + metrics = {"acc": Accuracy()} + trainer = Trainer( + model=model, + driver="oneflow", + device=device, + optimizers=optimizers, + train_dataloader=train_dataloader, + evaluate_dataloaders=evaluate_dataloaders, + evaluate_every=evaluate_every, + input_mapping=None, + output_mapping=None, + metrics=metrics, + + n_epochs=n_epochs, + callbacks=callbacks, + ) + trainer.run() diff --git a/tests/core/dataloaders/oneflow_dataloader/__init__.py b/tests/core/dataloaders/oneflow_dataloader/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/core/dataloaders/oneflow_dataloader/test_fdl.py b/tests/core/dataloaders/oneflow_dataloader/test_fdl.py new file mode 100644 index 00000000..f6a80d7c --- /dev/null +++ b/tests/core/dataloaders/oneflow_dataloader/test_fdl.py @@ -0,0 +1,169 @@ +import pytest + +from fastNLP.core.dataloaders.oneflow_dataloader import OneflowDataLoader, prepare_oneflow_dataloader +from fastNLP.core.dataset import DataSet +from fastNLP.io.data_bundle import DataBundle +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW +from tests.helpers.utils import Capturing, recover_logger +from fastNLP import logger +import numpy as np + +if _NEED_IMPORT_ONEFLOW: + import oneflow + + +@pytest.mark.oneflow +class TestFdl: + + def test_init_v1(self): + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + fdl = OneflowDataLoader(ds, batch_size=3, shuffle=True, drop_last=True) + # for batch in fdl: + # print(batch) + fdl1 = OneflowDataLoader(ds, batch_size=3, shuffle=True, drop_last=True) + # for batch in fdl1: + # print(batch) + + def test_set_padding(self): + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1]
* 10}) + fdl = OneflowDataLoader(ds, batch_size=3) + fdl.set_pad("x", -1) + for batch in fdl: + assert batch['x'].shape == oneflow.Size([3, 4]) + + def test_get_batch_indices(self): + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + fdl = OneflowDataLoader(ds, batch_size=3, shuffle=True) + for batch in fdl: + assert len(fdl.get_batch_indices()) == 3 + + def test_other_dataset(self): + import numpy as np + class _DataSet: + + def __init__(self): + pass + + def __getitem__(self, item): + return np.random.randn(5), [[1, 2], [2, 3, 4]] + + def __len__(self): + return 10 + + def __getattribute__(self, item): + return object.__getattribute__(self, item) + + dataset = _DataSet() + dl = OneflowDataLoader(dataset, batch_size=2, shuffle=True) + # dl.set_inputs('data', 'labels') + # dl.set_pad_val('labels', val=None) + for batch in dl: + assert batch[0].shape == oneflow.Size([2, 5]) + assert batch[1].shape == oneflow.Size([2, 2, 3]) + + def test_default_collate_fn(self): + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + with pytest.raises(ValueError): + fdl = OneflowDataLoader(ds, batch_size=3, collate_fn=None) + import numpy as np + class _DataSet: + + def __init__(self): + pass + + def __getitem__(self, item): + return np.random.randn(5), [[1, 2], [2, 3, 4]] + + def __len__(self): + return 10 + + fdl = OneflowDataLoader(_DataSet(), batch_size=3, collate_fn=None, drop_last=True) + for batch in fdl: + assert batch[0].shape == oneflow.Size([3, 5]) + + def test_my_collate_fn(self): + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + def collate_fn(batch): + res = {'x': [], 'y': []} + for ins in batch: + res['x'].append(ins['x']) + res['y'].append(ins['y']) + return res + fdl = OneflowDataLoader(ds, collate_fn=collate_fn, batch_size=3, drop_last=True) + for batch in fdl: + assert batch['x'] == [[1, 2], [2, 3, 4], [4, 5, 6, 7]] + assert batch['y'] == [1, 0, 1] + + def test_prepare_oneflow_dataloader(self): + # 测试 fastNLP 的 dataset + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + dl = prepare_oneflow_dataloader(ds, batch_size=8, shuffle=True, num_workers=2) + assert isinstance(dl, OneflowDataLoader) + + ds1 = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + dbl = DataBundle(datasets={'train': ds, 'val': ds1}) + dl_bundle = prepare_oneflow_dataloader(dbl) + assert isinstance(dl_bundle['train'], OneflowDataLoader) + assert isinstance(dl_bundle['val'], OneflowDataLoader) + + ds_dict = {'train_1': ds, 'val': ds1} + dl_dict = prepare_oneflow_dataloader(ds_dict) + assert isinstance(dl_dict['train_1'], OneflowDataLoader) + assert isinstance(dl_dict['val'], OneflowDataLoader) + + # 测试其他 dataset + class _DataSet: + + def __init__(self): + pass + + def __getitem__(self, item): + return np.random.randn(5), [[1, 2], [2, 3, 4]] + + def __len__(self): + return 10 + + def __getattribute__(self, item): + return object.__getattribute__(self, item) + + ds2 = _DataSet() + dl1 = prepare_oneflow_dataloader(ds2, batch_size=8, shuffle=True, num_workers=2) + assert isinstance(dl1, OneflowDataLoader) + + ds3 = _DataSet() + dbl1 = DataBundle(datasets={'train': ds2, 'val': ds3}) + dl_bundle1 = prepare_oneflow_dataloader(dbl1) + assert isinstance(dl_bundle1['train'], OneflowDataLoader) + assert isinstance(dl_bundle1['val'], OneflowDataLoader) + + ds_dict1 = {'train_1': ds2, 'val': ds3} + dl_dict1 = prepare_oneflow_dataloader(ds_dict1) + assert 
isinstance(dl_dict1['train_1'], OneflowDataLoader) + assert isinstance(dl_dict1['val'], OneflowDataLoader) + + ds = [[1, [1]], [2, [2, 2]]] + dl = prepare_oneflow_dataloader(ds, batch_size=2) + for batch in dl: + assert (batch[0] == oneflow.LongTensor([1, 2])).sum()==2 + assert (batch[1] == oneflow.LongTensor([[1, 0], [2, 2]])).sum()==4 + + # sequence = [ds, ds1] + # seq_ds = prepare_oneflow_dataloader(sequence) + # assert isinstance(seq_ds[0], OneflowDataLoader) + # assert isinstance(seq_ds[1], OneflowDataLoader) + + def test_get_backend(self): + from fastNLP.core.collators import Collator + from oneflow.utils.data import DataLoader, Dataset + + class MyDataset(DataSet): + def __len__(self): + return 1000 + + def __getitem__(self, item): + return [[1, 0], [1], [1, 2, 4]], [1, 0] + + collate_batch = Collator(backend='auto') + dl = DataLoader(MyDataset(), collate_fn=collate_batch) + for batch in dl: + print(batch) diff --git a/tests/core/drivers/oneflow_driver/__init__.py b/tests/core/drivers/oneflow_driver/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/core/drivers/oneflow_driver/test_ddp.py b/tests/core/drivers/oneflow_driver/test_ddp.py new file mode 100644 index 00000000..8fa92924 --- /dev/null +++ b/tests/core/drivers/oneflow_driver/test_ddp.py @@ -0,0 +1,944 @@ +import os +import sys +sys.path.append("../../../../") +import pytest +from pathlib import Path + +from fastNLP.core.drivers.oneflow_driver.ddp import OneflowDDPDriver +from fastNLP import prepare_oneflow_dataloader +from fastNLP.core.samplers import ( + RandomSampler, + UnrepeatedSampler, + BucketedBatchSampler, + UnrepeatedRandomSampler, + UnrepeatedSequentialSampler, +) +from tests.helpers.models.oneflow_model import OneflowNormalModel_Classification_1 +from tests.helpers.datasets.oneflow_data import OneflowNormalDataset, OneflowNormalXYDataset +from tests.helpers.utils import recover_logger +from fastNLP.envs.distributed import rank_zero_rm +from fastNLP import logger +from fastNLP.core.drivers.oneflow_driver.dist_utils import fastnlp_oneflow_all_gather +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW +if _NEED_IMPORT_ONEFLOW: + import oneflow + import oneflow.comm as comm + import oneflow.env as dist_env + from oneflow.utils.data import DataLoader, BatchSampler + +def generate_driver(labels, features, device=[0,1], fp16=False, output_from_new_proc="all"): + oneflow_model = OneflowNormalModel_Classification_1(labels, features) + oneflow_opt = oneflow.optim.Adam(params=oneflow_model.parameters(), lr=0.01) + device = [oneflow.device("cuda", i) for i in device] + driver = OneflowDDPDriver( + model=oneflow_model, + parallel_device=device, + fp16=fp16, + output_from_new_proc=output_from_new_proc + ) + driver.set_optimizers(oneflow_opt) + driver.setup() + + return driver + +def dataloader_with_bucketedbatchsampler(dataset, length, batch_size, shuffle, drop_last): + """ + 建立一个 batch_sampler 为 BucketedBatchSampler 的 dataloader + """ + dataloader = DataLoader( + dataset=dataset, + batch_sampler=BucketedBatchSampler( + dataset, + length, + batch_size, + shuffle=shuffle, + drop_last=drop_last, + ), + ) + + return dataloader + +def dataloader_with_randomsampler(dataset, batch_size, shuffle, drop_last, seed=0, unrepeated=False): + """ + 建立一个 sampler 为 RandomSampler 的 dataloader + """ + if unrepeated: + sampler = UnrepeatedRandomSampler(dataset, shuffle, seed) + else: + sampler = RandomSampler(dataset, shuffle, seed=seed) + dataloader = DataLoader( + dataset, + sampler=sampler, + drop_last=drop_last,
batch_size=batch_size + ) + return dataloader + +############################################################################ +# +# 测试 OneflowDDPDriver 的一些函数 +# +############################################################################ + +@pytest.mark.oneflow +class TestDDPDriverFunction: + """ + 测试 OneflowDDPDriver 一些简单函数的测试类,基本都是测试能否运行、是否存在 import 错误等问题 + """ + + def test_simple_functions(self): + """ + 简单测试多个函数 + """ + driver = generate_driver(10, 10) + + """ + 测试 move_data_to_device 函数。 + """ + + driver.move_data_to_device(oneflow.rand((32, 64))) + comm.barrier() + + """ + 测试 is_distributed 函数 + """ + assert driver.is_distributed() == True + comm.barrier() + + """ + 测试 get_no_sync_context 函数 + """ + res = driver.get_model_no_sync_context() + comm.barrier() + + """ + 测试 is_global_zero 函数 + """ + driver.is_global_zero() + comm.barrier() + + """ + 测试 unwrap_model 函数 + """ + driver.unwrap_model() + comm.barrier() + + """ + 测试 get_local_rank 函数 + """ + driver.get_local_rank() + comm.barrier() + + """ + 测试 all_gather 函数 + 详细的测试在 test_dist_utils.py 中完成 + """ + obj = { + "rank": driver.global_rank + } + obj_list = driver.all_gather(obj) + for i, res in enumerate(obj_list): + assert res["rank"] == i + + """ + 测试 broadcast_object 函数 + 详细的函数在 test_dist_utils.py 中完成 + """ + if driver.global_rank == 0: + obj = { + "rank": driver.global_rank + } + else: + obj = None + res = driver.broadcast_object(obj, src=0) + assert res["rank"] == 0 + +############################################################################ +# +# 测试 set_dist_repro_dataloader 函数 +# +############################################################################ + +@pytest.mark.oneflow +class TestSetDistReproDataloader: + + @classmethod + def setup_class(cls): + cls.device = [0, 1] + + def setup_method(self): + self.dataset = OneflowNormalDataset(100) + + """ + 传入的 `dist` 参数为具体的 ReproducibleSampler 或 ReproducibleBatchSampler 的情况 + 此时对应 driver.load_checkpoint 中的情况 + """ + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_batch_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 BucketedBatchSampler 时的表现 + 此时应该将 batch_sampler 替换为 dist 对应的 BucketedBatchSampler + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=not shuffle) + batch_sampler = BucketedBatchSampler(self.dataset, self.dataset._data, batch_size=4, shuffle=shuffle) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, batch_sampler, False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BucketedBatchSampler) + assert replaced_loader.batch_sampler is batch_sampler + self.check_distributed_sampler(replaced_loader.batch_sampler) + self.check_set_dist_repro_dataloader(driver, dataloader, replaced_loader, shuffle) + + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 RandomSampler 时的表现 + 此时应该将 batch_sampler.sampler 替换为 dist 对应的 RandomSampler + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=not shuffle) + sampler = RandomSampler(self.dataset, shuffle=shuffle) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, sampler, False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BatchSampler) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) 
+ assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert replaced_loader.batch_sampler.sampler is sampler + assert replaced_loader.batch_sampler.batch_size == dataloader.batch_sampler.batch_size + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + self.check_set_dist_repro_dataloader(driver, dataloader, replaced_loader, shuffle) + + comm.barrier() + + """ + 传入的参数 `dist` 为 None 的情况,这种情况出现在 trainer 和 evaluator 的初始化过程中,用户指定了 `use_dist_sampler` + 参数为 False。此时函数会根据 `reproducible` 的设置进行不同的处理。 + 当 `reproducible` 为 False 时,需要根据 dataloader 的 batch_sampler 或 sampler 是否为 Reproducible 来决定 + 是否重新实例化 dataloader + """ + + def test_with_dist_none_reproducible_true(self): + """ + 测试 set_dist_repro_dataloader 中 dist 为 None、reproducible 为 True 时的表现 + 当用户在 driver 之外初始化了分布式环境时,fastnlp 不支持进行断点重训,此时应该报错 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=True) + with pytest.raises(RuntimeError): + # 应当抛出 RuntimeError + replaced_loader = driver.set_dist_repro_dataloader(dataloader, None, True) + + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_none_reproducible_false_dataloader_reproducible_batch_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 None、reproducible 为 False 、dataloader 有 BucketedBatchSampler + 时的表现 + 此时传入的 dataloader 的 batch_sampler 应该已经执行了 set_distributed,产生一个新的 dataloader,其 batch_sampler + 和原 dataloader 相同 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = dataloader_with_bucketedbatchsampler(self.dataset, self.dataset._data, 4, shuffle, False) + dataloader.batch_sampler.set_distributed( + num_replicas=driver.world_size, + rank=driver.global_rank, + pad=True + ) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, None, False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BucketedBatchSampler) + assert replaced_loader.batch_sampler.batch_size == 4 + self.check_distributed_sampler(dataloader.batch_sampler) + self.check_set_dist_repro_dataloader(driver, dataloader, replaced_loader, shuffle) + + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_none_reproducible_false_dataloader_reproducible_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 None、reproducible 为 False 、dataloader 有 RandomSampler 时的表现 + 此时传入的 dataloader 的 batch_sampler.sampler 应该已经执行了 set_distributed,产生一个新的 dataloader,其 + batch_sampler.sampler 和原 dataloader 相同 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = dataloader_with_randomsampler(self.dataset, 4, shuffle, False, unrepeated=False) + dataloader.batch_sampler.sampler.set_distributed( + num_replicas=driver.world_size, + rank=driver.global_rank + ) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, None, False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BatchSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + assert not (replaced_loader.batch_sampler.sampler is dataloader.batch_sampler.sampler) + assert replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.batch_sampler.drop_last == False + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + self.check_set_dist_repro_dataloader(driver, dataloader, replaced_loader, 
shuffle) + + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_none_reproducible_false_dataloader_normal(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 None、reproducible 为 False 、dataloader 为一般情况时的表现 + 此时直接返回原来的 dataloader,不做任何处理。 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=shuffle) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, None, False) + + assert replaced_loader is dataloader + comm.barrier() + + """ + 传入的参数 `dist` 为 'dist' 的情况,这种情况出现在 trainer 的初始化过程中,用户指定了 `use_dist_sampler` 参数 + 为 True。此时函数会根据 dataloader 的 batch_sampler 或 sampler 是否为 Reproducible 来决定如何重新实例化 dataloader + """ + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_dist_dataloader_reproducible_batch_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 'dist'、dataloader.batch_sampler 为 ReproducibleBatchSampler + 的表现 + 此时应该返回一个新的 dataloader,其 batch_sampler 和原 dataloader 相同,且应该正确地设置了分布式相关的属性 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = dataloader_with_bucketedbatchsampler(self.dataset, self.dataset._data, 4, shuffle, False) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "dist", False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BucketedBatchSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.drop_last == dataloader.drop_last + self.check_distributed_sampler(replaced_loader.batch_sampler) + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_dist_dataloader_reproducible_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 'dist'、dataloader.batch_sampler.sampler 为 ReproducibleSampler + 的表现 + 此时应该返回一个新的 dataloader,其 batch_sampler.sampler 和原 dataloader 相同,且应该正确地设置了分布式相关 + 的属性 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = dataloader_with_randomsampler(self.dataset, 4, shuffle, False, unrepeated=False) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "dist", False) + + assert not (replaced_loader is dataloader) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + assert not (replaced_loader.batch_sampler.sampler is dataloader.batch_sampler.sampler) + assert replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.batch_sampler.sampler.shuffle == shuffle + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_dist_dataloader_normal(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 'dist'、dataloader 为一般情况的表现 + 此时应该返回一个新的 dataloader,并替换其 batch_sampler.sampler 为 RandomSampler,且应该正确设置了分布式相关 + 的属性 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=shuffle) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "dist", False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BatchSampler) + assert not
(replaced_loader.batch_sampler is dataloader.batch_sampler) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + assert replaced_loader.batch_sampler.batch_size == dataloader.batch_sampler.batch_size + assert replaced_loader.batch_sampler.sampler.shuffle == shuffle + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + comm.barrier() + + """ + 传入的参数 `dist` 为 'unrepeatdist' 的情况,这种情况出现在 evaluator 的初始化过程中,用户指定了 `use_dist_sampler` 参数 + 为 True。此时函数会根据 dataloader 的 sampler 是否为 Unrepeated 和 Reproducible 来决定如何重新实例化 dataloader + """ + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_unrepeat_dataloader_reproducible_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 'unrepeatdist'、dataloader.batch_sampler.sampler 为 ReproducibleSampler + 的表现 + 此时应该返回一个新的 dataloader,且将原来的 Sampler 替换为 UnrepeatedRandomSampler,且正确地设置了分布式相关 + 的属性 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = dataloader_with_randomsampler(self.dataset, 4, shuffle, False, unrepeated=False) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "unrepeatdist", False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BatchSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert isinstance(replaced_loader.batch_sampler.sampler, UnrepeatedRandomSampler) + assert replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.batch_sampler.sampler.shuffle == shuffle + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_unrepeat_dataloader_unrepeated_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 'unrepeatdist'、dataloader.batch_sampler.sampler 为 UnrepeatedSampler + 的表现 + 此时应该返回一个新的 dataloader,且重新实例化了原来的 Sampler + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = dataloader_with_randomsampler(self.dataset, 4, shuffle, False, unrepeated=True) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "unrepeatdist", False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BatchSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert isinstance(replaced_loader.batch_sampler.sampler, UnrepeatedRandomSampler) + assert not (replaced_loader.batch_sampler.sampler is dataloader.batch_sampler.sampler) + assert replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.drop_last == dataloader.drop_last + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_unrepeat_dataloader_normal(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 'unrepeatdist'、dataloader 为一般情况的表现 + 此时应该返回一个新的 dataloader,且将 sampler 替换为 UnrepeatedSequentialSampler,并正确地设置了分布式相关 + 的属性 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=shuffle) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "unrepeatdist", False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BatchSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert isinstance(replaced_loader.batch_sampler.sampler, UnrepeatedSequentialSampler) + assert
replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.drop_last == dataloader.drop_last + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + comm.barrier() + + def check_distributed_sampler(self, sampler): + """ + 测试替换得到的 sampler 或 batch_sampler 的分布式设置是否正确 + """ + assert sampler.num_replicas == dist_env.get_world_size() + assert sampler.rank == dist_env.get_rank() + if not isinstance(sampler, UnrepeatedSampler): + assert sampler.pad == True + + def check_set_dist_repro_dataloader(self, driver, dataloader, replaced_loader, shuffle): + """ + 测试多卡下 set_dist_repro_dataloader 函数的执行结果是否正确 + """ + # 迭代两个 batch + num_replicas = len(self.device) + num_consumed_batches = 2 + already_seen_idx = set() + if isinstance(replaced_loader.batch_sampler, BucketedBatchSampler): + replaced_loader.batch_sampler.set_epoch(4) + else: + replaced_loader.batch_sampler.sampler.set_epoch(4) + for idx, batch in enumerate(replaced_loader): + if idx >= num_consumed_batches: + break + already_seen_idx.update(batch.tolist()) + comm.barrier() + if isinstance(replaced_loader.batch_sampler, BucketedBatchSampler): + sampler_states = replaced_loader.batch_sampler.state_dict() + else: + sampler_states = replaced_loader.batch_sampler.sampler.state_dict() + + # 重新加载,应该可以输出剩下的内容,且对于 OneflowNormalDataset 来说,排序后应该是一个 range + left_idxes = set() + if isinstance(replaced_loader.batch_sampler, BucketedBatchSampler): + batch_size = replaced_loader.batch_sampler.batch_size + sampler_states["num_consumed_samples"] = num_consumed_batches * batch_size * num_replicas + # 重新改造 dataloader + new_loader = dataloader_with_bucketedbatchsampler( + replaced_loader.dataset, + length=replaced_loader.dataset._data, + batch_size=batch_size, + shuffle=shuffle, + drop_last=False, + ) + new_loader.batch_sampler.set_distributed( + num_replicas=driver.world_size, + rank=driver.global_rank, + pad=True + ) + new_loader.batch_sampler.load_state_dict(sampler_states) + new_loader.batch_sampler.set_epoch(4) + else: + batch_size = replaced_loader.batch_sampler.batch_size + sampler_states["num_consumed_samples"] = num_consumed_batches * batch_size * num_replicas + # 重新构造 dataloader + new_loader = dataloader_with_randomsampler(replaced_loader.dataset, batch_size, shuffle, drop_last=False) + new_loader.batch_sampler.sampler.set_distributed( + num_replicas=driver.world_size, + rank=driver.global_rank + ) + new_loader.batch_sampler.sampler.load_state_dict(sampler_states) + new_loader.batch_sampler.sampler.set_epoch(4) + for idx, batch in enumerate(new_loader): + left_idxes.update(batch.tolist()) + + assert len(left_idxes) + len(already_seen_idx) == len(self.dataset) / num_replicas + assert len(left_idxes | already_seen_idx) == len(self.dataset) / num_replicas + + +############################################################################ +# +# 测试 save 和 load 相关的功能 +# +############################################################################ +@pytest.mark.oneflow +class TestSaveLoad: + """ + 测试多卡情况下 save 和 load 相关函数的表现 + """ + + def setup_method(self): + self.dataset = OneflowNormalXYDataset(100) + + @pytest.mark.parametrize("only_state_dict", ([True, False])) + def test_save_and_load_model(self, only_state_dict): + """ + 测试 save_model 和 load_model 函数 + """ + try: + path = "model" + + dataloader = DataLoader(self.dataset, batch_size=2) + driver1, driver2 = generate_driver(20, 1), generate_driver(20, 1) + + driver1.save_model(path, only_state_dict) + + # 同步 + comm.barrier() +
driver2.load_model(path, only_state_dict) + + for idx, batch in enumerate(dataloader): + batch = driver1.move_data_to_device(batch) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + + assert oneflow.all(res1["preds"] == res2["preds"]) + finally: + rank_zero_rm(path) + + @pytest.mark.parametrize("only_state_dict", ([True, False])) + @pytest.mark.parametrize("fp16", ([True, False])) + @pytest.mark.parametrize("device", ([[0,1]])) + def test_save_and_load_with_bucketedbatchsampler(self, device, only_state_dict, fp16): + """ + 测试save和load函数,主要测试 dataloader 被替换了 sampler 之后的情况 + """ + + try: + path = "model.ckp" + num_replicas = len(device) + + driver1, driver2 = generate_driver(20, 1, device=device, fp16=fp16), \ + generate_driver(20, 1, device=device, fp16=False) + dataloader = dataloader_with_bucketedbatchsampler( + self.dataset, + length=[10 for i in range(len(self.dataset))], + batch_size=4, + shuffle=True, + drop_last=False + ) + dataloader.batch_sampler.set_distributed( + num_replicas=driver1.world_size, + rank=driver1.global_rank, + pad=True + ) + num_consumed_batches = 4 + + already_seen_x_set = set() + already_seen_y_set = set() + driver1.set_sampler_epoch(dataloader, 4) + for idx, batch in enumerate(dataloader): + if idx >= num_consumed_batches: + break + already_seen_x_set.update(batch["x"].reshape(-1, ).tolist()) + already_seen_y_set.update(batch["y"].reshape(-1, ).tolist()) + + # 同步 + comm.barrier() + + # 保存状态 + sampler_states = dataloader.batch_sampler.state_dict() + save_states = {"num_consumed_batches": num_consumed_batches} + driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) + comm.barrier() + # 加载 + # 更改 batch_size + dataloader = dataloader_with_bucketedbatchsampler( + self.dataset, + length=[10 for i in range(len(self.dataset))], + batch_size=2, + shuffle=True, + drop_last=False + ) + dataloader.batch_sampler.set_distributed( + num_replicas=driver2.world_size, + rank=driver2.global_rank, + pad=True + ) + comm.barrier() + load_states = driver2.load_checkpoint(Path(path), dataloader, only_state_dict, should_load_model=True) + comm.barrier() + replaced_loader = load_states.pop("dataloader") + + # 1. 检查 optimizer 的状态 + # TODO optimizer 的 state_dict 总是为空 + + # 2. 检查 batch_sampler 是否被正确地加载和替换 + assert not (replaced_loader is dataloader) + assert replaced_loader.batch_sampler is dataloader.batch_sampler + assert isinstance(replaced_loader.batch_sampler, BucketedBatchSampler) + if os.environ['FASTNLP_GLOBAL_RANK'] == '0': + assert replaced_loader.batch_sampler.seed == sampler_states["seed"] + assert replaced_loader.batch_sampler.num_consumed_samples == num_consumed_batches * 4 * num_replicas + + # # 3. 检查 fp16 是否被加载 + # if fp16: + # assert not isinstance(driver2.grad_scaler, oneflow.cuda.amp.GradScaler) + + # 4. 检查 model 的参数是否正确 + # 5. 
检查 batch_idx + start_batch = load_states.pop('batch_idx_in_epoch') + assert start_batch == 2 * num_consumed_batches + left_x_batches = set() + left_y_batches = set() + driver2.set_sampler_epoch(replaced_loader, 4) + for idx, batch in enumerate(replaced_loader): + + left_x_batches.update(batch["x"].reshape(-1, ).tolist()) + left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + assert oneflow.all(res1["preds"] == res2["preds"]) + + assert len(left_x_batches) + len(already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_x_batches | already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_y_batches) + len(already_seen_y_set) == len(self.dataset) / num_replicas + assert len(left_y_batches | already_seen_y_set) == len(self.dataset) / num_replicas + comm.barrier() + finally: + rank_zero_rm(path) + + @pytest.mark.parametrize("only_state_dict", ([True, False])) + @pytest.mark.parametrize("fp16", ([True, False])) + @pytest.mark.parametrize("device", ([[0,1]])) + def test_save_and_load_with_randomsampler(self, device, only_state_dict, fp16): + """ + 测试save和load函数,主要测试 dataloader 被替换了 batch_sampler 的情况 + """ + + try: + path = "checkpoints/" + + num_replicas = len(device) + + driver1 = generate_driver(20, 1, device=device, fp16=fp16) + driver2 = generate_driver(20, 1, device=device, fp16=False) + + dataloader = dataloader_with_randomsampler(self.dataset, 4, True, False, unrepeated=False) + dataloader.batch_sampler.sampler.set_distributed( + num_replicas=driver1.world_size, + rank=driver1.global_rank, + pad=True + ) + num_consumed_batches = 4 + + already_seen_x_set = set() + already_seen_y_set = set() + driver1.set_sampler_epoch(dataloader, 4) + for idx, batch in enumerate(dataloader): + if idx >= num_consumed_batches: + break + already_seen_x_set.update(batch["x"].reshape(-1, ).tolist()) + already_seen_y_set.update(batch["y"].reshape(-1, ).tolist()) + + # 同步 + comm.barrier() + + # 保存状态 + sampler_states = dataloader.batch_sampler.sampler.state_dict() + save_states = {"num_consumed_batches": num_consumed_batches} + driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) + comm.barrier() # 等待save成功 + # 加载 + # 更改 batch_size + dataloader = dataloader_with_randomsampler(self.dataset, 2, True, False, unrepeated=False) + dataloader.batch_sampler.sampler.set_distributed( + num_replicas=driver2.world_size, + rank=driver2.global_rank, + pad=True + ) + load_states = driver2.load_checkpoint(Path(path), dataloader, only_state_dict, should_load_model=True) + replaced_loader = load_states.pop("dataloader") + + # 1. 检查 optimizer 的状态 + # TODO optimizer 的 state_dict 总是为空 + + # 2. 检查 sampler 是否被正确地加载和替换 + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + if os.environ['FASTNLP_GLOBAL_RANK'] == '0': + assert replaced_loader.batch_sampler.sampler.seed == sampler_states["seed"] + assert replaced_loader.batch_sampler.sampler.epoch == sampler_states["epoch"] + assert len(replaced_loader.batch_sampler.sampler.dataset) == sampler_states["length"] + assert replaced_loader.batch_sampler.sampler.shuffle == sampler_states["shuffle"] + assert replaced_loader.batch_sampler.sampler.num_consumed_samples == 4 * num_consumed_batches * num_replicas + + # # 3. 检查 fp16 是否被加载 + # if fp16: + # assert not isinstance(driver2.grad_scaler, oneflow.cuda.amp.GradScaler) + + # 4. 检查 model 的参数是否正确 + # 5. 
检查 batch_idx + start_batch = load_states.pop('batch_idx_in_epoch') + assert start_batch == 2 * num_consumed_batches + left_x_batches = set() + left_y_batches = set() + driver2.set_sampler_epoch(replaced_loader, 4) + for idx, batch in enumerate(replaced_loader): + + left_x_batches.update(batch["x"].reshape(-1, ).tolist()) + left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + assert oneflow.all(res1["preds"] == res2["preds"]) + + assert len(left_x_batches) + len(already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_x_batches | already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_y_batches) + len(already_seen_y_set) == len(self.dataset) / num_replicas + assert len(left_y_batches | already_seen_y_set) == len(self.dataset) / num_replicas + + finally: + rank_zero_rm(path) + + +@pytest.mark.oneflow +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +def test_shuffle_dataloader(shuffle, batch_size, drop_last, reproducible=True): + try: + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 200 + dataset = OneflowNormalXYDataset(num_samples) + dl = prepare_oneflow_dataloader(dataset, shuffle=shuffle, batch_size=batch_size, drop_last=drop_last) + model = OneflowNormalModel_Classification_1(10, 32) + device = [oneflow.device("cuda", i) for i in [0, 1]] + + driver = OneflowDDPDriver(model, parallel_device=device) + driver.setup() + dl = driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + flags.append(batch['x'].size(0) == batch_size) + data.extend(batch['x'].reshape(-1).tolist()) + + _num_samples = num_samples//2 + + if drop_last and _num_samples%batch_size != 0: + assert len(data)!=_num_samples + assert all(flags) == True + elif _num_samples%batch_size!=0: + assert flags[-1] is False + else: + assert len(data) == _num_samples + + if not shuffle: + for i in range(1, len(data)-1): + assert data[i]>data[i-1] + else: + flags = [] + for i in range(1, len(data)-1): + flags.append(data[i]>data[i-1]) + assert all(flags) is False + datas = fastnlp_oneflow_all_gather(data) + if drop_last: + assert len(set(datas[0] + datas[1])) == num_samples-_num_samples%batch_size*2 + else: + assert len(set(datas[0] + datas[1])) == num_samples + finally: + pass + + +@pytest.mark.oneflow +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +def test_batch_sampler_dataloader(shuffle, batch_size, drop_last, reproducible=True): + try: + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 200 + num_device = 2 + dataset = OneflowNormalXYDataset(num_samples) + sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, + shuffle=shuffle, num_batch_per_bucket=2) + dl = prepare_oneflow_dataloader(dataset, batch_sampler=sampler) + model = OneflowNormalModel_Classification_1(10, 32) + device = [oneflow.device("cuda", i) for i in [0, 1]] + driver = OneflowDDPDriver(model, parallel_device=device) + driver.setup() + dl = driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + d = batch['x'].reshape(-1).tolist() + diff = max(d) - min(d) + assert diff= 
num_consumed_batches: + break + already_seen_idx.update(batch.tolist()) + if isinstance(replaced_loader.batch_sampler, ReproduceBatchSampler): + sampler_states = replaced_loader.batch_sampler.state_dict() + else: + sampler_states = replaced_loader.batch_sampler.sampler.state_dict() + + # 重新加载,应该可以输出剩下的内容,且对于 OneflowNormalDataset 来说,排序后应该是一个 range + left_idxes = set() + if isinstance(replaced_loader.batch_sampler, ReproduceBatchSampler): + batch_size = replaced_loader.batch_sampler.batch_size + sampler_states["num_consumed_samples"] = num_consumed_batches * batch_size + # 重新改造 dataloader + new_loader = dataloader_with_randombatchsampler(replaced_loader.dataset, batch_size, shuffle, False) + new_loader.batch_sampler.load_state_dict(sampler_states) + new_loader.batch_sampler.set_epoch(3) + else: + batch_size = replaced_loader.batch_sampler.batch_size + sampler_states["num_consumed_samples"] = num_consumed_batches * batch_size + # 重新构造 dataloader + new_loader = dataloader_with_randomsampler(replaced_loader.dataset, batch_size, shuffle, False) + new_loader.batch_sampler.sampler.load_state_dict(sampler_states) + new_loader.batch_sampler.sampler.set_epoch(3) + for idx, batch in enumerate(new_loader): + left_idxes.update(batch.tolist()) + + assert len(left_idxes) + len(already_seen_idx) == len(self.dataset) + assert len(left_idxes | already_seen_idx) == len(self.dataset) + +############################################################################ +# +# 测试 save 和 load 相关的功能 +# +############################################################################ + +def generate_random_driver(labels, features, fp16=False, device="cpu"): + """ + 生成driver + """ + model = OneflowNormalModel_Classification_1(labels, features) + opt = oneflow.optim.Adam(params=model.parameters(), lr=0.01) + driver = OneflowSingleDriver(model, device=device, fp16=fp16) + driver.set_optimizers(opt) + driver.setup() + + return driver + +@pytest.mark.oneflow +@pytest.mark.parametrize("only_state_dict", ([True, False])) +def test_save_and_load_model(only_state_dict): + """ + 测试 save_model 和 load_model 函数 + """ + try: + path = "model" + dataset = OneflowNormalXYDataset(20) + dataloader = DataLoader(dataset, batch_size=4) + driver1, driver2 = generate_random_driver(20, 1), generate_random_driver(20, 1) + + driver1.save_model(path, only_state_dict) + driver2.load_model(path, only_state_dict) + + for batch in dataloader: + batch = driver1.move_data_to_device(batch) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + + assert oneflow.all(res1["preds"] == res2["preds"]) + finally: + rank_zero_rm(path) + +@pytest.mark.oneflow +@pytest.mark.parametrize("only_state_dict", ([True, False])) +@pytest.mark.parametrize("fp16", ([True, False])) +def test_save_and_load_with_randombatchsampler(only_state_dict, fp16): + """ + 测试save和load函数,主要测试 dataloader 被替换了 sampler 之后的情况 + """ + + try: + path = "model.ckp" + dataset = OneflowNormalXYDataset(20) + dataloader = dataloader_with_randombatchsampler(dataset, 4, True, False) + driver1, driver2 = generate_random_driver(20, 1, fp16, "cuda"), generate_random_driver(20, 1, False, "cuda") + + num_consumed_batches = 2 + + already_seen_x_set = set() + already_seen_y_set = set() + driver1.set_sampler_epoch(dataloader, 3) + for idx, batch in enumerate(dataloader): + if idx >= num_consumed_batches: + break + already_seen_x_set.update(batch["x"].reshape(-1, ).tolist()) + already_seen_y_set.update(batch["y"].reshape(-1, ).tolist()) + + sampler_states = 
dataloader.batch_sampler.state_dict() + save_states = {"num_consumed_batches": num_consumed_batches} + driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) + # 加载 + # 更改 batch_size + + dataloader = dataloader_with_randombatchsampler(dataset, 2, True, False) + load_states = driver2.load_checkpoint(Path(path), dataloader, only_state_dict, should_load_model=True) + replaced_loader = load_states.pop("dataloader") + # 1. 检查 optimizer 的状态 + # TODO optimizer 的 state_dict 总是为空 + + # 2. 检查 batch_sampler 是否被正确地加载和替换 + assert not (replaced_loader is dataloader) + assert replaced_loader.batch_sampler is dataloader.batch_sampler + assert isinstance(replaced_loader.batch_sampler, ReproduceBatchSampler) + assert replaced_loader.batch_sampler.index_list == sampler_states["index_list"] + assert replaced_loader.batch_sampler.num_consumed_samples == num_consumed_batches * 4 + + # # 3. 检查 fp16 是否被加载 + # if fp16: + # assert not isinstance(driver2.grad_scaler, oneflow.cuda.amp.GradScaler) + + # 4. 检查 model 的参数是否正确 + # 5. 检查 batch_idx + start_batch = load_states.pop('batch_idx_in_epoch') + assert start_batch == 2 * num_consumed_batches + left_x_batches = set() + left_y_batches = set() + driver1.set_sampler_epoch(replaced_loader, 3) + for idx, batch in enumerate(replaced_loader): + + batch = driver2.move_data_to_device(batch) + left_x_batches.update(batch["x"].reshape(-1, ).tolist()) + left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + assert oneflow.all(res1["preds"] == res2["preds"]) + + assert len(left_x_batches) + len(already_seen_x_set) == len(dataset) + assert len(left_x_batches | already_seen_x_set) == len(dataset) + assert len(left_y_batches) + len(already_seen_y_set) == len(dataset) + assert len(left_y_batches | already_seen_y_set) == len(dataset) + finally: + rank_zero_rm(path) + +@pytest.mark.oneflow +@pytest.mark.parametrize("only_state_dict", ([True, False])) +@pytest.mark.parametrize("fp16", ([True, False])) +def test_save_and_load_with_randomsampler(only_state_dict, fp16): + """ + 测试save和load函数,主要测试 dataloader 被替换了 sampler 的情况 + """ + + try: + path = "model.ckp" + + driver1, driver2 = generate_random_driver(40, 1, fp16, "cuda"), generate_random_driver(40, 1, False, "cuda") + dataset = OneflowNormalXYDataset(40) + dataloader = dataloader_with_randomsampler(dataset, 4, True, False) + num_consumed_batches = 2 + + already_seen_x_set = set() + already_seen_y_set = set() + driver1.set_sampler_epoch(dataloader, 3) + for idx, batch in enumerate(dataloader): + if idx >= num_consumed_batches: + break + already_seen_x_set.update(batch["x"].reshape(-1, ).tolist()) + already_seen_y_set.update(batch["y"].reshape(-1, ).tolist()) + + sampler_states = dataloader.batch_sampler.sampler.state_dict() + save_states = {"num_consumed_batches": num_consumed_batches} + driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) + + # 加载 + # 更改 batch_size + dataloader = dataloader_with_randomsampler(dataset, 2, True, False) + load_states = driver2.load_checkpoint(Path(path), dataloader, only_state_dict, should_load_model=True) + replaced_loader = load_states.pop("dataloader") + + # 1. 检查 optimizer 的状态 + # TODO optimizer 的 state_dict 总是为空 + + # 2. 
检查 sampler 是否被正确地加载和替换 + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + assert replaced_loader.batch_sampler.sampler.seed == sampler_states["seed"] + assert replaced_loader.batch_sampler.sampler.epoch == sampler_states["epoch"] + assert replaced_loader.batch_sampler.sampler.num_consumed_samples == 4 * num_consumed_batches + assert len(replaced_loader.batch_sampler.sampler.dataset) == sampler_states["length"] + assert replaced_loader.batch_sampler.sampler.shuffle == sampler_states["shuffle"] + + # # 3. 检查 fp16 是否被加载 + # if fp16: + # assert not isinstance(driver2.grad_scaler, oneflow.cuda.amp.GradScaler) + + # 4. 检查 model 的参数是否正确 + # 5. 检查 batch_idx + start_batch = load_states.pop('batch_idx_in_epoch') + assert start_batch == 2 * num_consumed_batches + left_x_batches = set() + left_y_batches = set() + # set epoch + driver2.set_sampler_epoch(replaced_loader, 3) + for idx, batch in enumerate(replaced_loader): + + batch = driver2.move_data_to_device(batch) + left_x_batches.update(batch["x"].reshape(-1, ).tolist()) + left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + assert oneflow.all(res1["preds"] == res2["preds"]) + + assert len(left_x_batches) + len(already_seen_x_set) == len(dataset) + assert len(left_x_batches | already_seen_x_set) == len(dataset) + assert len(left_y_batches) + len(already_seen_y_set) == len(dataset) + assert len(left_y_batches | already_seen_y_set) == len(dataset) + finally: + rank_zero_rm(path) + + +@pytest.mark.oneflow +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +@pytest.mark.parametrize("reproducible", ([True, False])) +def test_shuffle_dataloader(shuffle, batch_size, drop_last, reproducible): + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 100 + dataset = OneflowNormalXYDataset(num_samples) + dl = prepare_oneflow_dataloader(dataset, shuffle=shuffle, batch_size=batch_size, drop_last=drop_last) + model = OneflowNormalModel_Classification_1(10, 32) + driver = OneflowSingleDriver(model, device="cpu") + dl = driver.set_dist_repro_dataloader(dataloader=dl, reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + flags.append(batch['x'].size(0) == batch_size) + data.extend(batch['x'].reshape(-1).tolist()) + + if drop_last and num_samples%batch_size != 0: + assert len(data)!=num_samples + assert all(flags) == True + elif num_samples%batch_size!=0: + assert flags[-1] is False + else: + assert len(data) == num_samples + + if not shuffle: + for i in range(1, len(data)): + assert data[i]>data[i-1] + else: + flags = [] + for i in range(1, len(data)): + flags.append(data[i]>data[i-1]) + assert all(flags) is False + + +@pytest.mark.oneflow +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +@pytest.mark.parametrize("reproducible", ([True, False])) +def test_batch_sampler_dataloader(shuffle, batch_size, drop_last, reproducible): + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 100 + dataset = OneflowNormalXYDataset(num_samples) + sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, + shuffle=shuffle, num_batch_per_bucket=2) + dl = prepare_oneflow_dataloader(dataset, 
batch_sampler=sampler) + model = OneflowNormalModel_Classification_1(10, 32) + driver = OneflowSingleDriver(model, device="cpu") + dl = driver.set_dist_repro_dataloader(dataloader=dl, reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + d = batch['x'].reshape(-1).tolist() + diff = max(d) - min(d) + assert diff\n", + "\n", + "\n", + "\n", + "``ERNIE 2.0`` 则提出了连续学习(``Continual Learning``)的概念,即首先用一个简单的任务来初始化模型,在更新时用前一个任务训练好的参数作为下一个任务模型初始化的参数。这样在训练新的任务时,模型便可以记住之前学习到的知识,使得模型在新任务上获得更好的表现。``ERNIE 2.0`` 分别构建了词法、语法、语义不同级别的预训练任务,并使用不同的 task id 来标示不同的任务,在共计16个中英文任务上都取得了SOTA效果。\n", + "\n", + "\n", + "\n", + "``ERNIE 3.0`` 将自回归和自编码网络融合在一起进行预训练,其中自编码网络采用 ``ERNIE 2.0`` 的多任务学习增量式构建预训练任务,持续进行语义理解学习。其中自编码网络增加了知识增强的预训练任务。自回归网络则基于 ``Tranformer-XL`` 结构,支持长文本语言模型建模,并在多个自然语言处理任务中取得了SOTA的效果。\n", + "\n", + "\n", + "\n", + "接下来,我们将展示如何在 ``FastNLP`` 中使用基于 ``paddle`` 的 ``ERNIE 1.0`` 框架进行中文情感分析。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. 使用 tokenizer 处理数据并构造 dataloader\n", + "\n", + "#### 2.1 加载中文数据集 ChnSentiCorp\n", + "\n", + "``ChnSentiCorp`` 数据集是由中国科学院发布的中文句子级情感分析数据集,包含了从网络上获取的酒店、电影、书籍等多个领域的评论,每条评论都被划分为两个标签:消极(``0``)和积极(``1``),可以用于二分类的中文情感分析任务。通过 ``paddlenlp.datasets.load_dataset`` 函数,我们可以加载并查看 ``ChnSentiCorp`` 数据集的内容。" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "训练集大小: 9600\n", + "{'text': '选择珠江花园的原因就是方便,有电动扶梯直接到达海边,周围餐馆、食廊、商场、超市、摊位一应俱全。酒店装修一般,但还算整洁。 泳池在大堂的屋顶,因此很小,不过女儿倒是喜欢。 包的早餐是西式的,还算丰富。 服务吗,一般', 'label': 1, 'qid': ''}\n", + "{'text': '15.4寸笔记本的键盘确实爽,基本跟台式机差不多了,蛮喜欢数字小键盘,输数字特方便,样子也很美观,做工也相当不错', 'label': 1, 'qid': ''}\n", + "{'text': '房间太小。其他的都一般。。。。。。。。。', 'label': 0, 'qid': ''}\n" + ] + } + ], + "source": [ + "from paddlenlp.datasets import load_dataset\n", + "\n", + "train_dataset, val_dataset, test_dataset = load_dataset(\"chnsenticorp\", splits=[\"train\", \"dev\", \"test\"])\n", + "print(\"训练集大小:\", len(train_dataset))\n", + "for i in range(3):\n", + " print(train_dataset[i])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 2.2 处理数据\n", + "\n", + "可以看到,原本的数据集仅包含中文的文本和标签,这样的数据是无法被模型识别的。同英文文本分类任务一样,我们需要使用 ``tokenizer`` 对文本进行分词并转换为数字形式的结果。我们可以加载已经预训练好的中文分词模型 ``ernie-1.0-base-zh``,将分词的过程写在函数 ``_process`` 中,然后调用数据集的 ``map`` 函数对每一条数据进行分词。其中:\n", + "- 参数 ``max_length`` 代表句子的最大长度;\n", + "- ``padding=\"max_length\"`` 表示将长度不足的结果 padding 至和最大长度相同;\n", + "- ``truncation=True`` 表示将长度过长的句子进行截断。\n", + "\n", + "至此,我们得到了每条数据长度均相同的数据集。" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[2022-06-22 21:31:04,168] [ INFO]\u001b[0m - We are using to load 'ernie-1.0-base-zh'.\u001b[0m\n", + "\u001b[32m[2022-06-22 21:31:04,171] [ INFO]\u001b[0m - Already cached /remote-home/shxing/.paddlenlp/models/ernie-1.0-base-zh/vocab.txt\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'text': '选择珠江花园的原因就是方便,有电动扶梯直接到达海边,周围餐馆、食廊、商场、超市、摊位一应俱全。酒店装修一般,但还算整洁。 泳池在大堂的屋顶,因此很小,不过女儿倒是喜欢。 包的早餐是西式的,还算丰富。 服务吗,一般', 'label': 1, 'qid': '', 'input_ids': [1, 352, 790, 1252, 409, 283, 509, 5, 250, 196, 113, 10, 58, 518, 4, 9, 128, 70, 1495, 1855, 339, 293, 45, 302, 233, 554, 4, 544, 637, 1134, 774, 6, 494, 2068, 6, 278, 191, 6, 634, 99, 6, 2678, 144, 7, 149, 1573, 62, 12043, 661, 737, 371, 435, 7, 689, 4, 255, 201, 559, 407, 1308, 12043, 2275, 1110, 11, 19, 842, 5, 1207, 878, 4, 196, 198, 321, 96, 4, 
16, 93, 291, 464, 1099, 10, 692, 811, 12043, 392, 5, 748, 1134, 10, 213, 220, 5, 4, 201, 559, 723, 595, 12043, 231, 112, 1114, 4, 7, 689, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}\n" + ] + } + ], + "source": [ + "max_len = 128\n", + "model_checkpoint = \"ernie-1.0-base-zh\"\n", + "tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)\n", + "def _process(data):\n", + " data.update(tokenizer(\n", + " data[\"text\"],\n", + " max_length=max_len,\n", + " padding=\"max_length\",\n", + " truncation=True,\n", + " return_attention_mask=True,\n", + " ))\n", + " return data\n", + "\n", + "train_dataset.map(_process, num_workers=5)\n", + "val_dataset.map(_process, num_workers=5)\n", + "test_dataset.map(_process, num_workers=5)\n", + "\n", + "print(train_dataset[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "得到数据集之后,我们便可以将数据集包裹在 ``PaddleDataLoader`` 中,用于之后的训练。``FastNLP`` 提供的 ``PaddleDataLoader`` 拓展了 ``paddle.io.DataLoader`` 的功能,详情可以查看相关的文档。" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "from fastNLP.core import PaddleDataLoader\n", + "import paddle.nn as nn\n", + "\n", + "train_dataloader = PaddleDataLoader(train_dataset, batch_size=32, shuffle=True)\n", + "val_dataloader = PaddleDataLoader(val_dataset, batch_size=32, shuffle=False)\n", + "test_dataloader = PaddleDataLoader(test_dataset, batch_size=1, shuffle=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. 
模型训练:加载 ERNIE 预训练模型,使用 FastNLP 进行训练\n", + "\n", + "#### 3.1 使用 ERNIE 预训练模型\n", + "\n", + "为了实现文本分类,我们首先需要定义文本分类的模型。``paddlenlp.transformers`` 提供了模型 ``AutoModelForSequenceClassification``,我们可以利用它来加载不同权重的文本分类模型。在 ``FastNLP`` 中,我们可以定义 ``train_step`` 和 ``evaluate_step`` 函数来实现训练和验证过程中的不同行为。\n", + "\n", + "- ``train_step`` 函数在获得返回值 ``logits`` (大小为 ``(batch_size, num_labels)``)后计算交叉熵损失 ``CrossEntropyLoss``,然后将 ``loss`` 放在字典中返回。``FastNLP`` 也支持返回 ``dataclass`` 类型的训练结果,但二者都需要包含名为 **``loss``** 的键或成员。\n", + "- ``evaluate_step`` 函数在获得返回值 ``logits`` 后,将 ``logits`` 和标签 ``label`` 放在字典中返回。\n", + "\n", + "这两个函数的参数均为数据集中字典**键**的子集,``FastNLP`` 会自动进行参数匹配然后输入到模型中。" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[2022-06-22 21:31:15,577] [ INFO]\u001b[0m - We are using to load 'ernie-1.0-base-zh'.\u001b[0m\n", + "\u001b[32m[2022-06-22 21:31:15,580] [ INFO]\u001b[0m - Already cached /remote-home/shxing/.paddlenlp/models/ernie-1.0-base-zh/ernie_v1_chn_base.pdparams\u001b[0m\n" + ] + } + ], + "source": [ + "import paddle.nn as nn\n", + "\n", + "class SeqClsModel(nn.Layer):\n", + " def __init__(self, model_checkpoint, num_labels):\n", + " super(SeqClsModel, self).__init__()\n", + " self.model = AutoModelForSequenceClassification.from_pretrained(\n", + " model_checkpoint,\n", + " num_classes=num_labels,\n", + " )\n", + "\n", + " def forward(self, input_ids, attention_mask, token_type_ids):\n", + " logits = self.model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)\n", + " return logits\n", + "\n", + " def train_step(self, input_ids, attention_mask, token_type_ids, label):\n", + " logits = self(input_ids, attention_mask, token_type_ids)\n", + " loss = nn.CrossEntropyLoss()(logits, label)\n", + " return {\"loss\": loss}\n", + "\n", + " def evaluate_step(self, input_ids, attention_mask, token_type_ids, label):\n", + " logits = self(input_ids, attention_mask, token_type_ids)\n", + " return {'pred': logits, 'target': label}\n", + "\n", + "model = SeqClsModel(model_checkpoint, num_labels=2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 3.2 设置参数并使用 Trainer 开始训练\n", + "\n", + "现在我们可以着手使用 ``FastNLP.Trainer`` 进行训练了。\n", + "\n", + "首先,为了高效地训练 ``ERNIE`` 模型,我们最好为学习率指定一定的策略。``paddlenlp`` 提供的 ``LinearDecayWithWarmup`` 可以令学习率在一段时间内从 0 开始线性地增长(预热),然后再线性地衰减至 0 。在本篇教程中,我们将学习率设置为 ``5e-5``,预热时间为 ``0.1``,然后将得到的的 ``lr_scheduler`` 赋值给 ``AdamW`` 优化器。\n", + "\n", + "其次,我们还可以为 ``Trainer`` 指定多个 ``Callback`` 来在基础的训练过程之外进行额外的定制操作。在本篇教程中,我们使用的 ``Callback`` 有以下三种:\n", + "\n", + "- ``LRSchedCallback`` - 由于我们使用了 ``Scheduler``,因此需要将 ``lr_scheduler`` 传给该 ``Callback`` 以在训练中进行更新。\n", + "- ``LoadBestModelCallback`` - 该 ``Callback`` 会评估结果中的 ``'acc#accuracy'`` 值,保存训练中出现的正确率最高的模型,并在训练结束时加载到模型上,方便对模型进行测试和评估。\n", + "\n", + "在 ``Trainer`` 中,我们还可以设置 ``metrics`` 来衡量模型的表现。``Accuracy`` 能够根据传入的预测值和真实值计算出模型预测的正确率。还记得模型中 ``evaluate_step`` 函数的返回值吗?键 ``pred`` 和 ``target`` 分别为 ``Accuracy.update`` 的参数名,在验证过程中 ``FastNLP`` 会自动将键和参数名匹配从而计算出正确率,这也是我们规定模型需要返回字典类型数据的原因。\n", + "\n", + "``Accuracy`` 的返回值包含三个部分:``acc``、``total`` 和 ``correct``,分别代表 ``正确率``、 ``数据总数`` 和 ``预测正确的数目``,这让您能够直观地知晓训练中模型的变化,``LoadBestModelCallback`` 的参数 ``'acc#accuracy'`` 也正是代表了 ``accuracy`` 指标的 ``acc`` 结果。\n", + "\n", + "在设定好参数之后,调用 ``run`` 函数便可以进行训练和验证了。" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
[21:31:16] INFO     Running evaluator sanity check for 2 batches.              trainer.py:631\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m[21:31:16]\u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Running evaluator sanity check for \u001b[1;36m2\u001b[0m batches. \u001b]8;id=4641;file://../fastNLP/core/controllers/trainer.py\u001b\\\u001b[2mtrainer.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=822054;file://../fastNLP/core/controllers/trainer.py#631\u001b\\\u001b[2m631\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:0, Batch:60 -----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m60\u001b[0m -----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.895833,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1075.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.895833\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1075.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:0, Batch:120 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m120\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.8975,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1077.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.8975\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1077.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:0, Batch:180 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m180\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.911667,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1094.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.911667\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1094.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:0, Batch:240 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m240\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.9225,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1107.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.9225\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1107.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:0, Batch:300 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m300\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.9275,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1113.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.9275\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1113.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:1, Batch:60 -----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m1\u001b[0m, Batch:\u001b[1;36m60\u001b[0m -----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.930833,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1117.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.930833\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1117.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:1, Batch:120 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m1\u001b[0m, Batch:\u001b[1;36m120\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.935833,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1123.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.935833\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1123.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:1, Batch:180 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m1\u001b[0m, Batch:\u001b[1;36m180\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.935833,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1123.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.935833\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1123.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:1, Batch:240 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m1\u001b[0m, Batch:\u001b[1;36m240\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.9375,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1125.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.9375\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1125.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:1, Batch:300 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m1\u001b[0m, Batch:\u001b[1;36m300\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.941667,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1130.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.941667\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1130.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
[21:34:28] INFO     Loading best model from fnlp-ernie/2022-0 load_best_model_callback.py:111\n",
+       "                    6-22-21_29_12_898095/best_so_far with                                    \n",
+       "                    acc#accuracy: 0.941667...                                                \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m[21:34:28]\u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Loading best model from fnlp-ernie/\u001b[1;36m2022\u001b[0m-\u001b[1;36m0\u001b[0m \u001b]8;id=340364;file://../fastNLP/core/callbacks/load_best_model_callback.py\u001b\\\u001b[2mload_best_model_callback.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=763898;file://../fastNLP/core/callbacks/load_best_model_callback.py#111\u001b\\\u001b[2m111\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b[2;36m \u001b[0m \u001b[1;36m6\u001b[0m-\u001b[1;36m22\u001b[0m-21_29_12_898095/best_so_far with \u001b[2m \u001b[0m\n", + "\u001b[2;36m \u001b[0m acc#accuracy: \u001b[1;36m0.941667\u001b[0m\u001b[33m...\u001b[0m \u001b[2m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
[21:34:34] INFO     Deleting fnlp-ernie/2022-06-22-21_29_12_8 load_best_model_callback.py:131\n",
+       "                    98095/best_so_far...                                                     \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m[21:34:34]\u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Deleting fnlp-ernie/\u001b[1;36m2022\u001b[0m-\u001b[1;36m06\u001b[0m-\u001b[1;36m22\u001b[0m-21_29_12_8 \u001b]8;id=430330;file://../fastNLP/core/callbacks/load_best_model_callback.py\u001b\\\u001b[2mload_best_model_callback.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=508566;file://../fastNLP/core/callbacks/load_best_model_callback.py#131\u001b\\\u001b[2m131\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b[2;36m \u001b[0m 98095/best_so_far\u001b[33m...\u001b[0m \u001b[2m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from fastNLP import LRSchedCallback, LoadBestModelCallback\n", + "from fastNLP import Trainer, Accuracy\n", + "from paddlenlp.transformers import LinearDecayWithWarmup\n", + "\n", + "n_epochs = 2\n", + "num_training_steps = len(train_dataloader) * n_epochs\n", + "lr_scheduler = LinearDecayWithWarmup(5e-5, num_training_steps, 0.1)\n", + "optimizer = paddle.optimizer.AdamW(\n", + " learning_rate=lr_scheduler,\n", + " parameters=model.parameters(),\n", + ")\n", + "callbacks = [\n", + " LRSchedCallback(lr_scheduler, step_on=\"batch\"),\n", + " LoadBestModelCallback(\"acc#accuracy\", larger_better=True, save_folder=\"fnlp-ernie\"),\n", + "]\n", + "trainer = Trainer(\n", + " model=model,\n", + " driver=\"paddle\",\n", + " optimizers=optimizer,\n", + " device=0,\n", + " n_epochs=n_epochs,\n", + " train_dataloader=train_dataloader,\n", + " evaluate_dataloaders=val_dataloader,\n", + " evaluate_every=60,\n", + " metrics={\"accuracy\": Accuracy()},\n", + " callbacks=callbacks,\n", + ")\n", + "trainer.run()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 3.3 测试和评估\n", + "\n", + "现在我们已经得到了一个表现良好的 ``ERNIE`` 模型,接下来可以在测试集上测试模型的效果了。``FastNLP.Evaluator`` 提供了定制函数的功能。我们以 ``test_dataloader`` 初始化一个 ``Evaluator``,然后将写好的测试函数 ``test_batch_step_fn`` 传给参数 ``evaluate_batch_step_fn``,``Evaluate`` 在对每个 batch 进行评估时就会调用我们自定义的 ``test_batch_step_fn`` 函数而不是 ``evaluate_step`` 函数。在这里,我们仅测试 5 条数据并输出文本和对应的标签。" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
text: ['这个宾馆比较陈旧了,特价的房间也很一般。总体来说一般']\n",
+       "
\n" + ], + "text/plain": [ + "text: ['这个宾馆比较陈旧了,特价的房间也很一般。总体来说一般']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
labels: 0\n",
+       "
\n" + ], + "text/plain": [ + "labels: 0\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
text: ['怀着十分激动的心情放映,可是看着看着发现,在放映完毕后,出现一集米老鼠的动画片!开始\n",
+       "还怀疑是不是赠送的个别现象,可是后来发现每张DVD后面都有!真不知道生产商怎么想的,我想看的是猫\n",
+       "和老鼠,不是米老鼠!如果厂家是想赠送的话,那就全套米老鼠和唐老鸭都赠送,只在每张DVD后面添加一\n",
+       "集算什么??简直是画蛇添足!!']\n",
+       "
\n" + ], + "text/plain": [ + "text: ['怀着十分激动的心情放映,可是看着看着发现,在放映完毕后,出现一集米老鼠的动画片!开始\n", + "还怀疑是不是赠送的个别现象,可是后来发现每张DVD后面都有!真不知道生产商怎么想的,我想看的是猫\n", + "和老鼠,不是米老鼠!如果厂家是想赠送的话,那就全套米老鼠和唐老鸭都赠送,只在每张DVD后面添加一\n", + "集算什么??简直是画蛇添足!!']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
labels: 0\n",
+       "
\n" + ], + "text/plain": [ + "labels: 0\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
text: ['还稍微重了点,可能是硬盘大的原故,还要再轻半斤就好了。其他要进一步验证。贴的几种膜气\n",
+       "泡较多,用不了多久就要更换了,屏幕膜稍好点,但比没有要强多了。建议配赠几张膜让用用户自己贴。'\n",
+       "]\n",
+       "
\n" + ], + "text/plain": [ + "text: ['还稍微重了点,可能是硬盘大的原故,还要再轻半斤就好了。其他要进一步验证。贴的几种膜气\n", + "泡较多,用不了多久就要更换了,屏幕膜稍好点,但比没有要强多了。建议配赠几张膜让用用户自己贴。'\n", + "]\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
labels: 0\n",
+       "
\n" + ], + "text/plain": [ + "labels: 0\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
text: ['交通方便;环境很好;服务态度很好 房间较小']\n",
+       "
\n" + ], + "text/plain": [ + "text: ['交通方便;环境很好;服务态度很好 房间较小']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
labels: 1\n",
+       "
\n" + ], + "text/plain": [ + "labels: 1\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
text: ['不错,作者的观点很颠覆目前中国父母的教育方式,其实古人们对于教育已经有了很系统的体系\n",
+       "了,可是现在的父母以及祖父母们更多的娇惯纵容孩子,放眼看去自私的孩子是大多数,父母觉得自己的\n",
+       "孩子在外面只要不吃亏就是好事,完全把古人几千年总结的教育古训抛在的九霄云外。所以推荐准妈妈们\n",
+       "可以在等待宝宝降临的时候,好好学习一下,怎么把孩子教育成一个有爱心、有责任心、宽容、大度的人\n",
+       "。']\n",
+       "
\n" + ], + "text/plain": [ + "text: ['不错,作者的观点很颠覆目前中国父母的教育方式,其实古人们对于教育已经有了很系统的体系\n", + "了,可是现在的父母以及祖父母们更多的娇惯纵容孩子,放眼看去自私的孩子是大多数,父母觉得自己的\n", + "孩子在外面只要不吃亏就是好事,完全把古人几千年总结的教育古训抛在的九霄云外。所以推荐准妈妈们\n", + "可以在等待宝宝降临的时候,好好学习一下,怎么把孩子教育成一个有爱心、有责任心、宽容、大度的人\n", + "。']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
labels: 1\n",
+       "
\n" + ], + "text/plain": [ + "labels: 1\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "{}"
+      ]
+     },
+     "execution_count": 14,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from fastNLP import Evaluator\n",
+    "def test_batch_step_fn(evaluator, batch):\n",
+    "    input_ids = batch[\"input_ids\"]\n",
+    "    attention_mask = batch[\"attention_mask\"]\n",
+    "    token_type_ids = batch[\"token_type_ids\"]\n",
+    "    logits = model(input_ids, attention_mask, token_type_ids)\n",
+    "    predict = logits.argmax().item()\n",
+    "    print(\"text:\", batch['text'])\n",
+    "    print(\"labels:\", predict)\n",
+    "\n",
+    "evaluator = Evaluator(\n",
+    "    model=model,\n",
+    "    dataloaders=test_dataloader,\n",
+    "    driver=\"paddle\",\n",
+    "    device=0,\n",
+    "    evaluate_batch_step_fn=test_batch_step_fn,\n",
+    ")\n",
+    "evaluator.run(5)    "
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.7.13 ('fnlp-paddle')",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.13"
+  },
+  "orig_nbformat": 4,
+  "vscode": {
+   "interpreter": {
+    "hash": "31f2d9d3efc23c441973d7c4273acfea8b132b6a578f002629b6b44b8f65e720"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/tutorials/fastnlp_tutorial_paddle_e2.ipynb b/tutorials/fastnlp_tutorial_paddle_e2.ipynb
new file mode 100644
index 00000000..c17be405
--- /dev/null
+++ b/tutorials/fastnlp_tutorial_paddle_e2.ipynb
@@ -0,0 +1,1510 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 使用 paddlenlp 和 FastNLP 训练中文阅读理解任务\n",
+    "\n",
+    "本篇教程属于 **`FastNLP v0.8 tutorial` 的 `paddle examples` 系列**。在本篇教程中,我们将为您展示如何在 `FastNLP` 中通过自定义 `Metric` 和 损失函数来完成进阶的问答任务。\n",
+    "\n",
+    "1. 基础介绍:自然语言处理中的阅读理解任务\n",
+    "\n",
+    "2. 准备工作:加载 `DuReader-robust` 数据集,并使用 `tokenizer` 处理数据\n",
+    "\n",
+    "3. 模型训练:自己定义评测用的 `Metric` 实现更加自由的任务评测"
+   ]
+  },
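+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before diving in, here is a minimal sketch of what a custom metric generally looks like in `FastNLP`, assuming the `Metric` base class with its `register_element`, `update` and `get_metric` methods; the names `DummyExactMatch`, `pred` and `target` are illustrative only, and the metric actually used for this task is defined later in the tutorial."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from fastNLP import Metric\n",
+    "\n",
+    "class DummyExactMatch(Metric):\n",
+    "    # a hypothetical metric counting exact matches between predicted\n",
+    "    # and gold label ids; registered elements are aggregated across devices\n",
+    "    def __init__(self):\n",
+    "        super().__init__()\n",
+    "        self.register_element(name='correct', value=0, aggregate_method='sum')\n",
+    "        self.register_element(name='total', value=0, aggregate_method='sum')\n",
+    "\n",
+    "    def update(self, pred, target):\n",
+    "        self.correct += (pred == target).sum().item()\n",
+    "        self.total += target.shape[0]\n",
+    "\n",
+    "    def get_metric(self):\n",
+    "        return {'em': self.correct.get_scalar() / max(self.total.get_scalar(), 1)}"
+   ]
+  },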
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 1. 基础介绍:自然语言处理中的阅读理解任务\n",
+    "\n",
+    "阅读理解任务,顾名思义,就是给出一段文字,然后让模型理解这段文字所含的语义。大部分机器阅读理解任务都采用问答式测评,即设计与文章内容相关的自然语言式问题,让模型理解问题并根据文章作答。与文本分类任务不同的是,在阅读理解任务中我们有时需要需要输入“一对”句子,分别代表问题和上下文;答案的格式也分为多种:\n",
+    "\n",
+    "- 多项选择:让模型从多个答案选项中选出正确答案\n",
+    "- 区间答案:答案为上下文的一段子句,需要模型给出答案的起始位置\n",
+    "- 自由回答:不做限制,让模型自行生成答案\n",
+    "- 完形填空:在原文中挖空部分关键词,让模型补全;这类答案往往不需要问题\n",
+    "\n",
+    "如果您对 `transformers` 有所了解的话,其中的 `ModelForQuestionAnswering` 系列模型就可以用于这项任务。阅读理解模型的泛用性是衡量该技术能否在实际应用中大规模落地的重要指标之一,随着当前技术的进步,许多模型虽然能够在一些测试集上取得较好的性能,但在实际应用中,这些模型仍然难以让人满意。在本篇教程中,我们将会为您展示如何训练一个问答模型。\n",
+    "\n",
+    "在这一领域,`SQuAD` 数据集是一个影响深远的数据集。它的全称是斯坦福问答数据集(Stanford Question Answering Dataset),每条数据包含 `(问题,上下文,答案)` 三部分,规模大(约十万条,2.0又新增了五万条),在提出之后很快成为训练问答任务的经典数据集之一。`SQuAD` 数据集有两个指标来衡量模型的表现:`EM`(Exact Match,精确匹配)和 `F1`(模糊匹配)。前者反应了模型给出的答案中有多少和正确答案完全一致,后者则反应了模型给出的答案中与正确答案重叠的部分,均为越高越好。"
+   ]
+  },
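+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a concrete illustration of the two metrics, here is a minimal sketch that computes `EM` and `F1` on raw answer strings (character-level tokens are used, which suits Chinese text; real implementations add normalization such as lower-casing and punctuation stripping):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import collections\n",
+    "\n",
+    "def exact_match(prediction, ground_truth):\n",
+    "    # EM: 1 if the predicted answer equals the gold answer exactly, else 0\n",
+    "    return float(prediction == ground_truth)\n",
+    "\n",
+    "def f1_score(prediction, ground_truth):\n",
+    "    # F1: harmonic mean of precision and recall over overlapping characters\n",
+    "    common = collections.Counter(prediction) & collections.Counter(ground_truth)\n",
+    "    num_same = sum(common.values())\n",
+    "    if num_same == 0:\n",
+    "        return 0.0\n",
+    "    precision = num_same / len(prediction)\n",
+    "    recall = num_same / len(ground_truth)\n",
+    "    return 2 * precision * recall / (precision + recall)\n",
+    "\n",
+    "print(exact_match('abc', 'abc'), f1_score('abcd', 'bcd'))  # 1.0 0.857..."
+   ]
+  },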
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 2. 准备工作:加载 DuReader-robust 数据集,并使用 tokenizer 处理数据"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/remote-home/shxing/anaconda3/envs/fnlp-paddle/lib/python3.7/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+      "  from .autonotebook import tqdm as notebook_tqdm\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "2.3.3\n"
+     ]
+    }
+   ],
+   "source": [
+    "import sys\n",
+    "sys.path.append(\"../\")\n",
+    "import paddle\n",
+    "import paddlenlp\n",
+    "\n",
+    "print(paddlenlp.__version__)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "在数据集方面,我们选用 `DuReader-robust` 中文数据集作为训练数据。它是一种抽取式问答数据集,采用 `SQuAD` 数据格式,能够评估真实应用场景下模型的泛用性。"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Reusing dataset dureader_robust (/remote-home/shxing/.cache/huggingface/datasets/dureader_robust/plain_text/1.0.0/d462ecadc8c010cee20f57632f1413f272867cd802a91a602df48c7d34eb0c27)\n",
+      "Reusing dataset dureader_robust (/remote-home/shxing/.cache/huggingface/datasets/dureader_robust/plain_text/1.0.0/d462ecadc8c010cee20f57632f1413f272867cd802a91a602df48c7d34eb0c27)\n",
+      "\u001b[32m[2022-06-27 19:22:46,998] [    INFO]\u001b[0m - Already cached /remote-home/shxing/.paddlenlp/models/ernie-1.0-base-zh/vocab.txt\u001b[0m\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{'id': '0a25cb4bc1ab6f474c699884e04601e4', 'title': '', 'context': '第35集雪见缓缓张开眼睛,景天又惊又喜之际,长卿和紫萱的仙船驶至,见众人无恙,也十分高兴。众人登船,用尽合力把自身的真气和水分输给她。雪见终于醒过来了,但却一脸木然,全无反应。众人向常胤求助,却发现人世界竟没有雪见的身世纪录。长卿询问清微的身世,清微语带双关说一切上了天界便有答案。长卿驾驶仙船,众人决定立马动身,往天界而去。众人来到一荒山,长卿指出,魔界和天界相连。由魔界进入通过神魔之井,便可登天。众人至魔界入口,仿若一黑色的蝙蝠洞,但始终无法进入。后来花楹发现只要有翅膀便能飞入。于是景天等人打下许多乌鸦,模仿重楼的翅膀,制作数对翅膀状巨物。刚佩戴在身,便被吸入洞口。众人摔落在地,抬头发现魔界守卫。景天和众魔套交情,自称和魔尊重楼相熟,众魔不理,打了起来。', 'question': '仙剑奇侠传3第几集上天界', 'answers': {'text': ['第35集'], 'answer_start': [0]}}\n",
+      "{'id': '7de192d6adf7d60ba73ba25cf590cc1e', 'title': '', 'context': '选择燃气热水器时,一定要关注这几个问题:1、出水稳定性要好,不能出现忽热忽冷的现象2、快速到达设定的需求水温3、操作要智能、方便4、安全性要好,要装有安全报警装置 市场上燃气热水器品牌众多,购买时还需多加对比和仔细鉴别。方太今年主打的磁化恒温热水器在使用体验方面做了全面升级:9秒速热,可快速进入洗浴模式;水温持久稳定,不会出现忽热忽冷的现象,并通过水量伺服技术将出水温度精确控制在±0.5℃,可满足家里宝贝敏感肌肤洗护需求;配备CO和CH4双气体报警装置更安全(市场上一般多为CO单气体报警)。另外,这款热水器还有智能WIFI互联功能,只需下载个手机APP即可用手机远程操作热水器,实现精准调节水温,满足家人多样化的洗浴需求。当然方太的磁化恒温系列主要的是增加磁化功能,可以有效吸附水中的铁锈、铁屑等微小杂质,防止细菌滋生,使沐浴水质更洁净,长期使用磁化水沐浴更利于身体健康。', 'question': '燃气热水器哪个牌子好', 'answers': {'text': ['方太'], 'answer_start': [110]}}\n",
+      "{'id': 'b9e74d4b9228399b03701d1fe6d52940', 'title': '', 'context': '迈克尔.乔丹在NBA打了15个赛季。他在84年进入nba,期间在1993年10月6日第一次退役改打棒球,95年3月18日重新回归,在99年1月13日第二次退役,后于2001年10月31日复出,在03年最终退役。迈克尔·乔丹(Michael Jordan),1963年2月17日生于纽约布鲁克林,美国著名篮球运动员,司职得分后卫,历史上最伟大的篮球运动员。1984年的NBA选秀大会,乔丹在首轮第3顺位被芝加哥公牛队选中。 1986-87赛季,乔丹场均得到37.1分,首次获得分王称号。1990-91赛季,乔丹连夺常规赛MVP和总决赛MVP称号,率领芝加哥公牛首次夺得NBA总冠军。 1997-98赛季,乔丹获得个人职业生涯第10个得分王,并率领公牛队第六次夺得总冠军。2009年9月11日,乔丹正式入选NBA名人堂。', 'question': '乔丹打了多少个赛季', 'answers': {'text': ['15个'], 'answer_start': [12]}}\n",
+      "训练集大小: 14520\n",
+      "验证集大小: 1417\n"
+     ]
+    }
+   ],
+   "source": [
+    "from paddlenlp.datasets import load_dataset\n",
+    "train_dataset = load_dataset(\"PaddlePaddle/dureader_robust\", splits=\"train\")\n",
+    "val_dataset = load_dataset(\"PaddlePaddle/dureader_robust\", splits=\"validation\")\n",
+    "for i in range(3):\n",
+    "    print(train_dataset[i])\n",
+    "print(\"训练集大小:\", len(train_dataset))\n",
+    "print(\"验证集大小:\", len(val_dataset))\n",
+    "\n",
+    "MODEL_NAME = \"ernie-1.0-base-zh\"\n",
+    "from paddlenlp.transformers import ErnieTokenizer\n",
+    "tokenizer =ErnieTokenizer.from_pretrained(MODEL_NAME)"
+   ]
+  },
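+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The printed samples show the SQuAD-style layout: `answers['text']` holds the answer strings and `answers['answer_start']` their character offsets into `context`. A quick sanity check of that alignment, sketched on the first training sample:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# the answer text should be recoverable from the context via its offset\n",
+    "sample = train_dataset[0]\n",
+    "answer = sample['answers']['text'][0]\n",
+    "start = sample['answers']['answer_start'][0]\n",
+    "assert sample['context'][start:start + len(answer)] == answer\n",
+    "print(answer)"
+   ]
+  },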
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 2.1 处理训练集\n",
+    "\n",
+    "对于阅读理解任务,数据处理的方式较为麻烦。接下来我们会为您详细讲解处理函数 `_process_train` 的功能,同时也将通过实践展示关于 `tokenizer` 的更多功能,让您更加深入地了解自然语言处理任务。首先让我们向 `tokenizer` 输入一条数据(以列表的形式):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "2\n",
+      "dict_keys(['offset_mapping', 'input_ids', 'token_type_ids', 'overflow_to_sample'])\n"
+     ]
+    }
+   ],
+   "source": [
+    "result = tokenizer(\n",
+    "    [train_dataset[0][\"question\"]],\n",
+    "    [train_dataset[0][\"context\"]],\n",
+    "    stride=128,\n",
+    "    max_length=256,\n",
+    "    padding=\"max_length\",\n",
+    "    return_dict=False\n",
+    ")\n",
+    "\n",
+    "print(len(result))\n",
+    "print(result[0].keys())"
+   ]
+  },
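+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The returned keys already hint at how extractive QA preprocessing works: `overflow_to_sample` maps each overflowing window back to the sample it came from, and `offset_mapping` maps each token back to a character span in the original text. As a rough sketch (a hypothetical helper, not the tutorial's actual `_process_train`), locating the answer's token span could look like this:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def locate_answer(feature, answer_start, answer_text):\n",
+    "    # map a character-level answer span onto token indices via offset_mapping\n",
+    "    start_char = answer_start\n",
+    "    end_char = answer_start + len(answer_text)\n",
+    "    start_token = end_token = None\n",
+    "    for i, (s, e) in enumerate(feature['offset_mapping']):\n",
+    "        if feature['token_type_ids'][i] != 1:\n",
+    "            continue  # only search the context, not the question\n",
+    "        if s <= start_char < e:\n",
+    "            start_token = i\n",
+    "        if s < end_char <= e:\n",
+    "            end_token = i\n",
+    "    return start_token, end_token\n",
+    "\n",
+    "print(locate_answer(result[0], 0, '第35集'))"
+   ]
+  },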
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "首先不难理解的是,模型必须要同时接受问题(`question`)和上下文(`context`)才能够进行阅读理解,因此我们需要将二者同时进行分词(`tokenize`)。所幸,`Tokenizer` 提供了这一功能,当我们调用 `tokenizer` 的时候,其第一个参数名为 `text`,第二个参数名为 `text_pair`,这使得我们可以同时对一对文本进行分词。同时,`tokenizer` 还需要标记出一条数据中哪些属于问题,哪些属于上下文,这一功能则由 `token_type_ids` 完成。`token_type_ids` 会将输入的第一个文本(问题)标记为 `0`,第二个文本(上下文)标记为 `1`,这样模型在训练时便可以将问题和上下文区分开来:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[1, 1034, 1189, 734, 2003, 241, 284, 131, 553, 271, 28, 125, 280, 2, 131, 1773, 271, 1097, 373, 1427, 1427, 501, 88, 662, 1906, 4, 561, 125, 311, 1168, 311, 692, 46, 430, 4, 84, 2073, 14, 1264, 3967, 5, 1034, 1020, 1829, 268, 4, 373, 539, 8, 154, 5210, 4, 105, 167, 59, 69, 685, 12043, 539, 8, 883, 1020, 4, 29, 720, 95, 90, 427, 67, 262, 5, 384, 266, 14, 101, 59, 789, 416, 237, 12043, 1097, 373, 616, 37, 1519, 93, 61, 15, 4, 255, 535, 7, 1529, 619, 187, 4, 62, 154, 451, 149, 12043, 539, 8, 253, 223, 3679, 323, 523, 4, 535, 34, 87, 8, 203, 280, 1186, 340, 9, 1097, 373, 5, 262, 203, 623, 704, 12043, 84, 2073, 1137, 358, 334, 702, 5, 262, 203, 4, 334, 702, 405, 360, 653, 129, 178, 7, 568, 28, 15, 125, 280, 518, 9, 1179, 487, 12043, 84, 2073, 1621, 1829, 1034, 1020, 4, 539, 8, 448, 91, 202, 466, 70, 262, 4, 638, 125, 280, 83, 299, 12043, 539, 8, 61, 45, 7, 1537, 176, 4, 84, 2073, 288, 39, 4, 889, 280, 14, 125, 280, 156, 538, 12043, 190, 889, 280, 71, 109, 124, 93, 292, 889, 46, 1248, 4, 518, 48, 883, 125, 12043, 539, 8, 268, 889, 280, 109, 270, 4, 1586, 845, 7, 669, 199, 5, 3964, 3740, 1084, 4, 255, 440, 616, 154, 72, 71, 109, 12043, 49, 61, 283, 3591, 34, 87, 297, 41, 9, 1993, 2602, 518, 52, 706, 109, 2]\n",
+      "['[CLS]', '仙', '剑', '奇', '侠', '传', '3', '第', '几', '集', '上', '天', '界', '[SEP]', '第', '35', '集', '雪', '见', '缓', '缓', '张', '开', '眼', '睛', ',', '景', '天', '又', '惊', '又', '喜', '之', '际', ',', '长', '卿', '和', '紫', '萱', '的', '仙', '船', '驶', '至', ',', '见', '众', '人', '无', '恙', ',', '也', '十', '分', '高', '兴', '。', '众', '人', '登', '船', ',', '用', '尽', '合', '力', '把', '自', '身', '的', '真', '气', '和', '水', '分', '输', '给', '她', '。', '雪', '见', '终', '于', '醒', '过', '来', '了', ',', '但', '却', '一', '脸', '木', '然', ',', '全', '无', '反', '应', '。', '众', '人', '向', '常', '胤', '求', '助', ',', '却', '发', '现', '人', '世', '界', '竟', '没', '有', '雪', '见', '的', '身', '世', '纪', '录', '。', '长', '卿', '询', '问', '清', '微', '的', '身', '世', ',', '清', '微', '语', '带', '双', '关', '说', '一', '切', '上', '了', '天', '界', '便', '有', '答', '案', '。', '长', '卿', '驾', '驶', '仙', '船', ',', '众', '人', '决', '定', '立', '马', '动', '身', ',', '往', '天', '界', '而', '去', '。', '众', '人', '来', '到', '一', '荒', '山', ',', '长', '卿', '指', '出', ',', '魔', '界', '和', '天', '界', '相', '连', '。', '由', '魔', '界', '进', '入', '通', '过', '神', '魔', '之', '井', ',', '便', '可', '登', '天', '。', '众', '人', '至', '魔', '界', '入', '口', ',', '仿', '若', '一', '黑', '色', '的', '蝙', '蝠', '洞', ',', '但', '始', '终', '无', '法', '进', '入', '。', '后', '来', '花', '楹', '发', '现', '只', '要', '有', '翅', '膀', '便', '能', '飞', '入', '[SEP]']\n",
+      "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(result[0][\"input_ids\"])\n",
+    "print(tokenizer.convert_ids_to_tokens(result[0][\"input_ids\"]))\n",
+    "print(result[0][\"token_type_ids\"])"
+   ]
+  },
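+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a small aside of our own (a sketch, not part of the original tutorial), `token_type_ids` can also be used directly to split the tokens back into their two segments:\n",
+    "\n",
+    "```python\n",
+    "tokens = tokenizer.convert_ids_to_tokens(result[0][\"input_ids\"])\n",
+    "type_ids = result[0][\"token_type_ids\"]\n",
+    "# segment 0 holds [CLS] + question + [SEP]; segment 1 holds the context\n",
+    "question_tokens = [t for t, s in zip(tokens, type_ids) if s == 0]\n",
+    "context_tokens = [t for t, s in zip(tokens, type_ids) if s == 1]\n",
+    "```"
+   ]
+  },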
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "根据上面的输出我们可以看出,`tokenizer` 会将数据开头用 `[CLS]` 标记,用 `[SEP]` 来分割句子。同时,根据 `token_type_ids` 得到的 0、1 串,我们也很容易将问题和上下文区分开。顺带一提,如果一条数据进行了 `padding`,那么这部分会被标记为 `0` 。\n",
+    "\n",
+    "在输出的 `keys` 中还有一项名为 `offset_mapping` 的键。该项数据能够表示分词后的每个 `token` 在原文中对应文字或词语的位置。比如我们可以像下面这样将数据打印出来:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (0, 0), (0, 1), (1, 3), (3, 4), (4, 5), (5, 6), (6, 7)]\n",
+      "[1, 1034, 1189, 734, 2003, 241, 284, 131, 553, 271, 28, 125, 280, 2, 131, 1773, 271, 1097, 373, 1427]\n",
+      "['[CLS]', '仙', '剑', '奇', '侠', '传', '3', '第', '几', '集', '上', '天', '界', '[SEP]', '第', '35', '集', '雪', '见', '缓']\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(result[0][\"offset_mapping\"][:20])\n",
+    "print(result[0][\"input_ids\"][:20])\n",
+    "print(tokenizer.convert_ids_to_tokens(result[0][\"input_ids\"])[:20])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`[CLS]` 由于是 `tokenizer` 自己添加进去用于标记数据的 `token`,因此它在原文中找不到任何对应的词语,所以给出的位置范围就是 `(0, 0)`;第二个 `token` 对应第一个 `“仙”` 字,因此映射的位置就是 `(0, 1)`;同理,后面的 `[SEP]` 也不对应任何文字,映射的位置为 `(0, 0)`;而接下来的 `token` 对应 **上下文** 中的第一个字 `“第”`,映射出的位置为 `(0, 1)`;再后面的 `token` 对应原文中的两个字符 `35`,因此其位置映射为 `(1, 3)` 。通过这种手段,我们可以更方便地获取 `token` 与原文的对应关系。\n",
+    "\n",
+    "最后,您也许会注意到我们获取的 `result` 长度为 2 。这是文本在分词后长度超过了 `max_length` 256 ,`tokenizer` 将数据分成了两部分所致。在阅读理解任务中,我们不可能像文本分类那样轻易地将一条数据截断,因为答案很可能就出现在后面被丢弃的那部分数据中,因此,我们需要保留所有的数据(当然,您也可以直接丢弃这些超长的数据)。`overflow_to_sample` 则可以标识当前数据在原数据的索引:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[CLS]仙剑奇侠传3第几集上天界[SEP]第35集雪见缓缓张开眼睛,景天又惊又喜之际,长卿和紫萱的仙船驶至,见众人无恙,也十分高兴。众人登船,用尽合力把自身的真气和水分输给她。雪见终于醒过来了,但却一脸木然,全无反应。众人向常胤求助,却发现人世界竟没有雪见的身世纪录。长卿询问清微的身世,清微语带双关说一切上了天界便有答案。长卿驾驶仙船,众人决定立马动身,往天界而去。众人来到一荒山,长卿指出,魔界和天界相连。由魔界进入通过神魔之井,便可登天。众人至魔界入口,仿若一黑色的蝙蝠洞,但始终无法进入。后来花楹发现只要有翅膀便能飞入[SEP]\n",
+      "overflow_to_sample:  0\n",
+      "[CLS]仙剑奇侠传3第几集上天界[SEP]说一切上了天界便有答案。长卿驾驶仙船,众人决定立马动身,往天界而去。众人来到一荒山,长卿指出,魔界和天界相连。由魔界进入通过神魔之井,便可登天。众人至魔界入口,仿若一黑色的蝙蝠洞,但始终无法进入。后来花楹发现只要有翅膀便能飞入。于是景天等人打下许多乌鸦,模仿重楼的翅膀,制作数对翅膀状巨物。刚佩戴在身,便被吸入洞口。众人摔落在地,抬头发现魔界守卫。景天和众魔套交情,自称和魔尊重楼相熟,众魔不理,打了起来。[SEP][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD]\n",
+      "overflow_to_sample:  0\n"
+     ]
+    }
+   ],
+   "source": [
+    "for res in result:\n",
+    "    tokens = tokenizer.convert_ids_to_tokens(res[\"input_ids\"])\n",
+    "    print(\"\".join(tokens))\n",
+    "    print(\"overflow_to_sample: \", res[\"overflow_to_sample\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "将两条数据均输出之后可以看到,它们都出自我们传入的数据,并且存在一部分重合。`tokenizer` 的 `stride` 参数可以设置重合部分的长度,这也可以帮助模型识别被分割开的两条数据;`overflow_to_sample` 的 `0` 则代表它们来自于第 `0` 条数据。\n",
+    "\n",
+    "基于以上信息,我们处理训练集的思路如下:\n",
+    "\n",
+    "1. 通过 `overflow_to_sample` 来获取原来的数据\n",
+    "2. 通过原数据的 `answers` 找到答案的起始位置\n",
+    "3. 通过 `offset_mapping` 给出的映射关系在分词处理后的数据中找到答案的起始位置,分别记录在 `start_pos` 和 `end_pos` 中;如果没有找到答案(比如答案被截断了),那么答案的起始位置就被标记为 `[CLS]` 的位置。\n",
+    "\n",
+    "这样 `_process_train` 函数就呼之欲出了,我们调用 `train_dataset.map` 函数,并将 `batched` 参数设置为 `True` ,将所有数据批量地进行更新。有一点需要注意的是,**在处理过后数据量会增加**。"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{'offset_mapping': [(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (0, 0), (0, 1), (1, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23), (23, 24), (24, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31), (31, 32), (32, 33), (33, 34), (34, 35), (35, 36), (36, 37), (37, 38), (38, 39), (39, 40), (40, 41), (41, 42), (42, 43), (43, 44), (44, 45), (45, 46), (46, 47), (47, 48), (48, 49), (49, 50), (50, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 62), (62, 63), (63, 64), (64, 65), (65, 66), (66, 67), (67, 68), (68, 69), (69, 70), (70, 71), (71, 72), (72, 73), (73, 74), (74, 75), (75, 76), (76, 77), (77, 78), (78, 79), (79, 80), (80, 81), (81, 82), (82, 83), (83, 84), (84, 85), (85, 86), (86, 87), (87, 88), (88, 89), (89, 90), (90, 91), (91, 92), (92, 93), (93, 94), (94, 95), (95, 96), (96, 97), (97, 98), (98, 99), (99, 100), (100, 101), (101, 102), (102, 103), (103, 104), (104, 105), (105, 106), (106, 107), (107, 108), (108, 109), (109, 110), (110, 111), (111, 112), (112, 113), (113, 114), (114, 115), (115, 116), (116, 117), (117, 118), (118, 119), (119, 120), (120, 121), (121, 122), (122, 123), (123, 124), (124, 125), (125, 126), (126, 127), (127, 128), (128, 129), (129, 130), (130, 131), (131, 132), (132, 133), (133, 134), (134, 135), (135, 136), (136, 137), (137, 138), (138, 139), (139, 140), (140, 141), (141, 142), (142, 143), (143, 144), (144, 145), (145, 146), (146, 147), (147, 148), (148, 149), (149, 150), (150, 151), (151, 152), (152, 153), (153, 154), (154, 155), (155, 156), (156, 157), (157, 158), (158, 159), (159, 160), (160, 161), (161, 162), (162, 163), (163, 164), (164, 165), (165, 166), (166, 167), (167, 168), (168, 169), (169, 170), (170, 171), (171, 172), (172, 173), (173, 174), (174, 175), (175, 176), (176, 177), (177, 178), (178, 179), (179, 180), (180, 181), (181, 182), (182, 183), (183, 184), (184, 185), (185, 186), (186, 187), (187, 188), (188, 189), (189, 190), (190, 191), (191, 192), (192, 193), (193, 194), (194, 195), (195, 196), (196, 197), (197, 198), (198, 199), (199, 200), (200, 201), (201, 202), (202, 203), (203, 204), (204, 205), (205, 206), (206, 207), (207, 208), (208, 209), (209, 210), (210, 211), (211, 212), (212, 213), (213, 214), (214, 215), (215, 216), (216, 217), (217, 218), (218, 219), (219, 220), (220, 221), (221, 222), (222, 223), (223, 224), (224, 225), (225, 226), (226, 227), (227, 228), (228, 229), (229, 230), (230, 231), (231, 232), (232, 233), (233, 234), (234, 235), (235, 236), (236, 237), (237, 238), (238, 239), (239, 240), (240, 241), (241, 242), (0, 0)], 'input_ids': [1, 1034, 1189, 734, 2003, 241, 284, 131, 553, 271, 28, 125, 280, 2, 131, 1773, 271, 1097, 373, 1427, 1427, 501, 88, 662, 1906, 4, 561, 125, 311, 1168, 311, 692, 46, 430, 4, 84, 2073, 14, 1264, 3967, 5, 1034, 1020, 1829, 268, 4, 373, 539, 8, 154, 5210, 4, 105, 167, 59, 69, 685, 12043, 539, 8, 883, 1020, 4, 29, 720, 95, 90, 427, 67, 262, 5, 384, 266, 14, 101, 59, 789, 416, 237, 12043, 1097, 373, 616, 37, 1519, 93, 61, 15, 4, 255, 535, 7, 1529, 619, 187, 4, 62, 154, 451, 149, 12043, 539, 8, 253, 223, 3679, 323, 523, 4, 535, 34, 87, 8, 203, 280, 1186, 340, 9, 1097, 373, 5, 262, 203, 623, 704, 12043, 84, 2073, 1137, 358, 334, 702, 5, 262, 203, 4, 334, 702, 405, 360, 653, 129, 178, 7, 568, 28, 15, 125, 
280, 518, 9, 1179, 487, 12043, 84, 2073, 1621, 1829, 1034, 1020, 4, 539, 8, 448, 91, 202, 466, 70, 262, 4, 638, 125, 280, 83, 299, 12043, 539, 8, 61, 45, 7, 1537, 176, 4, 84, 2073, 288, 39, 4, 889, 280, 14, 125, 280, 156, 538, 12043, 190, 889, 280, 71, 109, 124, 93, 292, 889, 46, 1248, 4, 518, 48, 883, 125, 12043, 539, 8, 268, 889, 280, 109, 270, 4, 1586, 845, 7, 669, 199, 5, 3964, 3740, 1084, 4, 255, 440, 616, 154, 72, 71, 109, 12043, 49, 61, 283, 3591, 34, 87, 297, 41, 9, 1993, 2602, 518, 52, 706, 109, 2], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'overflow_to_sample': 0, 'start_pos': 14, 'end_pos': 16}\n",
+      "处理后的训练集大小: 26198\n"
+     ]
+    }
+   ],
+   "source": [
+    "max_length = 256\n",
+    "doc_stride = 128\n",
+    "def _process_train(data):\n",
+    "\n",
+    "    contexts = [data[i][\"context\"] for i in range(len(data))]\n",
+    "    questions = [data[i][\"question\"] for i in range(len(data))]\n",
+    "\n",
+    "    tokenized_data_list = tokenizer(\n",
+    "        questions,\n",
+    "        contexts,\n",
+    "        stride=doc_stride,\n",
+    "        max_length=max_length,\n",
+    "        padding=\"max_length\",\n",
+    "        return_dict=False\n",
+    "    )\n",
+    "\n",
+    "    for i, tokenized_data in enumerate(tokenized_data_list):\n",
+    "        # 获取 [CLS] 对应的位置\n",
+    "        input_ids = tokenized_data[\"input_ids\"]\n",
+    "        cls_index = input_ids.index(tokenizer.cls_token_id)\n",
+    "\n",
+    "        # 在 tokenize 的过程中,汉字和 token 在位置上并非一一对应的\n",
+    "        # 而 offset mapping 记录了每个 token 在原文中对应的起始位置\n",
+    "        offsets = tokenized_data[\"offset_mapping\"]\n",
+    "        # token_type_ids 记录了一条数据中哪些是问题,哪些是上下文\n",
+    "        token_type_ids = tokenized_data[\"token_type_ids\"]\n",
+    "\n",
+    "        # 一条数据可能因为长度过长而在 tokenized_data 中存在多个结果\n",
+    "        # overflow_to_sample 表示了当前 tokenize_example 属于 data 中的哪一条数据\n",
+    "        sample_index = tokenized_data[\"overflow_to_sample\"]\n",
+    "        answers = data[sample_index][\"answers\"]\n",
+    "\n",
+    "        # answers 和 answer_starts 均为长度为 1 的 list\n",
+    "        # 我们可以计算出答案的结束位置\n",
+    "        start_char = answers[\"answer_start\"][0]\n",
+    "        end_char = start_char + len(answers[\"text\"][0])\n",
+    "\n",
+    "        token_start_index = 0\n",
+    "        while token_type_ids[token_start_index] != 1:\n",
+    "            token_start_index += 1\n",
+    "\n",
+    "        token_end_index = len(input_ids) - 1\n",
+    "        while token_type_ids[token_end_index] != 1:\n",
+    "            token_end_index -= 1\n",
+    "        # 分词后一条数据的结尾一定是 [SEP],因此还需要减一\n",
+    "        token_end_index -= 1\n",
+    "\n",
+    "        if not (offsets[token_start_index][0] <= start_char and\n",
+    "                offsets[token_end_index][1] >= end_char):\n",
+    "            # 如果答案不在这条数据中,则将答案位置标记为 [CLS] 的位置\n",
+    "            tokenized_data_list[i][\"start_pos\"] = cls_index\n",
+    "            tokenized_data_list[i][\"end_pos\"] = cls_index\n",
+    "        else:\n",
+    "            # 否则,我们可以找到答案对应的 token 的起始位置,记录在 start_pos 和 end_pos 中\n",
+    "            while token_start_index < len(offsets) and offsets[\n",
+    "                    token_start_index][0] <= start_char:\n",
+    "                token_start_index += 1\n",
+    "            tokenized_data_list[i][\"start_pos\"] = token_start_index - 1\n",
+    "            while offsets[token_end_index][1] >= end_char:\n",
+    "                token_end_index -= 1\n",
+    "            tokenized_data_list[i][\"end_pos\"] = token_end_index + 1\n",
+    "\n",
+    "    return tokenized_data_list\n",
+    "\n",
+    "train_dataset.map(_process_train, batched=True, num_workers=5)\n",
+    "print(train_dataset[0])\n",
+    "print(\"处理后的训练集大小:\", len(train_dataset))"
+   ]
+  },
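+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick sanity check (our addition, not part of the original tutorial), we can slice the original context with the offsets at `start_pos` and `end_pos` and compare the result against the gold answer. `MapDataset` keeps the unprocessed examples in its `data` member, so the original context is still available:\n",
+    "\n",
+    "```python\n",
+    "sample = train_dataset[0]                   # processed: has start_pos / end_pos\n",
+    "context = train_dataset.data[0][\"context\"]  # original, unprocessed example\n",
+    "offsets = sample[\"offset_mapping\"]\n",
+    "start_char = offsets[sample[\"start_pos\"]][0]\n",
+    "end_char = offsets[sample[\"end_pos\"]][1]\n",
+    "print(context[start_char:end_char])         # should print 第35集\n",
+    "```"
+   ]
+  },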
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 2.2 处理验证集\n",
+    "\n",
+    "对于验证集的处理则简单得多,我们只需要保存原数据的 `id` 并将 `offset_mapping` 中不属于上下文的部分设置为 `None` 即可。"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "def _process_val(data):\n",
+    "\n",
+    "    contexts = [data[i][\"context\"] for i in range(len(data))]\n",
+    "    questions = [data[i][\"question\"] for i in range(len(data))]\n",
+    "\n",
+    "    tokenized_data_list = tokenizer(\n",
+    "        questions,\n",
+    "        contexts,\n",
+    "        stride=doc_stride,\n",
+    "        max_length=max_length,\n",
+    "        return_dict=False\n",
+    "    )\n",
+    "\n",
+    "    for i, tokenized_data in enumerate(tokenized_data_list):\n",
+    "        token_type_ids = tokenized_data[\"token_type_ids\"]\n",
+    "        # 保存数据对应的 id\n",
+    "        sample_index = tokenized_data[\"overflow_to_sample\"]\n",
+    "        tokenized_data_list[i][\"example_id\"] = data[sample_index][\"id\"]\n",
+    "\n",
+    "        # 将不属于 context 的 offset 设置为 None\n",
+    "        tokenized_data_list[i][\"offset_mapping\"] = [\n",
+    "            (o if token_type_ids[k] == 1 else None)\n",
+    "            for k, o in enumerate(tokenized_data[\"offset_mapping\"])\n",
+    "        ]\n",
+    "\n",
+    "    return tokenized_data_list\n",
+    "\n",
+    "val_dataset.map(_process_val, batched=True, num_workers=5)"
+   ]
+  },
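+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Again as a check of our own (not in the original tutorial), the processed validation entries now carry the original example's `id`, and the offsets of everything outside the context are masked with `None`:\n",
+    "\n",
+    "```python\n",
+    "print(val_dataset[0][\"example_id\"])\n",
+    "# the first entries correspond to [CLS] and the question tokens, hence None\n",
+    "print(val_dataset[0][\"offset_mapping\"][:16])\n",
+    "```"
+   ]
+  },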
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 2.3 DataLoader\n",
+    "\n",
+    "最后使用 `PaddleDataLoader` 将数据集包裹起来即可。"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from fastNLP.core import PaddleDataLoader\n", + "\n", + "train_dataloader = PaddleDataLoader(train_dataset, batch_size=32, shuffle=True)\n", + "val_dataloader = PaddleDataLoader(val_dataset, batch_size=16)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. 模型训练:自己定义评测用的 Metric 实现更加自由的任务评测\n", + "\n", + "#### 3.1 损失函数\n", + "\n", + "对于阅读理解任务,我们使用的是 `ErnieForQuestionAnswering` 模型。该模型在接受输入后会返回两个值:`start_logits` 和 `end_logits` ,大小均为 `(batch_size, sequence_length)`,反映了每条数据每个词语为答案起始位置的可能性,因此我们需要自定义一个损失函数来计算 `loss`。 `CrossEntropyLossForSquad` 会分别对答案起始位置的预测值和真实值计算交叉熵,最后返回其平均值作为最终的损失。" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "class CrossEntropyLossForSquad(paddle.nn.Layer):\n", + " def __init__(self):\n", + " super(CrossEntropyLossForSquad, self).__init__()\n", + "\n", + " def forward(self, start_logits, end_logits, start_pos, end_pos):\n", + " start_pos = paddle.unsqueeze(start_pos, axis=-1)\n", + " end_pos = paddle.unsqueeze(end_pos, axis=-1)\n", + " start_loss = paddle.nn.functional.softmax_with_cross_entropy(\n", + " logits=start_logits, label=start_pos)\n", + " start_loss = paddle.mean(start_loss)\n", + " end_loss = paddle.nn.functional.softmax_with_cross_entropy(\n", + " logits=end_logits, label=end_pos)\n", + " end_loss = paddle.mean(end_loss)\n", + "\n", + " loss = (start_loss + end_loss) / 2\n", + " return loss" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 3.2 定义模型\n", + "\n", + "模型的核心则是 `ErnieForQuestionAnswering` 的 `ernie-1.0-base-zh` 预训练模型,同时按照 `FastNLP` 的规定定义 `train_step` 和 `evaluate_step` 函数。这里 `evaluate_step` 函数并没有像文本分类那样直接返回该批次数据的评测结果,这一点我们将在下面为您讲解。" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[2022-06-27 19:00:15,825] [ INFO]\u001b[0m - Already cached /remote-home/shxing/.paddlenlp/models/ernie-1.0-base-zh/ernie_v1_chn_base.pdparams\u001b[0m\n", + "W0627 19:00:15.831080 21543 gpu_context.cc:278] Please NOTE: device: 0, GPU Compute Capability: 7.5, Driver API Version: 11.2, Runtime API Version: 11.2\n", + "W0627 19:00:15.843276 21543 gpu_context.cc:306] device: 0, cuDNN Version: 8.1.\n" + ] + } + ], + "source": [ + "from paddlenlp.transformers import ErnieForQuestionAnswering\n", + "\n", + "class QAModel(paddle.nn.Layer):\n", + " def __init__(self, model_checkpoint):\n", + " super(QAModel, self).__init__()\n", + " self.model = ErnieForQuestionAnswering.from_pretrained(model_checkpoint)\n", + " self.loss_func = CrossEntropyLossForSquad()\n", + "\n", + " def forward(self, input_ids, token_type_ids):\n", + " start_logits, end_logits = self.model(input_ids, token_type_ids)\n", + " return start_logits, end_logits\n", + "\n", + " def train_step(self, input_ids, token_type_ids, start_pos, end_pos):\n", + " start_logits, end_logits = self(input_ids, token_type_ids)\n", + " loss = self.loss_func(start_logits, end_logits, start_pos, end_pos)\n", + " return {\"loss\": loss}\n", + "\n", + " def evaluate_step(self, input_ids, token_type_ids):\n", + " start_logits, end_logits = self(input_ids, token_type_ids)\n", + " return {\"start_logits\": start_logits, \"end_logits\": end_logits}\n", + "\n", + "model = QAModel(MODEL_NAME)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 3.3 自定义 Metric 
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 3.2 Defining the model\n",
+    "\n",
+    "The core of the model is the `ernie-1.0-base-zh` pretrained checkpoint loaded into `ErnieForQuestionAnswering`, wrapped with the `train_step` and `evaluate_step` functions that `fastNLP` expects. Note that, unlike in text classification, `evaluate_step` does not directly return the evaluation results for its batch; we explain why below."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\u001b[32m[2022-06-27 19:00:15,825] [    INFO]\u001b[0m - Already cached /remote-home/shxing/.paddlenlp/models/ernie-1.0-base-zh/ernie_v1_chn_base.pdparams\u001b[0m\n",
+      "W0627 19:00:15.831080 21543 gpu_context.cc:278] Please NOTE: device: 0, GPU Compute Capability: 7.5, Driver API Version: 11.2, Runtime API Version: 11.2\n",
+      "W0627 19:00:15.843276 21543 gpu_context.cc:306] device: 0, cuDNN Version: 8.1.\n"
+     ]
+    }
+   ],
+   "source": [
+    "from paddlenlp.transformers import ErnieForQuestionAnswering\n",
+    "\n",
+    "class QAModel(paddle.nn.Layer):\n",
+    "    def __init__(self, model_checkpoint):\n",
+    "        super(QAModel, self).__init__()\n",
+    "        self.model = ErnieForQuestionAnswering.from_pretrained(model_checkpoint)\n",
+    "        self.loss_func = CrossEntropyLossForSquad()\n",
+    "\n",
+    "    def forward(self, input_ids, token_type_ids):\n",
+    "        start_logits, end_logits = self.model(input_ids, token_type_ids)\n",
+    "        return start_logits, end_logits\n",
+    "\n",
+    "    def train_step(self, input_ids, token_type_ids, start_pos, end_pos):\n",
+    "        start_logits, end_logits = self(input_ids, token_type_ids)\n",
+    "        loss = self.loss_func(start_logits, end_logits, start_pos, end_pos)\n",
+    "        return {\"loss\": loss}\n",
+    "\n",
+    "    def evaluate_step(self, input_ids, token_type_ids):\n",
+    "        start_logits, end_logits = self(input_ids, token_type_ids)\n",
+    "        return {\"start_logits\": start_logits, \"end_logits\": end_logits}\n",
+    "\n",
+    "model = QAModel(MODEL_NAME)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 3.3 Evaluating with a custom Metric\n",
+    "\n",
+    "`paddlenlp` provides two functions for evaluating `SQuAD`-format datasets, `compute_prediction` and `squad_evaluate`:\n",
+    "- `compute_prediction` takes the original data `examples`, the processed data `features`, and the corresponding results `predictions` (a tuple holding the `start_logits` and `end_logits` of all the data)\n",
+    "- `squad_evaluate` takes the original data `examples` and the predictions `all_predictions` (usually produced by `compute_prediction`)\n",
+    "\n",
+    "Both functions need the whole dataset, which, by `fastNLP`'s design, we clearly cannot supply from inside `evaluate_step`; nor does `fastNLP` ship a `Metric` that computes `F1` and `EM`. We therefore define our own evaluation `Metric`.\n",
+    "\n",
+    "Besides its constructor, a `Metric` has to implement three functions:\n",
+    "\n",
+    "1. `reset` - called before iterating over the validation set, to clear state; in our `Metric` we empty `all_start_logits` and `all_end_logits` so the results of each `batch` can be collected afresh.\n",
+    "2. `update` - called after each `batch` is processed, to update the `Metric`'s state; its parameters are exactly what `evaluate_step` returned. Here we accumulate the incoming `start_logits` and `end_logits`.\n",
+    "3. `get_metric` - called once the dataset has been fully iterated, to compute the result. At that point we hold `all_start_logits` and `all_end_logits` for the entire validation set; we pass them to `compute_prediction` to obtain the predictions, and then to `squad_evaluate` for the scores.\n",
+    "    - Note: `squad_evaluate` prints its results itself; to keep it from interfering with `fastNLP`'s output, we silence its standard output with `contextlib.redirect_stdout(None)`.\n",
+    "\n",
+    "In short, `SquadEvaluateMetric` collects the `logits` of all validation examples and then feeds them to `compute_prediction` and `squad_evaluate` in one go. Conveniently, `paddlenlp.datasets.load_dataset` returns a `MapDataset`, whose `data` member holds the data as loaded and whose `new_data` member holds the data updated by `map`, so the two can be passed in directly as `examples` and `features`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from fastNLP.core import Metric\n",
+    "from paddlenlp.metrics.squad import squad_evaluate, compute_prediction\n",
+    "import contextlib\n",
+    "\n",
+    "class SquadEvaluateMetric(Metric):\n",
+    "    def __init__(self, examples, features, testing=False):\n",
+    "        super(SquadEvaluateMetric, self).__init__(\"paddle\", False)\n",
+    "        self.examples = examples\n",
+    "        self.features = features\n",
+    "        self.all_start_logits = []\n",
+    "        self.all_end_logits = []\n",
+    "        self.testing = testing\n",
+    "\n",
+    "    def reset(self):\n",
+    "        self.all_start_logits = []\n",
+    "        self.all_end_logits = []\n",
+    "\n",
+    "    def update(self, start_logits, end_logits):\n",
+    "        for start, end in zip(start_logits, end_logits):\n",
+    "            self.all_start_logits.append(start.numpy())\n",
+    "            self.all_end_logits.append(end.numpy())\n",
+    "\n",
+    "    def get_metric(self):\n",
+    "        all_predictions, _, _ = compute_prediction(\n",
+    "            self.examples, self.features[:len(self.all_start_logits)],\n",
+    "            (self.all_start_logits, self.all_end_logits),\n",
+    "            False, 20, 30\n",
+    "        )\n",
+    "        with contextlib.redirect_stdout(None):\n",
+    "            result = squad_evaluate(\n",
+    "                examples=self.examples,\n",
+    "                preds=all_predictions,\n",
+    "                is_whitespace_splited=False\n",
+    "            )\n",
+    "\n",
+    "        if self.testing:\n",
+    "            self.print_predictions(all_predictions)\n",
+    "        return result\n",
+    "\n",
+    "    def print_predictions(self, preds):\n",
+    "        for i, data in enumerate(self.examples):\n",
+    "            if i >= 5:\n",
+    "                break\n",
+    "            print()\n",
+    "            print(\"原文:\", data[\"context\"])\n",
+    "            print(\"问题:\", data[\"question\"], \\\n",
+    "                \"答案:\", preds[data[\"id\"]], \\\n",
+    "                \"正确答案:\", data[\"answers\"][\"text\"])\n",
+    "\n",
+    "metric = SquadEvaluateMetric(\n",
+    "    val_dataloader.dataset.data,\n",
+    "    val_dataloader.dataset.new_data,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 3.4 Training\n",
+    "\n",
+    "With all the preparation done, we can train with the `Trainer`. As before, we use the linear warmup schedule `LinearDecayWithWarmup` for the learning rate and `AdamW` as the optimizer; for callbacks we choose `LRSchedCallback` to step the learning rate and `LoadBestModelCallback` to track the `f1` score of the evaluation results. Once the `Trainer` is initialized, we hand the training process over to `fastNLP`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[19:04:54] INFO     Running evaluator sanity check for 2 batches.              trainer.py:631\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "---------------------------- Eval. results on Epoch:0, Batch:100 ----------------------------\n",
+       "{\n",
+       "  \"exact#squad\": 49.25899788285109,\n",
+       "  \"f1#squad\": 66.55559127349602,\n",
+       "  \"total#squad\": 1417,\n",
+       "  \"HasAns_exact#squad\": 49.25899788285109,\n",
+       "  \"HasAns_f1#squad\": 66.55559127349602,\n",
+       "  \"HasAns_total#squad\": 1417\n",
+       "}\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "---------------------------- Eval. results on Epoch:0, Batch:200 ----------------------------\n",
+       "{\n",
+       "  \"exact#squad\": 57.37473535638673,\n",
+       "  \"f1#squad\": 70.93036525200617,\n",
+       "  \"total#squad\": 1417,\n",
+       "  \"HasAns_exact#squad\": 57.37473535638673,\n",
+       "  \"HasAns_f1#squad\": 70.93036525200617,\n",
+       "  \"HasAns_total#squad\": 1417\n",
+       "}\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "---------------------------- Eval. results on Epoch:0, Batch:300 ----------------------------\n",
+       "{\n",
+       "  \"exact#squad\": 63.86732533521524,\n",
+       "  \"f1#squad\": 78.62546663568186,\n",
+       "  \"total#squad\": 1417,\n",
+       "  \"HasAns_exact#squad\": 63.86732533521524,\n",
+       "  \"HasAns_f1#squad\": 78.62546663568186,\n",
+       "  \"HasAns_total#squad\": 1417\n",
+       "}\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "---------------------------- Eval. results on Epoch:0, Batch:400 ----------------------------\n",
+       "{\n",
+       "  \"exact#squad\": 64.92589978828511,\n",
+       "  \"f1#squad\": 79.36746074079691,\n",
+       "  \"total#squad\": 1417,\n",
+       "  \"HasAns_exact#squad\": 64.92589978828511,\n",
+       "  \"HasAns_f1#squad\": 79.36746074079691,\n",
+       "  \"HasAns_total#squad\": 1417\n",
+       "}\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "---------------------------- Eval. results on Epoch:0, Batch:500 ----------------------------\n",
+       "{\n",
+       "  \"exact#squad\": 65.70218772053634,\n",
+       "  \"f1#squad\": 80.33295482054824,\n",
+       "  \"total#squad\": 1417,\n",
+       "  \"HasAns_exact#squad\": 65.70218772053634,\n",
+       "  \"HasAns_f1#squad\": 80.33295482054824,\n",
+       "  \"HasAns_total#squad\": 1417\n",
+       "}\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "---------------------------- Eval. results on Epoch:0, Batch:600 ----------------------------\n",
+       "{\n",
+       "  \"exact#squad\": 65.41990119971771,\n",
+       "  \"f1#squad\": 79.7483487059053,\n",
+       "  \"total#squad\": 1417,\n",
+       "  \"HasAns_exact#squad\": 65.41990119971771,\n",
+       "  \"HasAns_f1#squad\": 79.7483487059053,\n",
+       "  \"HasAns_total#squad\": 1417\n",
+       "}\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "---------------------------- Eval. results on Epoch:0, Batch:700 ----------------------------\n",
+       "{\n",
+       "  \"exact#squad\": 66.61961891319689,\n",
+       "  \"f1#squad\": 80.32432238994133,\n",
+       "  \"total#squad\": 1417,\n",
+       "  \"HasAns_exact#squad\": 66.61961891319689,\n",
+       "  \"HasAns_f1#squad\": 80.32432238994133,\n",
+       "  \"HasAns_total#squad\": 1417\n",
+       "}\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "---------------------------- Eval. results on Epoch:0, Batch:800 ----------------------------\n",
+       "{\n",
+       "  \"exact#squad\": 65.84333098094567,\n",
+       "  \"f1#squad\": 79.23169801265415,\n",
+       "  \"total#squad\": 1417,\n",
+       "  \"HasAns_exact#squad\": 65.84333098094567,\n",
+       "  \"HasAns_f1#squad\": 79.23169801265415,\n",
+       "  \"HasAns_total#squad\": 1417\n",
+       "}\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[19:20:28] INFO     Loading best model from fnlp-ernie-squad/ load_best_model_callback.py:111\n",
+       "                    2022-06-27-19_00_15_388554/best_so_far                                   \n",
+       "                    with f1#squad: 80.33295482054824...                                      \n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "           INFO     Deleting fnlp-ernie-squad/2022-06-27-19_0 load_best_model_callback.py:131\n",
+       "                    0_15_388554/best_so_far...                                               \n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "from fastNLP import Trainer, LRSchedCallback, LoadBestModelCallback\n",
+    "from paddlenlp.transformers import LinearDecayWithWarmup\n",
+    "\n",
+    "n_epochs = 1\n",
+    "num_training_steps = len(train_dataloader) * n_epochs\n",
+    "lr_scheduler = LinearDecayWithWarmup(3e-5, num_training_steps, 0.1)\n",
+    "optimizer = paddle.optimizer.AdamW(\n",
+    "    learning_rate=lr_scheduler,\n",
+    "    parameters=model.parameters(),\n",
+    ")\n",
+    "callbacks = [\n",
+    "    LRSchedCallback(lr_scheduler, step_on=\"batch\"),\n",
+    "    LoadBestModelCallback(\"f1#squad\", larger_better=True, save_folder=\"fnlp-ernie-squad\")\n",
+    "]\n",
+    "trainer = Trainer(\n",
+    "    model=model,\n",
+    "    train_dataloader=train_dataloader,\n",
+    "    evaluate_dataloaders=val_dataloader,\n",
+    "    device=1,\n",
+    "    optimizers=optimizer,\n",
+    "    n_epochs=n_epochs,\n",
+    "    callbacks=callbacks,\n",
+    "    evaluate_every=100,\n",
+    "    metrics={\"squad\": metric},\n",
+    ")\n",
+    "trainer.run()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 3.5 Testing\n",
+    "\n",
+    "Finally, we can inspect the trained model with an `Evaluator`. Earlier we gave `SquadEvaluateMetric` a `testing` parameter so that it prints a few sample predictions at test time; as the output shows, the results are fairly good."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "\n",
+       "原文: 爬行垫根据中间材料的不同可以分为:XPE爬行垫、EPE爬行垫、EVA爬行垫、PVC爬行垫;其中XPE爬\n",
+       "行垫、EPE爬行垫都属于PE材料加保鲜膜复合而成,都是无异味的环保材料,但是XPE爬行垫是品质较好的爬\n",
+       "行垫,韩国进口爬行垫都是这种爬行垫,而EPE爬行垫是国内厂家为了减低成本,使用EPE(珍珠棉)作为原料生\n",
+       "产的一款爬行垫,该材料弹性差,易碎,开孔发泡防水性弱。EVA爬行垫、PVC爬行垫是用EVA或PVC作为原材料\n",
+       "与保鲜膜复合的而成的爬行垫,或者把图案转印在原材料上,这两款爬行垫通常有异味,如果是图案转印的爬\n",
+       "行垫,油墨外露容易脱落。 \n",
+       "当时我儿子爬的时候,我们也买了垫子,但是始终有味。最后就没用了,铺的就的薄毯子让他爬。\n",
+       "问题: 爬行垫什么材质的好 答案: EPE(珍珠棉 正确答案: ['XPE']\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "\n",
+       "原文: 真实情况是160-162。她平时谎报的168是因为不离脚穿高水台恨天高(15厘米) 图1她穿着高水台恨\n",
+       "天高和刘亦菲一样高,(刘亦菲对外报身高172)范冰冰礼服下厚厚的高水台暴露了她的心机,对比一下两者的\n",
+       "鞋子吧 图2 穿着高水台恨天高才和刘德华谢霆锋持平,如果她真的有168,那么加上鞋高,刘和谢都要有180?\n",
+       "明显是不可能的。所以刘德华对外报的身高174减去10-15厘米才是范冰冰的真实身高 图3,范冰冰有一次脱\n",
+       "鞋上场,这个最说明问题了,看看她的身体比例吧。还有目测一下她手上鞋子的鞋跟有多高多厚吧,至少超过\n",
+       "10厘米。\n",
+       "问题: 范冰冰多高真实身高 答案: 160-162 正确答案: ['160-162']\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "\n",
+       "原文: 防水作为目前高端手机的标配,特别是苹果也支持防水之后,国产大多数高端旗舰手机都已经支持防\n",
+       "水。虽然我们真的不会故意把手机放入水中,但是有了防水之后,用户心里会多一重安全感。那么近日最为\n",
+       "火热的小米6防水吗?小米6的防水级别又是多少呢? 小编查询了很多资料发现,小米6确实是防水的,但是为\n",
+       "了保持低调,同时为了不被别人说防水等级不够,很多资料都没有标注小米是否防水。根据评测资料显示,小\n",
+       "米6是支持IP68级的防水,是绝对能够满足日常生活中的防水需求的。\n",
+       "问题: 小米6防水等级 答案: IP68级 正确答案: ['IP68级']\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "\n",
+       "原文: 这位朋友你好,女性出现妊娠反应一般是从6-12周左右,也就是女性怀孕1个多月就会开始出现反应,\n",
+       "第3个月的时候,妊辰反应基本结束。 而大部分女性怀孕初期都会出现恶心、呕吐的感觉,这些症状都是因\n",
+       "人而异的,除非恶心、呕吐的非常厉害,才需要就医,否则这些都是刚怀孕的的正常症状。1-3个月的时候可\n",
+       "以观察一下自己的皮肤,一般女性怀孕初期可能会产生皮肤色素沉淀或是腹壁产生妊娠纹,特别是在怀孕的\n",
+       "后期更加明显。 还有很多女性怀孕初期会出现疲倦、嗜睡的情况。怀孕三个月的时候,膀胱会受到日益胀\n",
+       "大的子宫的压迫,容量会变小,所以怀孕期间也会有尿频的现象出现。月经停止也是刚怀孕最容易出现的症\n",
+       "状,只要是平时月经正常的女性,在性行为后超过正常经期两周,就有可能是怀孕了。 如果你想判断自己是\n",
+       "否怀孕,可以看看自己有没有这些反应。当然这也只是多数人的怀孕表现,也有部分女性怀孕表现并不完全\n",
+       "是这样,如果你无法确定自己是否怀孕,最好去医院检查一下。\n",
+       "问题: 怀孕多久会有反应 答案: 6-12周左右 正确答案: ['6-12周左右', '6-12周', '1个多月']\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "\n",
+       "原文: 【东奥会计在线——中级会计职称频道推荐】根据《关于提高科技型中小企业研究开发费用税前加计\n",
+       "扣除比例的通知》的规定,研发费加计扣除比例提高到75%。|财政部、国家税务总局、科技部发布《关于提\n",
+       "高科技型中小企业研究开发费用税前加计扣除比例的通知》。|通知称,为进一步激励中小企业加大研发投\n",
+       "入,支持科技创新,就提高科技型中小企业研究开发费用(以下简称研发费用)税前加计扣除比例有关问题发\n",
+       "布通知。|通知明确,科技型中小企业开展研发活动中实际发生的研发费用,未形成无形资产计入当期损益的\n",
+       ",在按规定据实扣除的基础上,在2017年1月1日至2019年12月31日期间,再按照实际发生额的75%在税前加计\n",
+       "扣除;形成无形资产的,在上述期间按照无形资产成本的175%在税前摊销。|科技型中小企业享受研发费用税\n",
+       "前加计扣除政策的其他政策口径按照《财政部国家税务总局科技部关于完善研究开发费用税前加计扣除政\n",
+       "策的通知》(财税〔2015〕119号)规定执行。|科技型中小企业条件和管理办法由科技部、财政部和国家税\n",
+       "务总局另行发布。科技、财政和税务部门应建立信息共享机制,及时共享科技型中小企业的相关信息,加强\n",
+       "协调配合,保障优惠政策落实到位。|上一篇文章:关于2016年度企业研究开发费用税前加计扣除政策企业所\n",
+       "得税纳税申报问题的公告 下一篇文章:关于提高科技型中小企业研究开发费用税前加计扣除比例的通知\n",
+       "问题: 研发费用加计扣除比例 答案: 75% 正确答案: ['75%']\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "{\n",
+       "    'exact#squad': 65.70218772053634,\n",
+       "    'f1#squad': 80.33295482054824,\n",
+       "    'total#squad': 1417,\n",
+       "    'HasAns_exact#squad': 65.70218772053634,\n",
+       "    'HasAns_f1#squad': 80.33295482054824,\n",
+       "    'HasAns_total#squad': 1417\n",
+       "}\n"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "from fastNLP import Evaluator\n",
+    "\n",
+    "evaluator = Evaluator(\n",
+    "    model=model,\n",
+    "    dataloaders=val_dataloader,\n",
+    "    device=1,\n",
+    "    metrics={\n",
+    "        \"squad\": SquadEvaluateMetric(\n",
+    "            val_dataloader.dataset.data,\n",
+    "            val_dataloader.dataset.new_data,\n",
+    "            testing=True,\n",
+    "        ),\n",
+    "    },\n",
+    ")\n",
+    "result = evaluator.run()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.7.13 ('fnlp-paddle')",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.13"
+  },
+  "orig_nbformat": 4,
+  "vscode": {
+   "interpreter": {
+    "hash": "31f2d9d3efc23c441973d7c4273acfea8b132b6a578f002629b6b44b8f65e720"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/tutorials/figures/paddle-ernie-1.0-masking-levels.png b/tutorials/figures/paddle-ernie-1.0-masking-levels.png
new file mode 100644
index 00000000..ff2519c4
Binary files /dev/null and b/tutorials/figures/paddle-ernie-1.0-masking-levels.png differ
diff --git a/tutorials/figures/paddle-ernie-1.0-masking.png b/tutorials/figures/paddle-ernie-1.0-masking.png
new file mode 100644
index 00000000..ed003a2f
Binary files /dev/null and b/tutorials/figures/paddle-ernie-1.0-masking.png differ
diff --git a/tutorials/figures/paddle-ernie-2.0-continual-pretrain.png b/tutorials/figures/paddle-ernie-2.0-continual-pretrain.png
new file mode 100644
index 00000000..d45f65d8
Binary files /dev/null and b/tutorials/figures/paddle-ernie-2.0-continual-pretrain.png differ
diff --git a/tutorials/figures/paddle-ernie-3.0-framework.png b/tutorials/figures/paddle-ernie-3.0-framework.png
new file mode 100644
index 00000000..f50ddb1c
Binary files /dev/null and b/tutorials/figures/paddle-ernie-3.0-framework.png differ