Browse Source

MERGE

tags/v1.0.0alpha
YWMditto 2 years ago
parent
commit
8ea8992a47
11 changed files with 316 additions and 492 deletions
  1. +41
    -13
      fastNLP/core/callbacks/callback_manager.py
  2. +0
    -2
      fastNLP/core/callbacks/progress_callback.py
  3. +3
    -13
      fastNLP/core/controllers/trainer.py
  4. +2
    -2
      fastNLP/core/metrics/accuracy.py
  5. +2
    -2
      fastNLP/core/metrics/classify_f1_pre_rec_metric.py
  6. +7
    -4
      tests/core/callbacks/test_checkpoint_callback_torch.py
  7. +2
    -2
      tests/core/callbacks/test_load_best_model_callback_torch.py
  8. +2
    -1
      tests/core/callbacks/test_more_evaluate_callback.py
  9. +6
    -3
      tests/helpers/utils.py
  10. +251
    -450
      tutorials/fastnlp_tutorial_0.ipynb
  11. BIN
      tutorials/figures/T0-fig-trainer-and-evaluator.png

+ 41
- 13
fastNLP/core/callbacks/callback_manager.py View File

@@ -9,6 +9,8 @@ __all__ = [
from .callback_events import Events from .callback_events import Events
from .callback import Callback from .callback import Callback
from fastNLP.core.log import logger from fastNLP.core.log import logger
from .progress_callback import ProgressCallback, choose_progress_callback
from fastNLP.envs import rank_zero_call




def _transfer(func): def _transfer(func):
@@ -26,6 +28,43 @@ def _transfer(func):
return wrapper return wrapper




def prepare_callbacks(callbacks, progress_bar):
    """
    Normalize the user-supplied callbacks and attach a progress-bar callback when needed.

    :param callbacks: a single ``Callback``, a list/tuple of ``Callback`` objects, or ``None``;
    :param progress_bar: one of ``None`` / ``'raw'`` / ``'rich'`` / ``'auto'`` (or a value understood by
        :func:`choose_progress_callback`) selecting which progress callback to create when the user
        did not already pass a ``ProgressCallback`` in ``callbacks``;
    :return: a flat ``list`` of ``Callback`` instances ready to hand to the ``CallbackManager``.
    :raises ValueError: if ``callbacks`` is neither a ``Callback`` nor a list/tuple;
    :raises TypeError: if any element of ``callbacks`` is not a ``Callback``.
    """
    _callbacks = []
    if callbacks is not None:
        if isinstance(callbacks, Callback):
            callbacks = [callbacks]
        if not isinstance(callbacks, Sequence):
            raise ValueError("Parameter `callbacks` should be type 'List' or 'Tuple'.")
        callbacks = list(callbacks)
        for _callback in callbacks:
            if not isinstance(_callback, Callback):
                raise TypeError(f"callbacks must be of Callback type, instead of `{type(_callback)}`")
        _callbacks += callbacks

    # Whether the user already supplied a progress callback themselves.
    # (The original flag was named `has_no_progress` but was set to True when a
    # ProgressCallback WAS found, which inverted the final warning below.)
    has_progress = any(isinstance(_callback, ProgressCallback) for _callback in _callbacks)

    if not has_progress:
        callback = choose_progress_callback(progress_bar)
        if callback is not None:
            _callbacks.append(callback)
        elif progress_bar is None:
            # No ProgressCallback was passed in and progress_bar selects none: nothing
            # will be printed during training, so warn the user (rank 0 only).
            rank_zero_call(logger.warning)("No progress bar is provided, there will have no information output "
                                           "during training.")
    elif progress_bar is not None and progress_bar != 'auto':
        # The user passed an explicit progress_bar value AND a ProgressCallback object;
        # the callback object wins. (Plain string: the message has no placeholders.)
        logger.warning("Since you have passed in ProgressBar callback, progress_bar will be ignored.")

    return _callbacks


class CallbackManager: class CallbackManager:
r""" r"""
用来管理训练过程中的所有的 callback 实例; 用来管理训练过程中的所有的 callback 实例;
@@ -45,24 +84,13 @@ class CallbackManager:
""" """
self._need_reproducible_sampler = False self._need_reproducible_sampler = False


_callbacks = []
if callbacks is not None:
if isinstance(callbacks, Callback):
callbacks = [callbacks]
if not isinstance(callbacks, Sequence):
raise ValueError("Parameter `callbacks` should be type 'List' or 'Tuple'.")
callbacks = list(callbacks)
for _callback in callbacks:
if not isinstance(_callback, Callback):
raise TypeError(f"callbacks must be of Callback type, instead of `{type(_callback)}`")
_callbacks += callbacks
self.callback_fns = defaultdict(list) self.callback_fns = defaultdict(list)
# 因为理论上用户最多只能通过 'trainer.on_train_begin' 或者 'trainer.callback_manager.on_train_begin' 来调用,即其是没办法 # 因为理论上用户最多只能通过 'trainer.on_train_begin' 或者 'trainer.callback_manager.on_train_begin' 来调用,即其是没办法
# 直接调用具体的某一个 callback 函数,而不调用其余的同名的 callback 函数的,因此我们只需要记录具体 Event 的时机即可; # 直接调用具体的某一个 callback 函数,而不调用其余的同名的 callback 函数的,因此我们只需要记录具体 Event 的时机即可;
self.callback_counter = defaultdict(lambda: 0) self.callback_counter = defaultdict(lambda: 0)
if len(_callbacks):
if len(callbacks):
# 这一对象是为了保存原始的类 callback 对象来帮助用户进行 debug,理论上在正常的使用中你并不会需要它; # 这一对象是为了保存原始的类 callback 对象来帮助用户进行 debug,理论上在正常的使用中你并不会需要它;
self.class_callbacks = _callbacks
self.class_callbacks = callbacks
else: else:
self.class_callbacks: Optional[List[Callback]] = [] self.class_callbacks: Optional[List[Callback]] = []




+ 0
- 2
fastNLP/core/callbacks/progress_callback.py View File

@@ -11,8 +11,6 @@ __all__ = [
from .has_monitor_callback import HasMonitorCallback from .has_monitor_callback import HasMonitorCallback
from fastNLP.core.utils import f_rich_progress from fastNLP.core.utils import f_rich_progress
from fastNLP.core.log import logger from fastNLP.core.log import logger
from fastNLP.core.utils.utils import is_notebook





class ProgressCallback(HasMonitorCallback): class ProgressCallback(HasMonitorCallback):


+ 3
- 13
fastNLP/core/controllers/trainer.py View File

@@ -19,8 +19,8 @@ from .evaluator import Evaluator
from fastNLP.core.controllers.utils.utils import TrainerEventTrigger, _TruncatedDataLoader from fastNLP.core.controllers.utils.utils import TrainerEventTrigger, _TruncatedDataLoader
from fastNLP.core.callbacks import Callback, CallbackManager, Events, EventsList from fastNLP.core.callbacks import Callback, CallbackManager, Events, EventsList
from fastNLP.core.callbacks.callback import _CallbackWrapper from fastNLP.core.callbacks.callback import _CallbackWrapper
from fastNLP.core.callbacks.callback_manager import prepare_callbacks
from fastNLP.core.callbacks.callback_events import _SingleEventState from fastNLP.core.callbacks.callback_events import _SingleEventState
from fastNLP.core.callbacks.progress_callback import choose_progress_callback
from fastNLP.core.drivers import Driver from fastNLP.core.drivers import Driver
from fastNLP.core.drivers.utils import choose_driver from fastNLP.core.drivers.utils import choose_driver
from fastNLP.core.utils import get_fn_arg_names, match_and_substitute_params, nullcontext from fastNLP.core.utils import get_fn_arg_names, match_and_substitute_params, nullcontext
@@ -133,7 +133,7 @@ class Trainer(TrainerEventTrigger):
["all", "ignore", "only_error"];当该参数的值不是以上值时,该值应当表示一个文件夹的名字,我们会将其他 rank 的输出流重定向到 ["all", "ignore", "only_error"];当该参数的值不是以上值时,该值应当表示一个文件夹的名字,我们会将其他 rank 的输出流重定向到
log 文件中,然后将 log 文件保存在通过该参数值设定的文件夹中;默认为 "only_error"; log 文件中,然后将 log 文件保存在通过该参数值设定的文件夹中;默认为 "only_error";
progress_bar: 以哪种方式显示 progress ,目前支持[None, 'raw', 'rich', 'auto'] 或者 RichCallback, RawTextCallback对象, progress_bar: 以哪种方式显示 progress ,目前支持[None, 'raw', 'rich', 'auto'] 或者 RichCallback, RawTextCallback对象,
默认为 auto , auto 表示如果检测到当前 terminal 为交互型 则使用 RichCallback,否则使用 RawTextCallback对象。如果
默认为 auto , auto 表示如果检测到当前 terminal 为交互型则使用 RichCallback,否则使用 RawTextCallback对象。如果
需要定制 progress bar 的参数,例如打印频率等,可以传入 RichCallback, RawTextCallback 对象。 需要定制 progress bar 的参数,例如打印频率等,可以传入 RichCallback, RawTextCallback 对象。
train_input_mapping: 与 input_mapping 一致,但是只用于 train 中。与 input_mapping 互斥。 train_input_mapping: 与 input_mapping 一致,但是只用于 train 中。与 input_mapping 互斥。
train_output_mapping: 与 output_mapping 一致,但是只用于 train 中。与 output_mapping 互斥。 train_output_mapping: 与 output_mapping 一致,但是只用于 train 中。与 output_mapping 互斥。
@@ -212,17 +212,7 @@ class Trainer(TrainerEventTrigger):
self.driver.set_optimizers(optimizers=optimizers) self.driver.set_optimizers(optimizers=optimizers)


# 根据 progress_bar 参数选择 ProgressBarCallback # 根据 progress_bar 参数选择 ProgressBarCallback
progress_bar_callback = choose_progress_callback(kwargs.get('progress_bar', 'auto'))
if progress_bar_callback is not None:
if callbacks is None:
callbacks = []
elif not isinstance(callbacks, Sequence):
callbacks = [callbacks]

callbacks = list(callbacks) + [progress_bar_callback]
else:
rank_zero_call(logger.warning)("No progress bar is provided, there will have no information output "
"during training.")
callbacks = prepare_callbacks(callbacks, kwargs.get('progress_bar', 'auto'))
# 初始化 callback manager; # 初始化 callback manager;
self.callback_manager = CallbackManager(callbacks) self.callback_manager = CallbackManager(callbacks)
# 添加所有的函数式 callbacks; # 添加所有的函数式 callbacks;


+ 2
- 2
fastNLP/core/metrics/accuracy.py View File

@@ -28,7 +28,7 @@ class Accuracy(Metric):


def get_metric(self) -> dict: def get_metric(self) -> dict:
r""" r"""
get_metric 函数将根据 evaluate 函数累计的评价指标统计量来计算最终的评价结果.
get_metric 函数将根据 update 函数累计的评价指标统计量来计算最终的评价结果.


:return dict evaluate_result: {"acc": float} :return dict evaluate_result: {"acc": float}
""" """
@@ -37,7 +37,7 @@ class Accuracy(Metric):


def update(self, pred, target, seq_len=None): def update(self, pred, target, seq_len=None):
r""" r"""
evaluate函数将针对一个批次的预测结果做评价指标的累计
update 函数将针对一个批次的预测结果做评价指标的累计


:param torch.Tensor pred: 预测的tensor, tensor的形状可以是torch.Size([B,]), torch.Size([B, n_classes]), :param torch.Tensor pred: 预测的tensor, tensor的形状可以是torch.Size([B,]), torch.Size([B, n_classes]),
torch.Size([B, max_len]), 或者torch.Size([B, max_len, n_classes]) torch.Size([B, max_len]), 或者torch.Size([B, max_len, n_classes])


+ 2
- 2
fastNLP/core/metrics/classify_f1_pre_rec_metric.py View File

@@ -56,7 +56,7 @@ class ClassifyFPreRecMetric(Metric):


def get_metric(self) -> dict: def get_metric(self) -> dict:
r""" r"""
get_metric函数将根据evaluate函数累计的评价指标统计量来计算最终的评价结果.
get_metric函数将根据update函数累计的评价指标统计量来计算最终的评价结果.


:return dict evaluate_result: {"acc": float} :return dict evaluate_result: {"acc": float}
""" """
@@ -117,7 +117,7 @@ class ClassifyFPreRecMetric(Metric):


def update(self, pred, target, seq_len=None): def update(self, pred, target, seq_len=None):
r""" r"""
evaluate函数将针对一个批次的预测结果做评价指标的累计
update 函数将针对一个批次的预测结果做评价指标的累计


:param torch.Tensor pred: 预测的tensor, tensor的形状可以是torch.Size([B,]), torch.Size([B, n_classes]), :param torch.Tensor pred: 预测的tensor, tensor的形状可以是torch.Size([B,]), torch.Size([B, n_classes]),
torch.Size([B, max_len]), 或者torch.Size([B, max_len, n_classes]) torch.Size([B, max_len]), 或者torch.Size([B, max_len, n_classes])


+ 7
- 4
tests/core/callbacks/test_checkpoint_callback_torch.py View File

@@ -74,7 +74,7 @@ def model_and_optimizers(request):
@pytest.mark.parametrize("driver,device", [("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 1)]) # ("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 1) @pytest.mark.parametrize("driver,device", [("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 1)]) # ("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 1)
@pytest.mark.parametrize("version", [0, 1]) @pytest.mark.parametrize("version", [0, 1])
@pytest.mark.parametrize("only_state_dict", [True, False]) @pytest.mark.parametrize("only_state_dict", [True, False])
@magic_argv_env_context
@magic_argv_env_context(timeout=100)
def test_model_checkpoint_callback_1( def test_model_checkpoint_callback_1(
model_and_optimizers: TrainerParameters, model_and_optimizers: TrainerParameters,
driver, driver,
@@ -194,7 +194,7 @@ def test_model_checkpoint_callback_1(
trainer.load_model(folder, only_state_dict=only_state_dict) trainer.load_model(folder, only_state_dict=only_state_dict)


trainer.run() trainer.run()
trainer.driver.barrier()
finally: finally:
rank_zero_rm(path) rank_zero_rm(path)


@@ -205,7 +205,7 @@ def test_model_checkpoint_callback_1(
@pytest.mark.torch @pytest.mark.torch
@pytest.mark.parametrize("driver,device", [("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 1)]) # ("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 1) @pytest.mark.parametrize("driver,device", [("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 1)]) # ("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 1)
@pytest.mark.parametrize("only_state_dict", [True]) @pytest.mark.parametrize("only_state_dict", [True])
@magic_argv_env_context
@magic_argv_env_context(timeout=100)
def test_model_checkpoint_callback_2( def test_model_checkpoint_callback_2(
model_and_optimizers: TrainerParameters, model_and_optimizers: TrainerParameters,
driver, driver,
@@ -285,6 +285,7 @@ def test_model_checkpoint_callback_2(


trainer.load_model(folder, only_state_dict=only_state_dict) trainer.load_model(folder, only_state_dict=only_state_dict)
trainer.run() trainer.run()
trainer.driver.barrier()


finally: finally:
rank_zero_rm(path) rank_zero_rm(path)
@@ -298,7 +299,7 @@ def test_model_checkpoint_callback_2(
@pytest.mark.parametrize("driver,device", [("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 0)]) # ("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 1) @pytest.mark.parametrize("driver,device", [("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 0)]) # ("torch", "cpu"), ("torch_ddp", [0, 1]), ("torch", 1)
@pytest.mark.parametrize("version", [0, 1]) @pytest.mark.parametrize("version", [0, 1])
@pytest.mark.parametrize("only_state_dict", [True, False]) @pytest.mark.parametrize("only_state_dict", [True, False])
@magic_argv_env_context
@magic_argv_env_context(timeout=100)
def test_trainer_checkpoint_callback_1( def test_trainer_checkpoint_callback_1(
model_and_optimizers: TrainerParameters, model_and_optimizers: TrainerParameters,
driver, driver,
@@ -416,6 +417,7 @@ def test_trainer_checkpoint_callback_1(
trainer.load(folder, only_state_dict=only_state_dict) trainer.load(folder, only_state_dict=only_state_dict)


trainer.run() trainer.run()
trainer.driver.barrier()


finally: finally:
rank_zero_rm(path) rank_zero_rm(path)
@@ -664,6 +666,7 @@ def test_trainer_checkpoint_callback_2(
trainer.load(folder, model_load_fn=model_load_fn) trainer.load(folder, model_load_fn=model_load_fn)


trainer.run() trainer.run()
trainer.driver.barrier()


finally: finally:
rank_zero_rm(path) rank_zero_rm(path)


+ 2
- 2
tests/core/callbacks/test_load_best_model_callback_torch.py View File

@@ -16,7 +16,6 @@ from fastNLP.core.controllers.trainer import Trainer
from fastNLP.core.metrics.accuracy import Accuracy from fastNLP.core.metrics.accuracy import Accuracy
from fastNLP.core.callbacks.load_best_model_callback import LoadBestModelCallback from fastNLP.core.callbacks.load_best_model_callback import LoadBestModelCallback
from fastNLP.core import Evaluator from fastNLP.core import Evaluator
from fastNLP.core import rank_zero_rm
from fastNLP.core.drivers.torch_driver import TorchSingleDriver from fastNLP.core.drivers.torch_driver import TorchSingleDriver
from tests.helpers.models.torch_model import TorchNormalModel_Classification_1 from tests.helpers.models.torch_model import TorchNormalModel_Classification_1
from tests.helpers.datasets.torch_data import TorchArgMaxDataset from tests.helpers.datasets.torch_data import TorchArgMaxDataset
@@ -112,7 +111,8 @@ def test_load_best_model_callback(
results = evaluator.run() results = evaluator.run()
assert np.allclose(callbacks[0].monitor_value, results['acc#acc#dl1']) assert np.allclose(callbacks[0].monitor_value, results['acc#acc#dl1'])
if save_folder: if save_folder:
rank_zero_rm(save_folder)
import shutil
shutil.rmtree(save_folder, ignore_errors=True)
if dist.is_initialized(): if dist.is_initialized():
dist.destroy_process_group() dist.destroy_process_group()




+ 2
- 1
tests/core/callbacks/test_more_evaluate_callback.py View File

@@ -172,7 +172,7 @@ def test_model_more_evaluate_callback_1(
trainer.load_model(folder, only_state_dict=only_state_dict) trainer.load_model(folder, only_state_dict=only_state_dict)


trainer.run() trainer.run()
trainer.driver.barrier()
finally: finally:
rank_zero_rm(path) rank_zero_rm(path)


@@ -257,6 +257,7 @@ def test_trainer_checkpoint_callback_1(
trainer.load(folder, only_state_dict=only_state_dict) trainer.load(folder, only_state_dict=only_state_dict)


trainer.run() trainer.run()
trainer.driver.barrier()


finally: finally:
rank_zero_rm(path) rank_zero_rm(path)


+ 6
- 3
tests/helpers/utils.py View File

@@ -33,6 +33,8 @@ def recover_logger(fn):
def magic_argv_env_context(fn=None, timeout=600): def magic_argv_env_context(fn=None, timeout=600):
""" """
用来在测试时包裹每一个单独的测试函数,使得 ddp 测试正确; 用来在测试时包裹每一个单独的测试函数,使得 ddp 测试正确;
会丢掉 pytest 中的 arg 参数。

:param timeout: 表示一个测试如果经过多久还没有通过的话就主动将其 kill 掉,默认为 10 分钟,单位为秒; :param timeout: 表示一个测试如果经过多久还没有通过的话就主动将其 kill 掉,默认为 10 分钟,单位为秒;
:return: :return:
""" """
@@ -46,9 +48,10 @@ def magic_argv_env_context(fn=None, timeout=600):
env = deepcopy(os.environ.copy()) env = deepcopy(os.environ.copy())


used_args = [] used_args = []
for each_arg in sys.argv[1:]:
if "test" not in each_arg:
used_args.append(each_arg)
# for each_arg in sys.argv[1:]:
# # warning,否则 可能导致 pytest -s . 中的点混入其中,导致多卡启动的 collect tests items 不为 1
# if each_arg.startswith('-'):
# used_args.append(each_arg)


pytest_current_test = os.environ.get('PYTEST_CURRENT_TEST') pytest_current_test = os.environ.get('PYTEST_CURRENT_TEST')




+ 251
- 450
tutorials/fastnlp_tutorial_0.ipynb View File

@@ -15,15 +15,15 @@
"\n", "\n",
"    1.3   trainer 内部初始化 evaluater\n", "    1.3   trainer 内部初始化 evaluater\n",
"\n", "\n",
"  2   使用 trainer 训练模型\n",
"  2   使用 fastNLP 0.8 搭建 argmax 模型\n",
"\n", "\n",
"    2.1   argmax 模型实例\n",
"    2.1   trainer_step 和 evaluator_step\n",
"\n", "\n",
"    2.2   trainer 的参数匹配\n",
"    2.2   trainer 和 evaluator 的参数匹配\n",
"\n", "\n",
"    2.3   trainer 的实际使用 \n",
"    2.3   一个实际案例:argmax 模型\n",
"\n", "\n",
"  3   使用 evaluator 评测模型\n",
"  3   使用 fastNLP 0.8 训练 argmax 模型\n",
" \n", " \n",
"    3.1   trainer 外部初始化的 evaluator\n", "    3.1   trainer 外部初始化的 evaluator\n",
"\n", "\n",
@@ -50,21 +50,21 @@
"\n", "\n",
"```python\n", "```python\n",
"trainer = Trainer(\n", "trainer = Trainer(\n",
" model=model,\n",
" train_dataloader=train_dataloader,\n",
" optimizers=optimizer,\n",
" model=model, # 模型基于 torch.nn.Module\n",
" train_dataloader=train_dataloader, # 加载模块基于 torch.utils.data.DataLoader \n",
" optimizers=optimizer, # 优化模块基于 torch.optim.*\n",
"\t...\n", "\t...\n",
"\tdriver=\"torch\",\n",
"\tdevice=0,\n",
"\tdriver=\"torch\", # 使用 pytorch 模块进行训练 \n",
"\tdevice='cuda', # 使用 GPU:0 显卡执行训练\n",
"\t...\n", "\t...\n",
")\n", ")\n",
"...\n", "...\n",
"evaluator = Evaluator(\n", "evaluator = Evaluator(\n",
" model=model,\n",
" dataloaders=evaluate_dataloader,\n",
" metrics={'acc': Accuracy()} \n",
" model=model, # 模型基于 torch.nn.Module\n",
" dataloaders=evaluate_dataloader, # 加载模块基于 torch.utils.data.DataLoader\n",
" metrics={'acc': Accuracy()}, # 测评方法使用 fastNLP.core.metrics.Accuracy \n",
" ...\n", " ...\n",
" driver=trainer.driver,\n",
" driver=trainer.driver, # 保持同 trainer 的 driver 一致\n",
"\tdevice=None,\n", "\tdevice=None,\n",
" ...\n", " ...\n",
")\n", ")\n",
@@ -88,7 +88,7 @@
"\n", "\n",
"注:在同一脚本中,`Trainer`和`Evaluator`使用的`driver`应当保持一致\n", "注:在同一脚本中,`Trainer`和`Evaluator`使用的`driver`应当保持一致\n",
"\n", "\n",
"  一个不能违背的原则在于:**不要将多卡的`driver`前使用单卡的`driver`**(???),这样使用可能会带来很多意想不到的错误"
"  一个不能违背的原则在于:**不要将多卡的`driver`前使用单卡的`driver`**(???),这样使用可能会带来很多意想不到的错误"
] ]
}, },
{ {
@@ -109,10 +109,10 @@
" optimizers=optimizer,\n", " optimizers=optimizer,\n",
"\t...\n", "\t...\n",
"\tdriver=\"torch\",\n", "\tdriver=\"torch\",\n",
"\tdevice=0,\n",
"\tdevice='cuda',\n",
"\t...\n", "\t...\n",
" evaluate_dataloaders=evaluate_dataloader,\n",
" metrics={'acc': Accuracy()},\n",
" evaluate_dataloaders=evaluate_dataloader, # 传入参数 evaluator_dataloaders\n",
" metrics={'acc': Accuracy()}, # 传入参数 metrics\n",
"\t...\n", "\t...\n",
")\n", ")\n",
"```" "```"
@@ -123,7 +123,7 @@
"id": "0c9c7dda", "id": "0c9c7dda",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## 2. 使用 trainer 训练模型"
"## 2. argmax 模型的搭建实例"
] ]
}, },
{ {
@@ -131,71 +131,41 @@
"id": "524ac200", "id": "524ac200",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### 2.1 argmax 模型实例\n",
"### 2.1 trainer_step 和 evaluator_step\n",
"\n", "\n",
"本节将通过训练`argmax`模型,简单介绍如何`Trainer`模块的使用方式\n",
"在`fastNLP 0.8`中,使用`pytorch.nn.Module`搭建需要训练的模型,在搭建模型过程中,除了\n",
"\n", "\n",
"  使用`pytorch`定义`argmax`模型,输入一组固定维度的向量,输出其中数值最大的数的索引\n",
"\n",
"  除了添加`pytorch`要求的`forward`方法外,还需要添加 **`train_step`** 和 **`evaluate_step`** 这两个方法"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5314482b",
"metadata": {
"pycharm": {
"is_executing": true
}
},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"\n",
"class ArgMaxModel(nn.Module):\n",
" def __init__(self, num_labels, feature_dimension):\n",
" super(ArgMaxModel, self).__init__()\n",
" self.num_labels = num_labels\n",
"\n",
" self.linear1 = nn.Linear(in_features=feature_dimension, out_features=10)\n",
" self.ac1 = nn.ReLU()\n",
" self.linear2 = nn.Linear(in_features=10, out_features=10)\n",
" self.ac2 = nn.ReLU()\n",
" self.output = nn.Linear(in_features=10, out_features=num_labels)\n",
" self.loss_fn = nn.CrossEntropyLoss()\n",
"  添加`pytorch`要求的`forward`方法外,还需要添加 **`train_step`** 和 **`evaluate_step`** 这两个方法\n",
"***\n",
"```python\n",
"class Model(torch.nn.Module):\n",
" def __init__(self):\n",
" super(Model, self).__init__()\n",
" self.loss_fn = torch.nn.CrossEntropyLoss()\n",
" pass\n",
"\n", "\n",
" def forward(self, x):\n", " def forward(self, x):\n",
" x = self.ac1(self.linear1(x))\n",
" x = self.ac2(self.linear2(x))\n",
" x = self.output(x)\n",
" return x\n",
" pass\n",
"\n", "\n",
" def train_step(self, x, y):\n", " def train_step(self, x, y):\n",
" x = self(x)\n",
" return {\"loss\": self.loss_fn(x, y)}\n",
" pred = self(x)\n",
" return {\"loss\": self.loss_fn(pred, y)}\n",
"\n", "\n",
" def evaluate_step(self, x, y):\n", " def evaluate_step(self, x, y):\n",
" x = self(x)\n",
" x = torch.max(x, dim=-1)[1]\n",
" return {\"pred\": x, \"target\": y}"
]
},
{
"cell_type": "markdown",
"id": "ca897322",
"metadata": {},
"source": [
" pred = self(x)\n",
" pred = torch.max(pred, dim=-1)[1]\n",
" return {\"pred\": pred, \"target\": y}\n",
"```\n",
"***\n",
"在`fastNLP 0.8`中,**函数`train_step`是`Trainer`中参数`train_fn`的默认值**\n", "在`fastNLP 0.8`中,**函数`train_step`是`Trainer`中参数`train_fn`的默认值**\n",
"\n", "\n",
"  由于,在`Trainer`训练时,**`Trainer`通过参数`_train_fn_`对应的模型方法获得当前数据批次的损失值**\n",
"  由于,在`Trainer`训练时,**`Trainer`通过参数`train_fn`对应的模型方法获得当前数据批次的损失值**\n",
"\n", "\n",
"  因此,在`Trainer`训练时,`Trainer`首先会寻找模型是否定义了`train_step`这一方法\n", "  因此,在`Trainer`训练时,`Trainer`首先会寻找模型是否定义了`train_step`这一方法\n",
"\n", "\n",
"    如果没有找到,那么`Trainer`会默认使用模型的`forward`函数来进行训练的前向传播过程\n", "    如果没有找到,那么`Trainer`会默认使用模型的`forward`函数来进行训练的前向传播过程\n",
"\n", "\n",
"注:在`fastNLP 0.8`中,`Trainer`要求模型通过`train_step`来返回一个字典,将损失值作为`loss`的键值\n",
"注:在`fastNLP 0.8`中,**`Trainer`要求模型通过`train_step`来返回一个字典**,**满足如`{\"loss\": loss}`的形式**\n",
"\n", "\n",
"  此外,这里也可以通过传入`Trainer`的参数`output_mapping`来实现高度化的定制,具体请见这一note(???)\n", "  此外,这里也可以通过传入`Trainer`的参数`output_mapping`来实现高度化的定制,具体请见这一note(???)\n",
"\n", "\n",
@@ -205,7 +175,11 @@
"\n", "\n",
"  从用户角度,模型通过`evaluate_step`方法来返回一个字典,内容与传入`Evaluator`的`metrics`一致\n", "  从用户角度,模型通过`evaluate_step`方法来返回一个字典,内容与传入`Evaluator`的`metrics`一致\n",
"\n", "\n",
"<!-- &emsp; 从模块角度,`fastNLP 0.8`会匹配该字典的键值和一个`metric`的更新函数的函数签名,自动地将`metric`所需要的内容传给该`metric`,也就是我们会自动进行“**参数匹配**”。 -->"
"&emsp; 从模块角度,该字典的键值和`metric`中的`update`函数的签名一致,这样的机制在传参时被称为“**参数匹配**”\n",
"\n",
"***\n",
"\n",
"![fastNLP 0.8 中,Trainer 和 Evaluator 的关系图](./figures/T0-fig-trainer-and-evaluator.png)"
] ]
}, },
{ {
@@ -213,13 +187,52 @@
"id": "fb3272eb", "id": "fb3272eb",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### 2.2 trainer 的参数匹配\n",
"### 2.2 trainer 和 evaluator 的参数匹配\n",
"\n",
"在`fastNLP 0.8`中,参数匹配涉及到两个方面,分别是在\n",
"\n",
"&emsp; 一方面,**在模型的前向传播中**,**`dataloader`向`train_step`或`evaluate_step`函数传递`batch`**\n",
"\n",
"&emsp; 另方面,**在模型的评测过程中**,**`evaluate_dataloader`向`metric`的`update`函数传递`batch`**\n",
"\n", "\n",
"`fastNLP 0.8`中的参数匹配涉及到两个方面,一是在模型训练或者评测的前向传播过程中,如果从`dataloader`中出来一个`batch`的数据是一个字典,那么我们会查看模型的`train_step`和`evaluate_step`方法的参数签名,然后对于每一个参数,我们会根据其名字从 batch 这一字典中选择出对应的数据传入进去。例如在接下来的定义`Dataset`的部分,注意`ArgMaxDatset`的`__getitem__`方法,您可以通过在`Trainer`和`Evaluator`中设置参数 `model_wo_auto_param_call`来关闭这一行为。当您关闭了这一行为后,我们会将`batch`直接传给您的`train_step`、`evaluate_step`或者 `forward`函数。\n",
"对于前者,在`Trainer`和`Evaluator`中的参数`model_wo_auto_param_call`被设置为`False`时\n",
"\n", "\n",
"二是在传入`Trainer`或者`Evaluator metrics`后,我们会在需要评测的时间点主动调用`metrics`来对`evaluate_dataloaders`进行评测,这一功能主要就是通过对`metrics`的`update`方法和一个`batch`的数据进行参数评测实现的。首先需要明确的是一个 metric 的计算通常分为 `update` 和 `get_metric`两步,其中`update`表示更新一个`batch`的评测数据,`get_metric` 表示根据已经得到的评测数据计算出最终的评测值,例如对于 `Accuracy`来说,其在`update`的时候会更新一个`batch`计算正确的数量 right_num 和计算错误的数量 total_num,最终在 `get_metric` 时返回评测值`right_num / total_num`。\n",
"&emsp; &emsp; **`fastNLP 0.8`要求`dataloader`生成的每个`batch`**,**满足如`{\"x\": x, \"y\": y}`的形式**\n",
"\n",
"&emsp; 同时,`fastNLP 0.8`会查看模型的`train_step`和`evaluate_step`方法的参数签名,并为对应参数传入对应数值\n",
"\n",
"&emsp; &emsp; **字典形式的定义**,**对应在`Dataset`定义的`__getitem__`方法中**,例如下方的`ArgMaxDatset`\n",
"\n",
"&emsp; 而在`Trainer`和`Evaluator`中的参数`model_wo_auto_param_call`被设置为`True`时\n",
"\n",
"&emsp; &emsp; `fastNLP 0.8`会将`batch`直接传给模型的`train_step`、`evaluate_step`或`forward`函数\n",
"***\n",
"```python\n",
"class Dataset(torch.utils.data.Dataset):\n",
" def __init__(self, x, y):\n",
" self.x = x\n",
" self.y = y\n",
"\n",
" def __len__(self):\n",
" return len(self.x)\n",
"\n",
" def __getitem__(self, item):\n",
" return {\"x\": self.x[item], \"y\": self.y[item]}\n",
"```\n",
"***\n",
"对于后者,首先要明确,在`Trainer`和`Evaluator`中,`metrics`的计算分为`update`和`get_metric`两步\n",
"\n", "\n",
"因为`fastNLP 0.8`的`metrics`是自动计算的(只需要传给`Trainer`或者`Evaluator`),因此其一定依赖于参数匹配。对于从`evaluate_dataloader`中生成的一个`batch`的数据,我们会查看传给 `Trainer`(最终是传给`Evaluator`)和`Evaluator`的每一个`metric`,然后查看其`update`函数的函数签名,然后根据每一个参数的名字从`batch`字典中选择出对应的数据传入进去。"
"&emsp; &emsp; **`update`函数**,**针对一个`batch`的预测结果**,计算其累计的评价指标\n",
"\n",
"&emsp; &emsp; **`get_metric`函数**,**统计`update`函数累计的评价指标**,来计算最终的评价结果\n",
"\n",
"&emsp; 例如对于`Accuracy`来说,`update`函数会更新一个`batch`的正例数量`right_num`和负例数量`total_num`\n",
"\n",
"&emsp; &emsp; 而`get_metric`函数则会返回所有`batch`的评测值`right_num / total_num`\n",
"\n",
"&emsp; 在此基础上,**`fastNLP 0.8`要求`evaluate_dataloader`生成的每个`batch`传递给对应的`metric`**\n",
"\n",
"&emsp; &emsp; **以`{\"pred\": y_pred, \"target\": y_true}`的形式**,对应其`update`函数的函数签名"
] ]
}, },
{ {
@@ -227,9 +240,65 @@
"id": "f62b7bb1", "id": "f62b7bb1",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### 2.3 trainer的实际使用\n",
"### 2.3 一个实际案例:argmax 模型\n",
"\n", "\n",
"接下来我们创建用于训练的 dataset,其接受三个参数:数据维度、数据量和随机数种子,生成指定数量的维度为 `feature_dimension` 向量,而每一个向量的标签就是该向量中最大值的索引。"
"下文将通过训练`argmax`模型,简单介绍如何`Trainer`模块的使用方式\n",
"\n",
"&emsp; 首先,使用`pytorch.nn.Module`定义`argmax`模型,目标是输入一组固定维度的向量,输出其中数值最大的数的索引"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "5314482b",
"metadata": {
"pycharm": {
"is_executing": false
}
},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"\n",
"class ArgMaxModel(nn.Module):\n",
" def __init__(self, num_labels, feature_dimension):\n",
" super(ArgMaxModel, self).__init__()\n",
" self.num_labels = num_labels\n",
"\n",
" self.linear1 = nn.Linear(in_features=feature_dimension, out_features=10)\n",
" self.ac1 = nn.ReLU()\n",
" self.linear2 = nn.Linear(in_features=10, out_features=10)\n",
" self.ac2 = nn.ReLU()\n",
" self.output = nn.Linear(in_features=10, out_features=num_labels)\n",
" self.loss_fn = nn.CrossEntropyLoss()\n",
"\n",
" def forward(self, x):\n",
" pred = self.ac1(self.linear1(x))\n",
" pred = self.ac2(self.linear2(pred))\n",
" pred = self.output(pred)\n",
" return pred\n",
"\n",
" def train_step(self, x, y):\n",
" pred = self(x)\n",
" return {\"loss\": self.loss_fn(pred, y)}\n",
"\n",
" def evaluate_step(self, x, y):\n",
" pred = self(x)\n",
" pred = torch.max(pred, dim=-1)[1]\n",
" return {\"pred\": pred, \"target\": y}"
]
},
{
"cell_type": "markdown",
"id": "71f3fa6b",
"metadata": {},
"source": [
"&emsp; 接着,使用`torch.utils.data.Dataset`定义`ArgMaxDataset`数据集\n",
"\n",
"&emsp; &emsp; 数据集包含三个参数:维度`feature_dimension`、数据量`data_num`和随机种子`seed`\n",
"\n",
"&emsp; &emsp; 数据及初始化是,自动生成指定维度的向量,并为每个向量标注出其中最大值的索引作为预测标签"
] ]
}, },
{ {
@@ -245,7 +314,7 @@
"source": [ "source": [
"from torch.utils.data import Dataset\n", "from torch.utils.data import Dataset\n",
"\n", "\n",
"class ArgMaxDatset(Dataset):\n",
"class ArgMaxDataset(Dataset):\n",
" def __init__(self, feature_dimension, data_num=1000, seed=0):\n", " def __init__(self, feature_dimension, data_num=1000, seed=0):\n",
" self.num_labels = feature_dimension\n", " self.num_labels = feature_dimension\n",
" self.feature_dimension = feature_dimension\n", " self.feature_dimension = feature_dimension\n",
@@ -269,7 +338,9 @@
"id": "2cb96332", "id": "2cb96332",
"metadata": {}, "metadata": {},
"source": [ "source": [
"现在准备好数据和模型。"
"&emsp; 然后,根据`ArgMaxModel`类初始化模型实例,保持输入维度`feature_dimension`和输出标签数量`num_labels`一致\n",
"\n",
"&emsp; &emsp; 再根据`ArgMaxDataset`类初始化两个数据集实例,分别用来模型测试和模型评测,数据量各1000笔"
] ]
}, },
{ {
@@ -283,16 +354,10 @@
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"from torch.utils.data import DataLoader\n",
"\n",
"train_dataset = ArgMaxDatset(feature_dimension=10, data_num=1000)\n",
"evaluate_dataset = ArgMaxDatset(feature_dimension=10, data_num=100)\n",
"\n",
"train_dataloader = DataLoader(train_dataset, batch_size=8, shuffle=True)\n",
"evaluate_dataloader = DataLoader(evaluate_dataset, batch_size=8)\n",
"model = ArgMaxModel(num_labels=10, feature_dimension=10)\n",
"\n", "\n",
"# num_labels 设置为 10,与 feature_dimension 保持一致,因为我们是预测十个位置中哪一个的概率最大。\n",
"model = ArgMaxModel(num_labels=10, feature_dimension=10)"
"train_dataset = ArgMaxDataset(feature_dimension=10, data_num=1000)\n",
"evaluate_dataset = ArgMaxDataset(feature_dimension=10, data_num=100)"
] ]
}, },
{ {
@@ -300,12 +365,33 @@
"id": "4e7d25ee", "id": "4e7d25ee",
"metadata": {}, "metadata": {},
"source": [ "source": [
"将优化器也定义好。"
"&emsp; 此外,使用`torch.utils.data.DataLoader`初始化两个数据加载模块,批量大小同为8,分别用于训练和测评"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 4, "execution_count": 4,
"id": "363b5b09",
"metadata": {},
"outputs": [],
"source": [
"from torch.utils.data import DataLoader\n",
"\n",
"train_dataloader = DataLoader(train_dataset, batch_size=8, shuffle=True)\n",
"evaluate_dataloader = DataLoader(evaluate_dataset, batch_size=8)"
]
},
{
"cell_type": "markdown",
"id": "c8d4443f",
"metadata": {},
"source": [
"&emsp; 最后,使用`torch.optim.SGD`初始化一个优化模块,基于随机梯度下降法"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "dc28a2d9", "id": "dc28a2d9",
"metadata": { "metadata": {
"pycharm": { "pycharm": {
@@ -321,15 +407,33 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "4f1fba81",
"id": "eb8ca6cf",
"metadata": {},
"source": [
"## 3. 使用 fastNLP 0.8 训练 argmax 模型\n",
"\n",
"### 3.1 trainer 外部初始化的 evaluator"
]
},
{
"cell_type": "markdown",
"id": "55145553",
"metadata": {}, "metadata": {},
"source": [ "source": [
"现在万事俱备,开始使用 Trainer 进行训练!"
"通过从`fastNLP`库中导入`Trainer`类,初始化`trainer`实例,对模型进行训练\n",
"\n",
"&emsp; 需要导入预先定义好的模型`model`、对应的数据加载模块`train_dataloader`、优化模块`optimizer`\n",
"\n",
"&emsp; 通过`progress_bar`设定进度条格式,默认为`\"auto\"`,此外还有`\"rich\"`、`\"raw\"`和`None`\n",
"\n",
"&emsp; &emsp; 但对于`\"auto\"`和`\"rich\"`格式,训练结束后进度条会不显示(???)\n",
"\n",
"&emsp; 通过`n_epochs`设定优化迭代轮数,默认为20;全部`Trainer`的全部变量与函数可以通过`dir(trainer)`查询"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 5,
"execution_count": 6,
"id": "b51b7a2d", "id": "b51b7a2d",
"metadata": { "metadata": {
"pycharm": { "pycharm": {
@@ -349,167 +453,20 @@
}, },
"metadata": {}, "metadata": {},
"output_type": "display_data" "output_type": "display_data"
},
{
"data": {
"text/plain": [
"['__annotations__',\n",
" '__class__',\n",
" '__delattr__',\n",
" '__dict__',\n",
" '__dir__',\n",
" '__doc__',\n",
" '__eq__',\n",
" '__format__',\n",
" '__ge__',\n",
" '__getattribute__',\n",
" '__gt__',\n",
" '__hash__',\n",
" '__init__',\n",
" '__init_subclass__',\n",
" '__le__',\n",
" '__lt__',\n",
" '__module__',\n",
" '__ne__',\n",
" '__new__',\n",
" '__reduce__',\n",
" '__reduce_ex__',\n",
" '__repr__',\n",
" '__setattr__',\n",
" '__sizeof__',\n",
" '__str__',\n",
" '__subclasshook__',\n",
" '__weakref__',\n",
" '_check_callback_called_legality',\n",
" '_check_train_batch_loop_legality',\n",
" '_custom_callbacks',\n",
" '_driver',\n",
" '_evaluate_dataloaders',\n",
" '_fetch_matched_fn_callbacks',\n",
" '_set_num_eval_batch_per_dl',\n",
" '_train_batch_loop',\n",
" '_train_dataloader',\n",
" '_train_step',\n",
" '_train_step_signature_fn',\n",
" 'accumulation_steps',\n",
" 'add_callback_fn',\n",
" 'backward',\n",
" 'batch_idx_in_epoch',\n",
" 'batch_step_fn',\n",
" 'callback_manager',\n",
" 'check_batch_step_fn',\n",
" 'cur_epoch_idx',\n",
" 'data_device',\n",
" 'dataloader',\n",
" 'device',\n",
" 'driver',\n",
" 'driver_name',\n",
" 'epoch_validate',\n",
" 'evaluate_batch_step_fn',\n",
" 'evaluate_dataloaders',\n",
" 'evaluate_every',\n",
" 'evaluate_fn',\n",
" 'evaluator',\n",
" 'extract_loss_from_outputs',\n",
" 'fp16',\n",
" 'get_no_sync_context',\n",
" 'global_forward_batches',\n",
" 'has_checked_train_batch_loop',\n",
" 'input_mapping',\n",
" 'kwargs',\n",
" 'larger_better',\n",
" 'load',\n",
" 'load_model',\n",
" 'marker',\n",
" 'metrics',\n",
" 'model',\n",
" 'model_device',\n",
" 'monitor',\n",
" 'move_data_to_device',\n",
" 'n_epochs',\n",
" 'num_batches_per_epoch',\n",
" 'on',\n",
" 'on_after_backward',\n",
" 'on_after_optimizers_step',\n",
" 'on_after_trainer_initialized',\n",
" 'on_after_zero_grad',\n",
" 'on_before_backward',\n",
" 'on_before_optimizers_step',\n",
" 'on_before_zero_grad',\n",
" 'on_exception',\n",
" 'on_fetch_data_begin',\n",
" 'on_fetch_data_end',\n",
" 'on_load_checkpoint',\n",
" 'on_load_model',\n",
" 'on_sanity_check_begin',\n",
" 'on_sanity_check_end',\n",
" 'on_save_checkpoint',\n",
" 'on_save_model',\n",
" 'on_train_batch_begin',\n",
" 'on_train_batch_end',\n",
" 'on_train_begin',\n",
" 'on_train_end',\n",
" 'on_train_epoch_begin',\n",
" 'on_train_epoch_end',\n",
" 'on_validate_begin',\n",
" 'on_validate_end',\n",
" 'optimizers',\n",
" 'output_mapping',\n",
" 'run',\n",
" 'save',\n",
" 'save_model',\n",
" 'set_grad_to_none',\n",
" 'state',\n",
" 'step',\n",
" 'step_validate',\n",
" 'total_batches',\n",
" 'train_batch_loop',\n",
" 'train_dataloader',\n",
" 'train_fn',\n",
" 'train_step',\n",
" 'trainer_state',\n",
" 'zero_grad']"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
} }
], ],
"source": [ "source": [
"from fastNLP import Trainer\n", "from fastNLP import Trainer\n",
"\n", "\n",
"# 定义一个 Trainer\n",
"trainer = Trainer(\n", "trainer = Trainer(\n",
" model=model,\n", " model=model,\n",
" driver=\"torch\", # 使用 pytorch 进行训练\n",
" device=0, # 使用 GPU:0\n",
" driver=\"torch\",\n",
" device='cuda',\n",
" train_dataloader=train_dataloader,\n", " train_dataloader=train_dataloader,\n",
" optimizers=optimizer,\n", " optimizers=optimizer,\n",
" n_epochs=10, # 训练 40 个 epoch\n",
" progress_bar=\"rich\"\n",
")\n",
"dir(trainer)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "f8fe9c32",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"FullArgSpec(args=['self', 'num_train_batch_per_epoch', 'num_eval_batch_per_dl', 'num_eval_sanity_batch', 'resume_from', 'resume_training', 'catch_KeyboardInterrupt'], varargs=None, varkw=None, defaults=(-1, -1, 2, None, True, None), kwonlyargs=[], kwonlydefaults=None, annotations={'num_train_batch_per_epoch': <class 'int'>, 'num_eval_batch_per_dl': <class 'int'>, 'num_eval_sanity_batch': <class 'int'>, 'resume_from': <class 'str'>, 'resume_training': <class 'bool'>})\n"
]
}
],
"source": [
"import inspect \n",
"\n",
"print(inspect.getfullargspec(trainer.run))"
" n_epochs=10, # 设定迭代轮数 \n",
" progress_bar=\"auto\" # 设定进度条格式\n",
")"
] ]
}, },
{ {
@@ -517,16 +474,20 @@
"id": "6e202d6e", "id": "6e202d6e",
"metadata": {}, "metadata": {},
"source": [ "source": [
"没有问题,那么开始真正的训练!"
"通过使用`Trainer`类的`run`函数,进行训练\n",
"\n",
"&emsp; 其中,可以通过参数`num_train_batch_per_epoch`决定每个`epoch`运行多少个`batch`后停止,默认全部\n",
"\n",
"&emsp; 此外,可以通过`inspect.getfullargspec(trainer.run)`查询`run`函数的全部参数列表"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 9,
"execution_count": 7,
"id": "ba047ead", "id": "ba047ead",
"metadata": { "metadata": {
"pycharm": { "pycharm": {
"is_executing": false
"is_executing": true
} }
}, },
"outputs": [ "outputs": [
@@ -585,29 +546,27 @@
"trainer.run()" "trainer.run()"
] ]
}, },
{
"cell_type": "markdown",
"id": "eb8ca6cf",
"metadata": {},
"source": [
"## 3. 使用 evaluator 评测模型"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "c16c5fa4", "id": "c16c5fa4",
"metadata": {}, "metadata": {},
"source": [ "source": [
"模型训练好了我们开始使用 Evaluator 进行评测,查看效果怎么样吧。"
"通过从`fastNLP`库中导入`Evaluator`类,初始化`evaluator`实例,对模型进行评测\n",
"\n",
"&emsp; 需要导入预先定义好的模型`model`、对应的数据加载模块`evaluate_dataloader`\n",
"\n",
"&emsp; 需要注意的是评测方法`metrics`,设定为形如`{'acc': fastNLP.core.metrics.Accuracy()}`的字典\n",
"\n",
"&emsp; 类似地,也可以通过`progress_bar`限定进度条格式,默认为`\"auto\"`"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 10,
"execution_count": 8,
"id": "1c6b6b36", "id": "1c6b6b36",
"metadata": { "metadata": {
"pycharm": { "pycharm": {
"is_executing": false
"is_executing": true
} }
}, },
"outputs": [], "outputs": [],
@@ -617,100 +576,32 @@
"\n", "\n",
"evaluator = Evaluator(\n", "evaluator = Evaluator(\n",
" model=model,\n", " model=model,\n",
" driver=trainer.driver, # 使用 trainer 已经启动的 driver\n",
" driver=trainer.driver, # 需要使用 trainer 已经启动的 driver\n",
" device=None,\n", " device=None,\n",
" dataloaders=evaluate_dataloader,\n", " dataloaders=evaluate_dataloader,\n",
" metrics={'acc': Accuracy()} # 注意这里一定得是一个字典;\n",
" metrics={'acc': Accuracy()} # 需要严格使用此种形式的字典\n",
")" ")"
] ]
}, },
{ {
"cell_type": "code",
"execution_count": 11,
"id": "257061df",
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/plain": [
"['__annotations__',\n",
" '__class__',\n",
" '__delattr__',\n",
" '__dict__',\n",
" '__dir__',\n",
" '__doc__',\n",
" '__eq__',\n",
" '__format__',\n",
" '__ge__',\n",
" '__getattribute__',\n",
" '__gt__',\n",
" '__hash__',\n",
" '__init__',\n",
" '__init_subclass__',\n",
" '__le__',\n",
" '__lt__',\n",
" '__module__',\n",
" '__ne__',\n",
" '__new__',\n",
" '__reduce__',\n",
" '__reduce_ex__',\n",
" '__repr__',\n",
" '__setattr__',\n",
" '__sizeof__',\n",
" '__str__',\n",
" '__subclasshook__',\n",
" '__weakref__',\n",
" '_dist_sampler',\n",
" '_evaluate_batch_loop',\n",
" '_evaluate_step',\n",
" '_evaluate_step_signature_fn',\n",
" '_metric_wrapper',\n",
" '_metrics',\n",
" 'dataloaders',\n",
" 'device',\n",
" 'driver',\n",
" 'evaluate_batch_loop',\n",
" 'evaluate_batch_step_fn',\n",
" 'evaluate_fn',\n",
" 'evaluate_step',\n",
" 'finally_progress_bar',\n",
" 'get_dataloader_metric',\n",
" 'input_mapping',\n",
" 'metrics',\n",
" 'metrics_wrapper',\n",
" 'model',\n",
" 'model_use_eval_mode',\n",
" 'move_data_to_device',\n",
" 'output_mapping',\n",
" 'progress_bar',\n",
" 'remove_progress_bar',\n",
" 'reset',\n",
" 'run',\n",
" 'separator',\n",
" 'start_progress_bar',\n",
" 'update',\n",
" 'update_progress_bar',\n",
" 'verbose']"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"cell_type": "markdown",
"id": "8157bb9b",
"metadata": {},
"source": [ "source": [
"dir(evaluator)"
"通过使用`Evaluator`类的`run`函数,进行训练\n",
"\n",
"&emsp; 其中,可以通过参数`num_eval_batch_per_dl`决定每个`evaluate_dataloader`运行多少个`batch`停止,默认全部\n",
"\n",
"&emsp; 最终,输出形如`{'acc#acc': acc}`的字典,中间的进度条会在运行结束后丢弃掉(???)"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 12,
"execution_count": 9,
"id": "f7cb0165", "id": "f7cb0165",
"metadata": { "metadata": {
"pycharm": { "pycharm": {
"is_executing": false
"is_executing": true
} }
}, },
"outputs": [ "outputs": [
@@ -750,11 +641,11 @@
{ {
"data": { "data": {
"text/html": [ "text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'acc#acc'</span>: <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">0.3</span><span style=\"font-weight: bold\">}</span>\n",
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'acc#acc'</span>: <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">0.43</span><span style=\"font-weight: bold\">}</span>\n",
"</pre>\n" "</pre>\n"
], ],
"text/plain": [ "text/plain": [
"\u001b[1m{\u001b[0m\u001b[32m'acc#acc'\u001b[0m: \u001b[1;36m0.3\u001b[0m\u001b[1m}\u001b[0m\n"
"\u001b[1m{\u001b[0m\u001b[32m'acc#acc'\u001b[0m: \u001b[1;36m0.43\u001b[0m\u001b[1m}\u001b[0m\n"
] ]
}, },
"metadata": {}, "metadata": {},
@@ -763,10 +654,10 @@
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"{'acc#acc': 0.3}"
"{'acc#acc': 0.43}"
] ]
}, },
"execution_count": 12,
"execution_count": 9,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@@ -780,39 +671,37 @@
"id": "dd9f68fa", "id": "dd9f68fa",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## 4. 在 trainer 中加入 metric 来自动评测;"
]
},
{
"cell_type": "markdown",
"id": "ca97c9a4",
"metadata": {},
"source": [
"现在我们尝试在训练过程中进行评测。"
"### 3.2 trainer 内部初始化的 evaluator \n",
"\n",
"通过在初始化`trainer`实例时加入`evaluate_dataloaders`和`metrics`,可以实现在训练过程中进行评测\n",
"\n",
"&emsp; 通过`progress_bar`同时设定训练和评估进度条格式,训练结束后进度条会不显示(???)\n",
"\n",
"&emsp; **通过`evaluate_every`设定评估频率**,可以为负数、正数或者函数:\n",
"\n",
"&emsp; &emsp; **为负数时**,**表示每隔几个`epoch`评估一次**;**为正数时**,**则表示每隔几个`batch`评估一次**"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 13,
"execution_count": 10,
"id": "183c7d19", "id": "183c7d19",
"metadata": { "metadata": {
"pycharm": { "pycharm": {
"is_executing": false
"is_executing": true
} }
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"# 重新定义一个 Trainer\n",
"\n",
"trainer = Trainer(\n", "trainer = Trainer(\n",
" model=model,\n", " model=model,\n",
" driver=trainer.driver, # 因为我们是在同一脚本中,因此这里的 driver 同样需要重用;\n",
" driver=trainer.driver, # 因为是在同个脚本中,这里的 driver 同样需要重用\n",
" train_dataloader=train_dataloader,\n", " train_dataloader=train_dataloader,\n",
" evaluate_dataloaders=evaluate_dataloader,\n", " evaluate_dataloaders=evaluate_dataloader,\n",
" metrics={'acc': Accuracy()},\n", " metrics={'acc': Accuracy()},\n",
" optimizers=optimizer,\n", " optimizers=optimizer,\n",
" n_epochs=10, # 训练 40 个 epoch;\n",
" evaluate_every=-1, # 表示每一个 epoch 的结束会进行 evaluate;\n",
" n_epochs=10, \n",
" evaluate_every=-1, # 表示每个 epoch 的结束进行评估\n",
")" ")"
] ]
}, },
@@ -821,16 +710,18 @@
"id": "714cc404", "id": "714cc404",
"metadata": {}, "metadata": {},
"source": [ "source": [
"再次训练。"
"通过使用`Trainer`类的`run`函数,进行训练\n",
"\n",
"&emsp; 还可以通过参数`num_eval_sanity_batch`决定每次训练前运行多少个`evaluate_batch`进行评测,默认为2"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 14,
"execution_count": 11,
"id": "2e4daa2c", "id": "2e4daa2c",
"metadata": { "metadata": {
"pycharm": { "pycharm": {
"is_executing": false
"is_executing": true
} }
}, },
"outputs": [ "outputs": [
@@ -884,96 +775,6 @@
"source": [ "source": [
"trainer.run()" "trainer.run()"
] ]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "eabda5eb",
"metadata": {},
"outputs": [],
"source": [
"evaluator = Evaluator(\n",
" model=model,\n",
" driver=trainer.driver, # 使用 trainer 已经启动的 driver;\n",
" dataloaders=evaluate_dataloader,\n",
" metrics={'acc': Accuracy()} # 注意这里一定得是一个字典;\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "a310d157",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"></pre>\n"
],
"text/plain": []
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"></pre>\n"
],
"text/plain": []
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">\n",
"</pre>\n"
],
"text/plain": [
"\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'acc#acc'</span>: <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">0.5</span><span style=\"font-weight: bold\">}</span>\n",
"</pre>\n"
],
"text/plain": [
"\u001b[1m{\u001b[0m\u001b[32m'acc#acc'\u001b[0m: \u001b[1;36m0.5\u001b[0m\u001b[1m}\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"{'acc#acc': 0.5}"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"evaluator.run()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1ef78f0",
"metadata": {},
"outputs": [],
"source": []
} }
], ],
"metadata": { "metadata": {


BIN
tutorials/figures/T0-fig-trainer-and-evaluator.png View File

Before After
Width: 1303  |  Height: 821  |  Size: 105 kB

Loading…
Cancel
Save