From 08416a3a6c4e6b5afbd8cc71773416fc23ea2bc0 Mon Sep 17 00:00:00 2001 From: yhcc Date: Sun, 5 Jun 2022 19:51:27 +0800 Subject: [PATCH 01/52] =?UTF-8?q?1.=E4=BF=AE=E5=A4=8Dlr=5Fschedulder?= =?UTF-8?q?=E7=9A=84=E8=B0=83=E7=94=A8=E6=97=B6=E6=9C=BA=E9=97=AE=E9=A2=98?= =?UTF-8?q?;2.=E4=BF=AE=E5=A4=8Dreplace=5Fsampler=E7=9A=84=E5=88=9D?= =?UTF-8?q?=E5=A7=8B=E5=8C=96=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../core/callbacks/lr_scheduler_callback.py | 2 +- fastNLP/core/drivers/paddle_driver/utils.py | 18 +++++------------- fastNLP/core/drivers/torch_driver/utils.py | 13 ++++--------- 3 files changed, 10 insertions(+), 23 deletions(-) diff --git a/fastNLP/core/callbacks/lr_scheduler_callback.py b/fastNLP/core/callbacks/lr_scheduler_callback.py index 37d089bd..a71428ca 100644 --- a/fastNLP/core/callbacks/lr_scheduler_callback.py +++ b/fastNLP/core/callbacks/lr_scheduler_callback.py @@ -19,7 +19,7 @@ class LRSchedCallback(Callback): self.scheduler = scheduler self.step_on = 0 if step_on == 'batch' else 1 - def on_before_optimizers_step(self, trainer, optimizers): + def on_after_optimizers_step(self, trainer, optimizers): if self.step_on == 0: self.scheduler.step() diff --git a/fastNLP/core/drivers/paddle_driver/utils.py b/fastNLP/core/drivers/paddle_driver/utils.py index b1815fbd..e53f4066 100644 --- a/fastNLP/core/drivers/paddle_driver/utils.py +++ b/fastNLP/core/drivers/paddle_driver/utils.py @@ -178,19 +178,11 @@ def replace_batch_sampler(dataloader: "DataLoader", batch_sampler: "BatchSampler # 中寻找;VAR_KEYWORD 代表 **kwargs has_variadic_kwargs = any(v.kind is v.VAR_KEYWORD for k, v in init_params.items()) if has_variadic_kwargs: - init_params.update(dict(inspect.signature(DataLoader.__init__).parameters)) - del init_params["self"] - - # 因为我们刚才可能用 DataLoader 的默认参数将用户定制的 dataloader 的参数覆盖掉了,因此需要重新弄一遍; - # 将同时在实例名和参数名中出现且不是默认值的参数收集起来 - non_default_params = {name for name, p in init_params.items() if - 
name in instance_attrs and p.default != instance_attrs[name]} - # add `dataset` as it might have been replaced with `*args` - non_default_params.add("dataset") - - # 收集不是默认值的参数和它的值 - reconstruct_args = {k: v for k, v in instance_attrs.items() if k in non_default_params} - # persistent_workers 在类中的对应成员带有下划线,因此添加进来 + for key, value in dict(inspect.signature(DataLoader.__init__).parameters).items(): + if key not in init_params and key != 'self': + init_params[key] = value + + reconstruct_args = {k: v for k, v in instance_attrs.items() if k in init_params} reconstruct_args.update({ "batch_sampler": batch_sampler, "shuffle": False, "drop_last": False, "batch_size": 1, "persistent_workers": dataloader._persistent_workers, diff --git a/fastNLP/core/drivers/torch_driver/utils.py b/fastNLP/core/drivers/torch_driver/utils.py index 2d13a8e8..a874bf3b 100644 --- a/fastNLP/core/drivers/torch_driver/utils.py +++ b/fastNLP/core/drivers/torch_driver/utils.py @@ -189,16 +189,11 @@ def replace_sampler(dataloader: "DataLoader", sampler): # 中寻找; has_variadic_kwargs = any(v.kind is v.VAR_KEYWORD for k, v in init_params.items()) if has_variadic_kwargs: - init_params.update(dict(inspect.signature(DataLoader.__init__).parameters)) - del init_params["self"] + for key, value in dict(inspect.signature(DataLoader.__init__).parameters).items(): + if key not in init_params and key != 'self': + init_params[key] = value - # 因为我们刚才可能用 DataLoader 的默认参数将用户定制的 dataloader 的参数覆盖掉了,因此需要重新弄一遍; - non_default_params = {name for name, p in init_params.items() if - name in instance_attrs and p.default != instance_attrs[name]} - # add `dataset` as it might have been replaced with `*args` - non_default_params.add("dataset") - - reconstruct_args = {k: v for k, v in instance_attrs.items() if k in non_default_params} + reconstruct_args = {k: v for k, v in instance_attrs.items() if k in init_params} reconstruct_args.update(_dataloader_init_kwargs_resolve_sampler(dataloader, sampler)) required_args = { From 
8d0602f5ded9b4c8c28b86c0e715ef8e5ea67c9f Mon Sep 17 00:00:00 2001 From: YWMditto Date: Sun, 5 Jun 2022 20:07:36 +0800 Subject: [PATCH 02/52] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=20torch=5Fdriver/utils?= =?UTF-8?q?/replace=5Fsampler=20=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/drivers/torch_driver/utils.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/fastNLP/core/drivers/torch_driver/utils.py b/fastNLP/core/drivers/torch_driver/utils.py index a874bf3b..13281cfe 100644 --- a/fastNLP/core/drivers/torch_driver/utils.py +++ b/fastNLP/core/drivers/torch_driver/utils.py @@ -189,11 +189,18 @@ def replace_sampler(dataloader: "DataLoader", sampler): # 中寻找; has_variadic_kwargs = any(v.kind is v.VAR_KEYWORD for k, v in init_params.items()) if has_variadic_kwargs: + # 这里之所以这样写是因为用户自己定制的 Dataloader 中名字一样的参数所设置的默认值可能不同;因此不能直接使用 update 覆盖掉了; for key, value in dict(inspect.signature(DataLoader.__init__).parameters).items(): if key not in init_params and key != 'self': init_params[key] = value - reconstruct_args = {k: v for k, v in instance_attrs.items() if k in init_params} + # 如果初始化dataloader所使用的参数不是默认值,那么我们需要将其记录下来用于重新初始化时设置; + non_default_params = {name for name, p in init_params.items() if + name in instance_attrs and p.default != instance_attrs[name]} + # add `dataset` as it might have been replaced with `*args` + non_default_params.add("dataset") + + reconstruct_args = {k: v for k, v in instance_attrs.items() if k in non_default_params} reconstruct_args.update(_dataloader_init_kwargs_resolve_sampler(dataloader, sampler)) required_args = { From 47b7c4e832be06cf87fda2bd2ed7c5f65b2029c9 Mon Sep 17 00:00:00 2001 From: yhcc Date: Sun, 5 Jun 2022 23:15:51 +0800 Subject: [PATCH 03/52] =?UTF-8?q?=E5=A2=9E=E5=8A=A0set=5Fdist=5Frepro=5Fda?= =?UTF-8?q?taloader=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
fastNLP/core/drivers/paddle_driver/utils.py | 8 +- .../core/drivers/paddle_driver/test_fleet.py | 112 +++++++++++++++++- .../paddle_driver/test_single_device.py | 83 +++++++++++++ tests/core/drivers/torch_driver/test_ddp.py | 111 +++++++++++++++++ .../torch_driver/test_single_device.py | 83 +++++++++++++ 5 files changed, 395 insertions(+), 2 deletions(-) diff --git a/fastNLP/core/drivers/paddle_driver/utils.py b/fastNLP/core/drivers/paddle_driver/utils.py index e53f4066..9f35cf2a 100644 --- a/fastNLP/core/drivers/paddle_driver/utils.py +++ b/fastNLP/core/drivers/paddle_driver/utils.py @@ -182,7 +182,13 @@ def replace_batch_sampler(dataloader: "DataLoader", batch_sampler: "BatchSampler if key not in init_params and key != 'self': init_params[key] = value - reconstruct_args = {k: v for k, v in instance_attrs.items() if k in init_params} + # 如果初始化dataloader所使用的参数不是默认值,那么我们需要将其记录下来用于重新初始化时设置; + non_default_params = {name for name, p in init_params.items() if + name in instance_attrs and p.default != instance_attrs[name]} + # add `dataset` as it might have been replaced with `*args` + non_default_params.add("dataset") + + reconstruct_args = {k: v for k, v in instance_attrs.items() if k in non_default_params} reconstruct_args.update({ "batch_sampler": batch_sampler, "shuffle": False, "drop_last": False, "batch_size": 1, "persistent_workers": dataloader._persistent_workers, diff --git a/tests/core/drivers/paddle_driver/test_fleet.py b/tests/core/drivers/paddle_driver/test_fleet.py index 93d3e832..4421e4b1 100644 --- a/tests/core/drivers/paddle_driver/test_fleet.py +++ b/tests/core/drivers/paddle_driver/test_fleet.py @@ -13,6 +13,8 @@ from tests.helpers.models.paddle_model import PaddleNormalModel_Classification_1 from tests.helpers.datasets.paddle_data import PaddleNormalDataset, PaddleNormalXYDataset from tests.helpers.utils import magic_argv_env_context from fastNLP.envs.distributed import rank_zero_rm +from fastNLP import prepare_paddle_dataloader +from 
fastNLP.core.drivers.paddle_driver.dist_utils import fastnlp_paddle_all_gather from fastNLP.envs.imports import _NEED_IMPORT_PADDLE if _NEED_IMPORT_PADDLE: import paddle @@ -814,4 +816,112 @@ class TestSaveLoad: assert len(left_y_batches | already_seen_y_set) == len(self.dataset) / num_replicas finally: - rank_zero_rm(path) \ No newline at end of file + rank_zero_rm(path) + + +@pytest.mark.torch +@magic_argv_env_context +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +def test_shuffle_dataloader(shuffle, batch_size, drop_last, reproducible=True): + try: + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 200 + dataset = PaddleNormalXYDataset(num_samples) + dl = prepare_paddle_dataloader(dataset, shuffle=shuffle, batch_size=batch_size, drop_last=drop_last) + model = PaddleNormalModel_Classification_1(10, 32) + device = [0, 1] + driver = PaddleFleetDriver(model, parallel_device=device) + driver.setup() + dl = driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + flags.append(batch['x'].shape[0] == batch_size) + data.extend(batch['x'].reshape(-1).tolist()) + + _num_samples = num_samples//2 + + if drop_last and _num_samples%batch_size != 0: + assert len(data)!=_num_samples + assert all(flags) == True + elif _num_samples%batch_size!=0: + assert flags[-1] is False + else: + assert len(data) == _num_samples + + if not shuffle: + for i in range(1, len(data)-1): + assert data[i]>data[i-1] + else: + flags = [] + for i in range(1, len(data)-1): + flags.append(data[i]>data[i-1]) + assert all(flags) is False + datas = fastnlp_paddle_all_gather(data) + if drop_last: + assert len(set(datas[0] + datas[1])) == num_samples-_num_samples%batch_size*2 + else: + assert len(set(datas[0] + datas[1])) == num_samples + finally: + if dist.is_initialized(): + dist.barrier() + 
dist.destroy_process_group() + + +@pytest.mark.torch +@magic_argv_env_context +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +def test_batch_sampler_dataloader(shuffle, batch_size, drop_last, reproducible=True): + try: + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 200 + num_device = 2 + dataset = PaddleNormalXYDataset(num_samples) + sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, + shuffle=shuffle, num_batch_per_bucket=2) + dl = prepare_paddle_dataloader(dataset, batch_sampler=sampler) + model = PaddleNormalModel_Classification_1(10, 32) + device = [0, 1] + driver = PaddleFleetDriver(model, parallel_device=device) + driver.setup() + dl = driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + d = batch['x'].reshape(-1).tolist() + diff = max(d) - min(d) + assert diffdata[i-1] + else: + flags = [] + for i in range(1, len(data)): + flags.append(data[i]>data[i-1]) + assert all(flags) is False + + +@pytest.mark.torch +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +@pytest.mark.parametrize("reproducible", ([True, False])) +def test_batch_sampler_dataloader(shuffle, batch_size, drop_last, reproducible): + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 200 + dataset = PaddleNormalXYDataset(num_samples) + sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, + shuffle=shuffle, num_batch_per_bucket=2) + dl = prepare_paddle_dataloader(dataset, batch_sampler=sampler) + model = PaddleNormalModel_Classification_1(1, 2) + driver = PaddleSingleDriver(model, device="cpu") + dl = driver.set_dist_repro_dataloader(dataloader=dl, 
reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + d = batch['x'].reshape(-1).tolist() + diff = max(d) - min(d) + assert diffdata[i-1] + else: + flags = [] + for i in range(1, len(data)-1): + flags.append(data[i]>data[i-1]) + assert all(flags) is False + datas = fastnlp_torch_all_gather(data) + if drop_last: + assert len(set(datas[0] + datas[1])) == num_samples-_num_samples%batch_size*2 + else: + assert len(set(datas[0] + datas[1])) == num_samples + finally: + if dist.is_initialized(): + dist.barrier() + dist.destroy_process_group() + + +@pytest.mark.torch +@magic_argv_env_context +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +def test_batch_sampler_dataloader(shuffle, batch_size, drop_last, reproducible=True): + try: + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 200 + num_device = 2 + dataset = TorchNormalXYDataset(num_samples) + sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, + shuffle=shuffle, num_batch_per_bucket=2) + dl = prepare_torch_dataloader(dataset, batch_sampler=sampler) + model = TorchNormalModel_Classification_1(10, 32) + device = [torch.device(i) for i in [0, 1]] + driver = TorchDDPDriver(model, parallel_device=device) + driver.setup() + dl = driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + d = batch['x'].reshape(-1).tolist() + diff = max(d) - min(d) + assert diffdata[i-1] + else: + flags = [] + for i in range(1, len(data)): + flags.append(data[i]>data[i-1]) + assert all(flags) is False + + +@pytest.mark.torch +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +@pytest.mark.parametrize("reproducible", ([True, False])) +def 
test_batch_sampler_dataloader(shuffle, batch_size, drop_last, reproducible): + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 100 + dataset = TorchNormalXYDataset(num_samples) + sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, + shuffle=shuffle, num_batch_per_bucket=2) + dl = prepare_torch_dataloader(dataset, batch_sampler=sampler) + model = TorchNormalModel_Classification_1(10, 32) + driver = TorchSingleDriver(model, device="cpu") + dl = driver.set_dist_repro_dataloader(dataloader=dl, reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + d = batch['x'].reshape(-1).tolist() + diff = max(d) - min(d) + assert diff Date: Sun, 5 Jun 2022 23:55:38 +0000 Subject: [PATCH 04/52] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=96=B0=E5=A2=9E?= =?UTF-8?q?=E7=9A=84set=5Fdist=5Frepro=5Fdataloader=E5=87=BD=E6=95=B0?= =?UTF-8?q?=E6=B5=8B=E8=AF=95=E4=BE=8B=E5=9C=A8paddle=E6=83=85=E5=86=B5?= =?UTF-8?q?=E4=B8=8B=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../core/dataloaders/paddle_dataloader/fdl.py | 8 +- .../core/drivers/paddle_driver/test_fleet.py | 205 +++++++++--------- .../paddle_driver/test_single_device.py | 160 +++++++------- 3 files changed, 180 insertions(+), 193 deletions(-) diff --git a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py index 37130e3e..c84c1aaf 100644 --- a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py @@ -137,9 +137,11 @@ class PaddleDataLoader(DataLoader): if batch_sampler is None: batch_sampler = RandomBatchSampler(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last) - batch_size = 1 - shuffle = False - drop_last = False + # 因为无论如何传给 DataLoader 的 batch_sampler 都不是 None + # 所以要恢复默认值防止报错 + batch_size = 1 + shuffle = False + drop_last = False if isinstance(collate_fn, str): if 
collate_fn == 'auto': diff --git a/tests/core/drivers/paddle_driver/test_fleet.py b/tests/core/drivers/paddle_driver/test_fleet.py index 4421e4b1..b303249c 100644 --- a/tests/core/drivers/paddle_driver/test_fleet.py +++ b/tests/core/drivers/paddle_driver/test_fleet.py @@ -522,6 +522,103 @@ class TestSetDistReproDataloader: assert len(left_idxes) + len(already_seen_idx) == len(self.dataset) / num_replicas assert len(left_idxes | already_seen_idx) == len(self.dataset) / num_replicas + @magic_argv_env_context + @pytest.mark.parametrize("shuffle", ([True, False])) + @pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) + @pytest.mark.parametrize("drop_last", ([True, False])) + def test_shuffle_dataloader(self, shuffle, batch_size, drop_last, reproducible=True): + try: + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 200 + dataset = PaddleNormalXYDataset(num_samples) + dl = prepare_paddle_dataloader(dataset, shuffle=shuffle, batch_size=batch_size, drop_last=drop_last) + model = PaddleNormalModel_Classification_1(10, 32) + self.driver.setup() + dl = self.driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + flags.append(batch['x'].shape[0] == batch_size) + data.extend(batch['x'].reshape((-1, )).tolist()) + + _num_samples = num_samples//2 + + if drop_last and _num_samples%batch_size != 0: + assert len(data)!=_num_samples + assert all(flags) == True + elif _num_samples%batch_size!=0: + assert flags[-1] is False + else: + assert len(data) == _num_samples + + if not shuffle: + for i in range(1, len(data)-1): + assert data[i]>data[i-1] + else: + flags = [] + for i in range(1, len(data)-1): + flags.append(data[i]>data[i-1]) + assert all(flags) is False + datas = fastnlp_paddle_all_gather(data) + if drop_last: + assert len(set(datas[0] + datas[1])) == num_samples-_num_samples%batch_size*2 + else: + assert len(set(datas[0] + datas[1])) == num_samples + finally: + dist.barrier() + + 
@magic_argv_env_context + @pytest.mark.parametrize("shuffle", ([True, False])) + @pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) + @pytest.mark.parametrize("drop_last", ([True, False])) + def test_batch_sampler_dataloader(self, shuffle, batch_size, drop_last, reproducible=True): + try: + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 200 + num_device = 2 + dataset = PaddleNormalXYDataset(num_samples) + sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, + shuffle=shuffle, num_batch_per_bucket=2) + dl = prepare_paddle_dataloader(dataset, batch_sampler=sampler) + model = PaddleNormalModel_Classification_1(10, 32) + device = [0, 1] + self.driver.setup() + dl = self.driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + d = batch['x'].reshape((-1, )).tolist() + diff = max(d) - min(d) + assert diffdata[i-1] - else: - flags = [] - for i in range(1, len(data)-1): - flags.append(data[i]>data[i-1]) - assert all(flags) is False - datas = fastnlp_paddle_all_gather(data) - if drop_last: - assert len(set(datas[0] + datas[1])) == num_samples-_num_samples%batch_size*2 - else: - assert len(set(datas[0] + datas[1])) == num_samples - finally: - if dist.is_initialized(): - dist.barrier() - dist.destroy_process_group() - - -@pytest.mark.torch -@magic_argv_env_context -@pytest.mark.parametrize("shuffle", ([True, False])) -@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) -@pytest.mark.parametrize("drop_last", ([True, False])) -def test_batch_sampler_dataloader(shuffle, batch_size, drop_last, reproducible=True): - try: - # 需要检验一下 set_dist_repro_dataloader 没有修改参数 - num_samples = 200 - num_device = 2 - dataset = PaddleNormalXYDataset(num_samples) - sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, - shuffle=shuffle, num_batch_per_bucket=2) - dl = 
prepare_paddle_dataloader(dataset, batch_sampler=sampler) - model = PaddleNormalModel_Classification_1(10, 32) - device = [0, 1] - driver = PaddleFleetDriver(model, parallel_device=device) - driver.setup() - dl = driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) - - data = [] - flags = [] - for batch in dl: - d = batch['x'].reshape(-1).tolist() - diff = max(d) - min(d) - assert diffdata[i-1] + else: + flags = [] + for i in range(1, len(data)): + flags.append(data[i]>data[i-1]) + assert all(flags) is False + + + @pytest.mark.paddle + @pytest.mark.parametrize("shuffle", ([True, False])) + @pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) + @pytest.mark.parametrize("drop_last", ([True, False])) + @pytest.mark.parametrize("reproducible", ([True, False])) + def test_batch_sampler_dataloader(self, shuffle, batch_size, drop_last, reproducible): + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 200 + dataset = PaddleNormalXYDataset(num_samples) + sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, + shuffle=shuffle, num_batch_per_bucket=2) + dl = prepare_paddle_dataloader(dataset, batch_sampler=sampler) + model = PaddleNormalModel_Classification_1(1, 2) + dl = self.driver.set_dist_repro_dataloader(dataloader=dl, reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + d = batch['x'].reshape((-1, )).tolist() + diff = max(d) - min(d) + assert diffdata[i-1] - else: - flags = [] - for i in range(1, len(data)): - flags.append(data[i]>data[i-1]) - assert all(flags) is False - - -@pytest.mark.torch -@pytest.mark.parametrize("shuffle", ([True, False])) -@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) -@pytest.mark.parametrize("drop_last", ([True, False])) -@pytest.mark.parametrize("reproducible", ([True, False])) -def test_batch_sampler_dataloader(shuffle, batch_size, drop_last, reproducible): - # 需要检验一下 set_dist_repro_dataloader 没有修改参数 - 
num_samples = 200 - dataset = PaddleNormalXYDataset(num_samples) - sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, - shuffle=shuffle, num_batch_per_bucket=2) - dl = prepare_paddle_dataloader(dataset, batch_sampler=sampler) - model = PaddleNormalModel_Classification_1(1, 2) - driver = PaddleSingleDriver(model, device="cpu") - dl = driver.set_dist_repro_dataloader(dataloader=dl, reproducible=reproducible) - - data = [] - flags = [] - for batch in dl: - d = batch['x'].reshape(-1).tolist() - diff = max(d) - min(d) - assert diff Date: Tue, 7 Jun 2022 14:52:09 +0800 Subject: [PATCH 05/52] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E4=B8=80=E4=B8=AATimer?= =?UTF-8?q?Callback=E7=94=A8=E4=BA=8E=E8=AE=A1=E6=97=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/__init__.py | 1 + fastNLP/core/callbacks/__init__.py | 6 +- .../core/callbacks/has_monitor_callback.py | 2 +- .../callbacks/load_best_model_callback.py | 4 +- fastNLP/core/callbacks/progress_callback.py | 30 +++- fastNLP/core/callbacks/timer_callback.py | 152 ++++++++++++++++++ .../controllers/loops/train_batch_loop.py | 4 +- .../core/dataloaders/torch_dataloader/fdl.py | 3 + 8 files changed, 188 insertions(+), 14 deletions(-) create mode 100644 fastNLP/core/callbacks/timer_callback.py diff --git a/fastNLP/core/__init__.py b/fastNLP/core/__init__.py index 4f5ee3d8..6cf73d3b 100644 --- a/fastNLP/core/__init__.py +++ b/fastNLP/core/__init__.py @@ -16,6 +16,7 @@ __all__ = [ "ResultsMonitor", 'HasMonitorCallback', "FitlogCallback", + "TimerCallback", # collators 'Collator', diff --git a/fastNLP/core/callbacks/__init__.py b/fastNLP/core/callbacks/__init__.py index 48699b68..d1f19b96 100644 --- a/fastNLP/core/callbacks/__init__.py +++ b/fastNLP/core/callbacks/__init__.py @@ -21,7 +21,9 @@ __all__ = [ "ResultsMonitor", 'HasMonitorCallback', - "FitlogCallback" + "FitlogCallback", + + "TimerCallback" ] @@ -37,4 +39,4 @@ from 
.torch_callbacks import * from .more_evaluate_callback import MoreEvaluateCallback from .has_monitor_callback import ResultsMonitor, HasMonitorCallback from .fitlog_callback import FitlogCallback - +from .timer_callback import TimerCallback diff --git a/fastNLP/core/callbacks/has_monitor_callback.py b/fastNLP/core/callbacks/has_monitor_callback.py index 0b57bf53..4fadc3d7 100644 --- a/fastNLP/core/callbacks/has_monitor_callback.py +++ b/fastNLP/core/callbacks/has_monitor_callback.py @@ -171,7 +171,7 @@ class ResultsMonitor: @property def log_name(self) -> str: """ - 内部用于打印信息使用 + 内部用于打印当前类别信息使用 :return: """ diff --git a/fastNLP/core/callbacks/load_best_model_callback.py b/fastNLP/core/callbacks/load_best_model_callback.py index 4f52720f..ec6579a6 100644 --- a/fastNLP/core/callbacks/load_best_model_callback.py +++ b/fastNLP/core/callbacks/load_best_model_callback.py @@ -106,11 +106,11 @@ class LoadBestModelCallback(HasMonitorCallback): def on_train_end(self, trainer): if abs(self.monitor_value) != float('inf'): # 如果是 inf 说明从来没有运行过。 if self.real_save_folder: - logger.info(f"Loading best model from {self.real_save_folder} with {self.monitor_name}: {self.monitor_value}...") + logger.info(f"Loading best model from {self.real_save_folder} with {self._real_monitor}: {self.monitor_value}...") trainer.load_model(folder=self.real_save_folder, only_state_dict=self.only_state_dict, model_load_fn=self.model_load_fn) else: - logger.info(f"Loading best model from buffer with {self.monitor_name}: {self.monitor_value}...") + logger.info(f"Loading best model from buffer with {self._real_monitor}: {self.monitor_value}...") self.buffer.seek(0) trainer.load_model(folder=self.buffer, only_state_dict=self.only_state_dict) if self.delete_after_after: diff --git a/fastNLP/core/callbacks/progress_callback.py b/fastNLP/core/callbacks/progress_callback.py index 36524a6b..2fa62c87 100644 --- a/fastNLP/core/callbacks/progress_callback.py +++ b/fastNLP/core/callbacks/progress_callback.py @@ -1,5 
+1,4 @@ import json -import sys from typing import Union __all__ = [ @@ -16,8 +15,21 @@ from fastNLP.core.log import logger class ProgressCallback(HasMonitorCallback): + def __init__(self, monitor, larger_better, must_have_monitor=False): + super(ProgressCallback, self).__init__(monitor=monitor, larger_better=larger_better, + must_have_monitor=must_have_monitor) + self.best_monitor_epoch = -1 + self.best_monitor_step = -1 + + def record_better_monitor(self, trainer): + self.best_monitor_step = trainer.global_forward_batches + self.best_monitor_epoch = trainer.cur_epoch_idx + def on_train_end(self, trainer): - f_rich_progress.stop() + if self.best_monitor_epoch != -1: + msg = f"The best performance for monitor {self._real_monitor}:{self.monitor_value} was achieved in" \ + f" Epoch:{self.best_monitor_epoch}, Global Batch:{self.best_monitor_step}." + logger.info(msg) @property def name(self): # progress bar的名称 @@ -97,6 +109,7 @@ class RichCallback(ProgressCallback): advance=None, completed=trainer.cur_epoch_idx, refresh=True) def on_train_end(self, trainer): + super(RichCallback, self).on_train_end(trainer) self.clear_tasks() def on_before_backward(self, trainer, outputs): @@ -121,8 +134,8 @@ class RichCallback(ProgressCallback): text_style = '' characters = '-' if self.monitor is not None: - monitor_value = self.get_monitor_value(results) - if self.is_better_monitor_value(monitor_value, keep_if_better=True): + if self.is_better_results(results, keep_if_better=True): + self.record_better_monitor(trainer) if abs(self.monitor_value) != float('inf'): rule_style = 'spring_green3' text_style = '[bold]' @@ -201,8 +214,8 @@ class RawTextCallback(ProgressCallback): base_text = f'Eval. 
results on Epoch:{trainer.cur_epoch_idx}, Batch:{trainer.batch_idx_in_epoch}' text = '' if self.monitor is not None: - monitor_value = self.get_monitor_value(results) - if self.is_better_monitor_value(monitor_value, keep_if_better=True): + if self.is_better_results(results, keep_if_better=True): + self.record_better_monitor(trainer) if abs(self.monitor_value) != float('inf'): text = '+'*self.num_signs + base_text + '+'*self.num_signs if len(text) == 0: @@ -266,6 +279,7 @@ class TqdmCallback(ProgressCallback): self.progress_bar.set_description_str(self.task2id['epoch'], f'Epoch:{trainer.cur_epoch_idx}', refresh=True) def on_train_end(self, trainer): + super(TqdmCallback, self).on_train_end(trainer) self.clear_tasks() def on_before_backward(self, trainer, outputs): @@ -287,8 +301,8 @@ class TqdmCallback(ProgressCallback): base_text = f'Eval. results on Epoch:{trainer.cur_epoch_idx}, Batch:{trainer.batch_idx_in_epoch}' text = '' if self.monitor is not None: - monitor_value = self.get_monitor_value(results) - if self.is_better_monitor_value(monitor_value, keep_if_better=True): + if self.is_better_results(results, keep_if_better=True): + self.record_better_monitor(trainer) if abs(self.monitor_value) != float('inf'): text = '+'*self.num_signs + base_text + '+'*self.num_signs if len(text) == 0: diff --git a/fastNLP/core/callbacks/timer_callback.py b/fastNLP/core/callbacks/timer_callback.py new file mode 100644 index 00000000..f0dafcb6 --- /dev/null +++ b/fastNLP/core/callbacks/timer_callback.py @@ -0,0 +1,152 @@ +import time +from .callback import Callback +from ..log import logger +__all__ = ['TimerCallback'] + + +class _Timer: + """Timer.""" + + def __init__(self, name): + self.name_ = name + self.elapsed_ = 0.0 + self.started_ = False + self.start_time = time.time() + + def start(self): + """Start the timer.""" + assert not self.started_, f'{self.name_} timer has already been started' + self.start_time = time.time() + self.started_ = True + + def stop(self): + """Stop 
the timer.""" + assert self.started_, f'{self.name_} timer is not started' + self.elapsed_ += (time.time() - self.start_time) + self.started_ = False + + def reset(self): + """Reset timer.""" + self.elapsed_ = 0.0 + self.started_ = False + + def elapsed(self, reset=True): + """Calculate the elapsed time.""" + started_ = self.started_ + # If the timing in progress, end it first. + if self.started_: + self.stop() + # Get the elapsed time. + elapsed_ = self.elapsed_ + # Reset the elapsed time + if reset: + self.reset() + # If timing was in progress, set it back. + if started_: + self.start() + return elapsed_ + + +class Timers: + """Group of timers.""" + + def __init__(self): + self.timers = {} + + def __call__(self, name): + if name not in self.timers: + self.timers[name] = _Timer(name) + return self.timers[name] + + def __contains__(self, item): + return item in self.timers + + def reset(self): + for timer in self.timers.values(): + timer.reset() + + +class TimerCallback(Callback): + """ + 这个 callback 的作用是打印训练过程中的相关时间信息,例如训练时长,评测时长,总的时长等 + + """ + def __init__(self, print_every=-1, time_ndigit=3): + """ + + :param print_every: 在哪个时候打印时间信息。 + + * *负数*: 表示每隔多少 epoch 结束打印一次; + * *0*: 表示整个训练结束才打印; + * *正数*: 每隔多少个 step 打印一次; + + :param time_ndigit: 保留多少位的小数 + """ + assert isinstance(print_every, int), "print_every must be an int number." 
+ self.timers = Timers() + self.print_every = print_every + self.time_ndigit = time_ndigit + + def on_train_begin(self, trainer): + self.timers('total').start() + self.timers('train').start() + + def on_fetch_data_begin(self, trainer): + self.timers('fetch-data').start() + + def on_fetch_data_end(self, trainer): + self.timers('fetch-data').stop() + + def on_train_batch_begin(self, trainer, batch, indices): + self.timers('forward').start() + + def on_before_backward(self, trainer, outputs): + self.timers('forward').stop() + self.timers('backward').start() + + def on_after_backward(self, trainer): + self.timers('backward').stop() + + def on_before_optimizers_step(self, trainer, optimizers): + self.timers('optimize').start() + + def on_after_optimizers_step(self, trainer, optimizers): + self.timers('optimize').stop() + + def on_evaluate_begin(self, trainer): + self.timers('train').stop() + self.timers('evaluate').start() + + def on_evaluate_end(self, trainer, results): + self.timers('evaluate').stop() + self.timers('train').start() + + def format_timer(self, reset=True): + line = '' + timers = ['fetch-data', 'forward', 'backward', 'optimize', 'evaluate', 'train', 'total'] + for timer_name in timers: + if not timer_name in self.timers: + continue + timer = self.timers(timer_name) + elapsed = round(timer.elapsed(reset=reset), self.time_ndigit) + if elapsed != 0: + line = line + f', {timer_name}: {elapsed}s' + return line + + def on_train_batch_end(self, trainer): + if self.print_every>0 and trainer.global_forward_batches % self.print_every == 0: + line = self.format_timer() + logger.info(f"Running {self.print_every} batches{line}") + + def on_train_epoch_end(self, trainer): + if self.print_every < 0 and trainer.cur_epoch_idx % abs(self.print_every) == 0: + line = self.format_timer() + logger.info(f"Running {abs(self.print_every)} epochs{line}") + + def on_train_end(self, trainer): + if self.print_every == 0: + line = self.format_timer() + logger.info(f"Training 
finished{line}") + + + diff --git a/fastNLP/core/controllers/loops/train_batch_loop.py b/fastNLP/core/controllers/loops/train_batch_loop.py index 645f4224..ca8389b1 100644 --- a/fastNLP/core/controllers/loops/train_batch_loop.py +++ b/fastNLP/core/controllers/loops/train_batch_loop.py @@ -41,10 +41,12 @@ class TrainBatchLoop(Loop): batch = next(dataloader) indices = get_batch_indices() except StopIteration: + trainer.on_fetch_data_end() break + trainer.on_fetch_data_end() + try: - trainer.on_fetch_data_end() batch = match_and_substitute_params(trainer.input_mapping, batch) batch = trainer.move_data_to_device(batch) diff --git a/fastNLP/core/dataloaders/torch_dataloader/fdl.py b/fastNLP/core/dataloaders/torch_dataloader/fdl.py index 09fa2ff6..2a119260 100644 --- a/fastNLP/core/dataloaders/torch_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/torch_dataloader/fdl.py @@ -108,6 +108,9 @@ class TorchDataLoader(DataLoader): if not isinstance(dataset, _FDataSet): dataset = _FDataSet(dataset) + if num_workers>0 and multiprocessing_context is None: + multiprocessing_context = 'fork' # 这里默认使用fork的方式来启动多进程 + if batch_sampler is not None: batch_size = 1 shuffle = False From e4a7e64600a2e76de040a4da797f4a553d380196 Mon Sep 17 00:00:00 2001 From: yhcc Date: Sat, 11 Jun 2022 22:30:20 +0800 Subject: [PATCH 06/52] =?UTF-8?q?progress=E6=89=93=E5=8D=B0=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E4=B8=80=E7=A7=8D=E7=89=B9=E6=AE=8Ayueding?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/callbacks/progress_callback.py | 18 ++++++++++++------ fastNLP/core/utils/utils.py | 2 +- fastNLP/io/data_bundle.py | 20 ++++++++++++-------- 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/fastNLP/core/callbacks/progress_callback.py b/fastNLP/core/callbacks/progress_callback.py index 2fa62c87..890864ec 100644 --- a/fastNLP/core/callbacks/progress_callback.py +++ b/fastNLP/core/callbacks/progress_callback.py @@ -57,7 +57,7 @@ def 
choose_progress_callback(progress_bar: Union[str, ProgressCallback]) -> Prog class RichCallback(ProgressCallback): """ 在训练过程中打印 rich progress bar 的 callback 。在 Trainer 中,默认就会使用这个 callback 来显示进度。如果需要定制这个 Callback 的 - 参数,请通过实例化本 Callback 并传入到 Trainer 中实现。 + 参数,请通过实例化本 Callback 并传入到 Trainer 中实现。在打印 evaluate 的结果时,不会打印名称以 "_" 开头的内容。 :param print_every: 多少个 batch 更新一次显示。 :param loss_round_ndigit: 显示的 loss 保留多少位有效数字 @@ -144,8 +144,10 @@ class RichCallback(ProgressCallback): self.progress_bar.console.rule(text_style+f"Eval. results on Epoch:{trainer.cur_epoch_idx}, " f"Batch:{trainer.batch_idx_in_epoch}", style=rule_style, characters=characters) + results = {key:trainer.driver.tensor_to_numeric(value) for key, value in results.items() if + not key.startswith('_')} if self.format_json: - self.progress_bar.console.print_json(json.dumps(trainer.driver.tensor_to_numeric(results))) + self.progress_bar.console.print_json(json.dumps(results)) else: self.progress_bar.print(results) @@ -165,7 +167,7 @@ class RawTextCallback(ProgressCallback): def __init__(self, print_every:int = 1, loss_round_ndigit:int = 6, monitor:str=None, larger_better:bool=True, format_json=True): """ - 通过向命令行打印进度的方式显示 + 通过向命令行打印进度的方式显示。在打印 evaluate 的结果时,不会打印名称以 "_" 开头的内容。 :param print_every: 多少个 batch 更新一次显示。 :param loss_round_ndigit: 显示的 loss 保留多少位有效数字 @@ -222,8 +224,10 @@ class RawTextCallback(ProgressCallback): text = '-'*self.num_signs + base_text + '-'*self.num_signs logger.info(text) + results = {key:trainer.driver.tensor_to_numeric(value) for key, value in results.items() if + not key.startswith('_')} if self.format_json: - logger.info(json.dumps(trainer.driver.tensor_to_numeric(results))) + logger.info(json.dumps(results)) else: logger.info(results) @@ -235,7 +239,7 @@ class RawTextCallback(ProgressCallback): class TqdmCallback(ProgressCallback): """ 在训练过程中打印 tqdm progress bar 的 callback 。在 Trainer 中,默认就会使用这个 callback 来显示进度。如果需要定制这个 Callback 的 - 参数,请通过实例化本 Callback 并传入到 Trainer 中实现。 + 参数,请通过实例化本 
Callback 并传入到 Trainer 中实现。在打印 evaluate 的结果时,不会打印名称以 "_" 开头的内容。 :param print_every: 多少个 batch 更新一次显示。 :param loss_round_ndigit: 显示的 loss 保留多少位有效数字 @@ -309,8 +313,10 @@ class TqdmCallback(ProgressCallback): text = '-'*self.num_signs + base_text + '-'*self.num_signs logger.info(text) + results = {key:trainer.driver.tensor_to_numeric(value) for key, value in results.items() if + not key.startswith('_')} if self.format_json: - logger.info(json.dumps(trainer.driver.tensor_to_numeric(results))) + logger.info(json.dumps(results)) else: logger.info(results) diff --git a/fastNLP/core/utils/utils.py b/fastNLP/core/utils/utils.py index 11256d45..33a7ee7e 100644 --- a/fastNLP/core/utils/utils.py +++ b/fastNLP/core/utils/utils.py @@ -630,7 +630,7 @@ def is_notebook(): def flat_nest_dict(d:Dict, separator:str='#', compress_none_key:bool=True, top_down:bool=False) -> Dict: """ - 讲一个 nested 的 dict 转成 flat 的 dict,例如 + 将一个 nested 的 dict 转成 flat 的 dict,例如 ex:: d = {'test': {'f1': {'f': 0.2, 'rec': 0.1}}} -> {'f#f1#test':0.2, 'rec#f1#test':0.1} diff --git a/fastNLP/io/data_bundle.py b/fastNLP/io/data_bundle.py index 58538d61..4029e092 100644 --- a/fastNLP/io/data_bundle.py +++ b/fastNLP/io/data_bundle.py @@ -245,8 +245,9 @@ class DataBundle: """ _progress_desc = progress_desc for name, dataset in self.datasets.items(): - if _progress_desc: - progress_desc = _progress_desc + f' for `{name}`' + if len(_progress_desc) == 0: + _progress_desc = 'Processing' + progress_desc = _progress_desc + f' for `{name}`' if dataset.has_field(field_name=field_name): dataset.apply_field(func=func, field_name=field_name, new_field_name=new_field_name, num_proc=num_proc, progress_desc=progress_desc, progress_bar=progress_bar) @@ -284,8 +285,9 @@ class DataBundle: res = {} _progress_desc = progress_desc for name, dataset in self.datasets.items(): - if _progress_desc: - progress_desc = _progress_desc + f' for `{name}`' + if len(_progress_desc) == 0: + _progress_desc = 'Processing' + progress_desc = 
_progress_desc + f' for `{name}`' if dataset.has_field(field_name=field_name): res[name] = dataset.apply_field_more(func=func, field_name=field_name, num_proc=num_proc, modify_fields=modify_fields, @@ -317,8 +319,9 @@ class DataBundle: """ _progress_desc = progress_desc for name, dataset in self.datasets.items(): - if _progress_desc: - progress_desc = _progress_desc + f' for `{name}`' + if len(_progress_desc) == 0: + _progress_desc = 'Processing' + progress_desc = _progress_desc + f' for `{name}`' dataset.apply(func, new_field_name=new_field_name, num_proc=num_proc, progress_bar=progress_bar, progress_desc=progress_desc) return self @@ -349,8 +352,9 @@ class DataBundle: res = {} _progress_desc = progress_desc for name, dataset in self.datasets.items(): - if _progress_desc: - progress_desc = _progress_desc + f' for `{name}`' + if len(_progress_desc) == 0: + _progress_desc = 'Processing' + progress_desc = _progress_desc + f' for `{name}`' res[name] = dataset.apply_more(func, modify_fields=modify_fields, num_proc=num_proc, progress_bar=progress_bar, progress_desc=progress_desc) return res From 70dea71cdb3e7914eac1e90a1cbc44832aa80f05 Mon Sep 17 00:00:00 2001 From: YWMditto Date: Mon, 13 Jun 2022 17:53:23 +0800 Subject: [PATCH 07/52] =?UTF-8?q?=E4=B8=BA=20ddp=20=E5=9C=A8=E7=94=A8?= =?UTF-8?q?=E6=88=B7=E4=BD=BF=E7=94=A8=E8=87=AA=E5=B7=B1=E7=9A=84sampler?= =?UTF-8?q?=20=E5=92=8C=20batch=20sampler=20=E6=98=AF=E6=B7=BB=E5=8A=A0?= =?UTF-8?q?=E7=A6=81=E6=AD=A2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/controllers/evaluator.py | 2 +- fastNLP/core/controllers/trainer.py | 4 +-- fastNLP/core/drivers/torch_driver/ddp.py | 8 ++++++ fastNLP/core/drivers/torch_driver/utils.py | 30 ++++------------------ 4 files changed, 16 insertions(+), 28 deletions(-) diff --git a/fastNLP/core/controllers/evaluator.py b/fastNLP/core/controllers/evaluator.py index a1d4adf8..22eac708 100644 --- 
a/fastNLP/core/controllers/evaluator.py +++ b/fastNLP/core/controllers/evaluator.py @@ -107,7 +107,7 @@ class Evaluator: ``dropout`` 与 ``batch normalization`` 将会关闭。默认为 ``True``。如果为 ``False``,``fastNLP`` 不会对 ``model`` 的 ``evaluate`` 状态做任何设置。无论 该值是什么,``fastNLP`` 都会在评测后将 ``model`` 的状态设置为 ``train``; * *use_dist_sampler* -- - 是否使用分布式评测的方式。仅当 ``driver`` 为分布式类型时,该参数才有效。默认为根据 ``driver`` 是否支持 + True / False, 是否使用分布式评测的方式。仅当 ``driver`` 为分布式类型时,该参数才有效。默认为根据 ``driver`` 是否支持 分布式进行设置。如果为 ``True``,将使得每个进程上的 ``dataloader`` 自动使用不同数据,所有进程的数据并集是整个数据集; * *output_from_new_proc* -- 等价于 ``Trainer`` 中的 ``output_from_new_proc`` 参数; * *progress_bar* -- 等价于 ``Trainer`` 中的 ``progress_bar`` 参数; diff --git a/fastNLP/core/controllers/trainer.py b/fastNLP/core/controllers/trainer.py index 41fca6ba..c9674f3a 100644 --- a/fastNLP/core/controllers/trainer.py +++ b/fastNLP/core/controllers/trainer.py @@ -290,9 +290,9 @@ class Trainer(TrainerEventTrigger): driver 实例的 ``model_device`` 才会为 None; 3. 对于 paddle,该参数无效; - * *use_dist_sampler* -- 表示是否使用分布式的 ``sampler``。在多卡时,分布式 ``sampler`` 将自动决定每张卡上读取的 sample ,使得一个 epoch + * *use_dist_sampler* -- True / False, 表示是否使用分布式的 ``sampler``。在多卡时,分布式 ``sampler`` 将自动决定每张卡上读取的 sample ,使得一个 epoch 内所有卡的 sample 加起来为一整个数据集的 sample。默认会根据 driver 是否为分布式进行设置。 - * *evaluate_use_dist_sampler* -- 表示在 ``Evaluator`` 中在使用分布式的时候是否将 dataloader 的 ``sampler`` 替换为分布式的 ``sampler``; + * *evaluate_use_dist_sampler* -- True / False, 表示在 ``Evaluator`` 中在使用分布式的时候是否将 dataloader 的 ``sampler`` 替换为分布式的 ``sampler``; 不传入该值时,该值与 ``use_dist_sampler`` 参数保持一致; * *output_from_new_proc* -- 应当为一个字符串,表示在多进程的 driver 中其它进程的输出流应当被做如何处理;其值应当为以下之一: ["all", "ignore", "only_error"];当该参数的值不是以上值时,该值应当表示一个文件夹的名字,我们会将其他 rank 的输出流重定向到 diff --git a/fastNLP/core/drivers/torch_driver/ddp.py b/fastNLP/core/drivers/torch_driver/ddp.py index 43c6bc36..008df0d0 100644 --- a/fastNLP/core/drivers/torch_driver/ddp.py +++ b/fastNLP/core/drivers/torch_driver/ddp.py @@ -565,6 +565,13 @@ class TorchDDPDriver(TorchDriver): ) return 
replace_sampler(dataloader, sampler) else: + if type(args.batch_sampler) is not BatchSampler or (type(args.sampler) not in {torch.utils.data.RandomSampler, + torch.utils.data.SequentialSampler}): + raise TypeError("Using customized ``batch_sampler`` or ``sampler`` with 'DDP' may cause unseen problems, cause" + "we will substitute your dataloader's sampler into our ``fastNLP.RandomSampler``. You should make" + "your customized sampler being able to be used in distributed setting before you initialize ``Trainer`` by yourself," + "and then set the parameter ``use_dist_sampler`` of ``Trainer`` to ``False``.") + sampler = RandomSampler( dataset=args.dataset, shuffle=args.shuffle, @@ -582,6 +589,7 @@ class TorchDDPDriver(TorchDriver): if isinstance(args.sampler, ReproducibleSampler): sampler = conversion_between_reproducible_and_unrepeated_sampler(args.sampler) elif not isinstance(args.sampler, UnrepeatedSampler): + # todo same as dist sampler = UnrepeatedSequentialSampler( dataset=args.dataset ) diff --git a/fastNLP/core/drivers/torch_driver/utils.py b/fastNLP/core/drivers/torch_driver/utils.py index 13281cfe..f0704dd5 100644 --- a/fastNLP/core/drivers/torch_driver/utils.py +++ b/fastNLP/core/drivers/torch_driver/utils.py @@ -14,7 +14,7 @@ from fastNLP.envs import ( FASTNLP_BACKEND_LAUNCH, FASTNLP_GLOBAL_SEED, ) -from fastNLP.core.samplers import re_instantiate_sampler +from fastNLP.core.samplers import re_instantiate_sampler, ReproducibleBatchSampler from fastNLP.core.utils import auto_param_call from fastNLP.core.log import logger @@ -23,7 +23,6 @@ if _NEED_IMPORT_TORCH: # import torch.nn as nn from torch.nn import Module from torch.utils.data import DataLoader, BatchSampler - from torch.utils.data.sampler import Sampler else: from fastNLP.core.utils.dummy_class import DummyClass as Module @@ -201,7 +200,10 @@ def replace_sampler(dataloader: "DataLoader", sampler): non_default_params.add("dataset") reconstruct_args = {k: v for k, v in instance_attrs.items() if k in 
non_default_params} - reconstruct_args.update(_dataloader_init_kwargs_resolve_sampler(dataloader, sampler)) + + batch_sampler = getattr(dataloader, "batch_sampler") + if batch_sampler is not None and isinstance(batch_sampler, ReproducibleBatchSampler): + raise RuntimeError("It should not be running here, please report a bug to us.") required_args = { p.name @@ -243,28 +245,6 @@ def replace_sampler(dataloader: "DataLoader", sampler): return type(dataloader)(**reconstruct_args) -def _dataloader_init_kwargs_resolve_sampler( - dataloader: "DataLoader", sampler: Optional["Sampler"] -) -> Dict[str, Any]: - r""" - 此函数用于处理与 DataLoader 关联的采样器、batch_sampler 参数重新实例化; - """ - batch_sampler = getattr(dataloader, "batch_sampler") - # checking the batch sampler type is different than PyTorch default. - if batch_sampler is not None and not isinstance(batch_sampler, BatchSampler): - batch_sampler = re_instantiate_sampler(batch_sampler) - - return { - "sampler": None, - "shuffle": False, - "batch_sampler": batch_sampler, - "batch_size": 1, - "drop_last": False, - } - - return {"sampler": sampler, "shuffle": False, "batch_sampler": None} - - def replace_batch_sampler(dataloader, new_batch_sampler): r""" 替换一个 dataloader 的 batch_sampler; From 7283bf27b2f777cab74e4236aaf1a79ccabbaf3b Mon Sep 17 00:00:00 2001 From: yhcc Date: Mon, 13 Jun 2022 20:57:25 +0800 Subject: [PATCH 08/52] =?UTF-8?q?=E5=9C=A8=E7=A7=81=E6=9C=89=E5=AE=9A?= =?UTF-8?q?=E5=88=B6Sampler=E7=9A=84=E6=83=85=E5=86=B5=E4=B8=8B=EF=BC=8C?= =?UTF-8?q?=E5=A4=9A=E5=8D=A1=E4=B8=8D=E6=9B=BF=E6=8D=A2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/controllers/evaluator.py | 7 +- fastNLP/core/controllers/trainer.py | 27 ++-- fastNLP/core/drivers/paddle_driver/fleet.py | 3 +- fastNLP/core/drivers/torch_driver/ddp.py | 17 ++- .../drivers/torch_driver/single_device.py | 26 ++-- fastNLP/core/drivers/torch_driver/utils.py | 19 ++- tests/core/drivers/torch_driver/test_ddp.py | 
117 +++++++++++++++++- 7 files changed, 175 insertions(+), 41 deletions(-) diff --git a/fastNLP/core/controllers/evaluator.py b/fastNLP/core/controllers/evaluator.py index 22eac708..84ca03bd 100644 --- a/fastNLP/core/controllers/evaluator.py +++ b/fastNLP/core/controllers/evaluator.py @@ -107,8 +107,11 @@ class Evaluator: ``dropout`` 与 ``batch normalization`` 将会关闭。默认为 ``True``。如果为 ``False``,``fastNLP`` 不会对 ``model`` 的 ``evaluate`` 状态做任何设置。无论 该值是什么,``fastNLP`` 都会在评测后将 ``model`` 的状态设置为 ``train``; * *use_dist_sampler* -- - True / False, 是否使用分布式评测的方式。仅当 ``driver`` 为分布式类型时,该参数才有效。默认为根据 ``driver`` 是否支持 - 分布式进行设置。如果为 ``True``,将使得每个进程上的 ``dataloader`` 自动使用不同数据,所有进程的数据并集是整个数据集; + 表示在 ``Evaluator`` 中在使用分布式的时候是否将保证 dataloader 的 ``sampler`` 替换为 + 分布式的 ``sampler``,其特点是每个卡上的数据之间不重叠,所有卡上数据的加起来是整个数据集。若传入的 dataloader + 的 sampler 为 (a) 深度学习框架自带的默认 sampler ; (b) fastNLP 的 Sampler 等,则将替换为 + :class:`~fastNLP.UnrepeatedSequentialSampler`,如果这个行为不是期待的,请本参数设置为 ``False``,并针对每个卡控制其可以 + 用到的数据。如果不是以上两类 sampler ,fastNLP 将报错。 * *output_from_new_proc* -- 等价于 ``Trainer`` 中的 ``output_from_new_proc`` 参数; * *progress_bar* -- 等价于 ``Trainer`` 中的 ``progress_bar`` 参数; * *check_dataloader_legality* -- 是否检查 ``DataLoader`` 是否合法,默认为 ``True`` 。 diff --git a/fastNLP/core/controllers/trainer.py b/fastNLP/core/controllers/trainer.py index c9674f3a..f92611dd 100644 --- a/fastNLP/core/controllers/trainer.py +++ b/fastNLP/core/controllers/trainer.py @@ -65,6 +65,13 @@ class Trainer(TrainerEventTrigger): 您传入的 ``Driver`` 实例中的模型; :param train_dataloader: 训练数据集,注意其必须是单独的一个数据集,不能是 List 或者 Dict; + + .. 
warning:: + + 当使用分布式训练时, ``fastNLP`` 会默认将 ``dataloader`` 中的 ``Sampler`` 进行处理,以使得在一个 ``epcoh`` 中,不同卡 + 用以训练的数据是不重叠的。如果你对 sampler 有特殊处理,那么请将 ``use_dist_sampler`` 参数设置为 ``False`` ,此刻需要由 + 你自身保证每张卡上所使用的数据是不同的。 + :param optimizers: 训练所需要的优化器;可以是单独的一个优化器实例,也可以是多个优化器组成的 List; :param device: 该参数用来指定具体训练时使用的机器;注意当该参数仅当您通过 `torch.distributed.launch/run` 启动时可以为 None, 此时 fastNLP 不会对模型和数据进行设备之间的移动处理,但是你可以通过参数 `input_mapping` 和 `output_mapping` 来实现设备之间 @@ -93,9 +100,9 @@ class Trainer(TrainerEventTrigger): .. warning:: - 注意参数 ``device`` 仅当您通过 pytorch 或者其它训练框架自身的并行训练启动脚本启动 ddp 训练时才允许为 ``None``! + 注意参数 ``device`` 仅当您通过训练框架自身的并行训练启动脚本启动 ddp 训练时才允许为 ``None``! - 例如,当您使用:: + 例如,在 pytorch 中,当您使用:: python -m torch.distributed.launch --nproc_per_node 2 train.py @@ -290,16 +297,22 @@ class Trainer(TrainerEventTrigger): driver 实例的 ``model_device`` 才会为 None; 3. 对于 paddle,该参数无效; - * *use_dist_sampler* -- True / False, 表示是否使用分布式的 ``sampler``。在多卡时,分布式 ``sampler`` 将自动决定每张卡上读取的 sample ,使得一个 epoch - 内所有卡的 sample 加起来为一整个数据集的 sample。默认会根据 driver 是否为分布式进行设置。 - * *evaluate_use_dist_sampler* -- True / False, 表示在 ``Evaluator`` 中在使用分布式的时候是否将 dataloader 的 ``sampler`` 替换为分布式的 ``sampler``; - 不传入该值时,该值与 ``use_dist_sampler`` 参数保持一致; + * *use_dist_sampler* -- 表示是否使用分布式的 ``sampler``。在多卡时,分布式 ``sampler`` 将自动决定每张卡上读取的 sample ,使得一个 epoch + 内所有卡的 sample 加起来为一整个数据集的 sample,同时为了保证所有卡上拥有相同数量的 sample ,有的卡上可能会有重复的 sample ,例如 + 8卡训练,只有9个sample,如果batch_size为1,那么第二个batch时,有7张卡将没有 sample 可用,因此只有重复使用 sample 来 pad 到第二个 + batch 中。如果不希望 fastNLP 对 dataloader 的sampler 做特殊设置,请将该值设置为 False ,若确实需要分布式的训练,请在 Trainer 外 + 对 train_dataloader 做的数据做特殊处理使得其在不同的卡之间 sample 是 + * *evaluate_use_dist_sampler* -- 表示在 ``Evaluator`` 中在使用分布式的时候是否将保证 dataloader 的 ``sampler`` 替换为 + evaluate 时使用的分布式的 ``sampler``,其特点是每个卡上的数据之间不重叠,所有卡上数据的加起来是整个数据集。若传入的 dataloader + 的 sampler 为 (a) 深度学习框架自带的默认 sampler ; (b) fastNLP 的 Sampler 等,则将替换为 + :class:`~fastNLP.UnrepeatedSequentialSampler`,如果这个行为不是期待的,请本参数设置为 ``False``,并针对每个卡控制其可以 + 用到的数据。 * *output_from_new_proc* 
-- 应当为一个字符串,表示在多进程的 driver 中其它进程的输出流应当被做如何处理;其值应当为以下之一: ["all", "ignore", "only_error"];当该参数的值不是以上值时,该值应当表示一个文件夹的名字,我们会将其他 rank 的输出流重定向到 log 文件中,然后将 log 文件保存在通过该参数值设定的文件夹中;默认为 "only_error"; 注意该参数仅当使用分布式的 ``driver`` 时才有效,例如 ``TorchDDPDriver``; - * *progress_bar* -- 以哪种方式显示 progress ,目前支持[None, 'raw', 'rich', 'auto', 'tqdm'] 或者 :class:`~.fastNLP.RichCallback`, :class:`~fastNLP.RawTextCallback`等对象, + * *progress_bar* -- 以哪种方式显示 progress ,目前支持[None, 'raw', 'rich', 'auto', 'tqdm'] 或者 :class:`~fastNLP.RichCallback`, :class:`~fastNLP.RawTextCallback`等对象, 默认为 auto , auto 表示如果检测到当前 terminal 为交互型则使用 :class:`~fastNLP.RichCallback`,否则使用 :class:`~fastNLP.RawTextCallback` 对象。如果 需要定制 progress bar 的参数,例如打印频率等,可以传入 :class:`~fastNLP.RichCallback`, :class:`~fastNLP.RawTextCallback` 等对象。 * *train_input_mapping* -- 与 input_mapping 一致,但是只用于 ``Trainer`` 中。与 input_mapping 互斥。 diff --git a/fastNLP/core/drivers/paddle_driver/fleet.py b/fastNLP/core/drivers/paddle_driver/fleet.py index 98c07495..d19da9fe 100644 --- a/fastNLP/core/drivers/paddle_driver/fleet.py +++ b/fastNLP/core/drivers/paddle_driver/fleet.py @@ -422,8 +422,7 @@ class PaddleFleetDriver(PaddleDriver): # trainer, evaluator if dist is None: if reproducible: - raise RuntimeError("It is not allowed to use checkpoint retraining when you initialize fleet out of our " - "control.") + raise RuntimeError("It is not allowed to save checkpoint if the sampler is not allowed to be replaced.") else: args = self.get_dataloader_args(dataloader) if isinstance(args.batch_sampler, ReproducibleBatchSampler): diff --git a/fastNLP/core/drivers/torch_driver/ddp.py b/fastNLP/core/drivers/torch_driver/ddp.py index 008df0d0..45a1a61a 100644 --- a/fastNLP/core/drivers/torch_driver/ddp.py +++ b/fastNLP/core/drivers/torch_driver/ddp.py @@ -140,6 +140,9 @@ if _NEED_IMPORT_TORCH: import torch.distributed as dist from torch.nn.parallel import DistributedDataParallel from torch.utils.data import BatchSampler + from torch.utils.data import RandomSampler as 
TorchRandomSampler + from torch.utils.data import SequentialSampler as TorchSequentialSampler + from torch.utils.data import BatchSampler as TorchBatchSampler __all__ = [ 'TorchDDPDriver' @@ -159,6 +162,7 @@ from fastNLP.core.samplers import ReproducibleSampler, RandomSampler, Unrepeated from fastNLP.envs import FASTNLP_DISTRIBUTED_CHECK, FASTNLP_GLOBAL_RANK, FASTNLP_GLOBAL_SEED, FASTNLP_NO_SYNC from fastNLP.core.log import logger from fastNLP.core.drivers.torch_driver.dist_utils import fastnlp_torch_all_gather, fastnlp_torch_broadcast_object +from .utils import _check_dataloader_args_for_distributed class TorchDDPDriver(TorchDriver): @@ -535,8 +539,7 @@ class TorchDDPDriver(TorchDriver): # trainer, evaluator if dist is None: if reproducible: - raise RuntimeError("It is not allowed to use checkpoint retraining when you initialize ddp out of our " - "control.") + raise RuntimeError("It is not allowed to save checkpoint if the sampler is not allowed to be replaced.") else: args = self.get_dataloader_args(dataloader) if isinstance(args.batch_sampler, ReproducibleBatchSampler): @@ -565,13 +568,7 @@ class TorchDDPDriver(TorchDriver): ) return replace_sampler(dataloader, sampler) else: - if type(args.batch_sampler) is not BatchSampler or (type(args.sampler) not in {torch.utils.data.RandomSampler, - torch.utils.data.SequentialSampler}): - raise TypeError("Using customized ``batch_sampler`` or ``sampler`` with 'DDP' may cause unseen problems, cause" - "we will substitute your dataloader's sampler into our ``fastNLP.RandomSampler``. 
You should make" - "your customized sampler being able to be used in distributed setting before you initialize ``Trainer`` by yourself," - "and then set the parameter ``use_dist_sampler`` of ``Trainer`` to ``False``.") - + _check_dataloader_args_for_distributed(args, controller='Trainer') sampler = RandomSampler( dataset=args.dataset, shuffle=args.shuffle, @@ -589,7 +586,7 @@ class TorchDDPDriver(TorchDriver): if isinstance(args.sampler, ReproducibleSampler): sampler = conversion_between_reproducible_and_unrepeated_sampler(args.sampler) elif not isinstance(args.sampler, UnrepeatedSampler): - # todo same as dist + _check_dataloader_args_for_distributed(args, controller='Evaluator') sampler = UnrepeatedSequentialSampler( dataset=args.dataset ) diff --git a/fastNLP/core/drivers/torch_driver/single_device.py b/fastNLP/core/drivers/torch_driver/single_device.py index c36e0f8d..263cf712 100644 --- a/fastNLP/core/drivers/torch_driver/single_device.py +++ b/fastNLP/core/drivers/torch_driver/single_device.py @@ -8,6 +8,7 @@ if _NEED_IMPORT_TORCH: from torch.nn.parallel import DistributedDataParallel from torch.utils.data import RandomSampler as TorchRandomSampler from torch.utils.data import SequentialSampler as TorchSequentialSampler + from torch.utils.data import BatchSampler as TorchBatchSampler __all__ = [ 'TorchSingleDriver' @@ -123,19 +124,20 @@ class TorchSingleDriver(TorchDriver): return replace_sampler(dataloader, sampler) if reproducible: - if isinstance(args.sampler, TorchRandomSampler): - if getattr(args.sampler, '_num_samples', None) is None \ - and getattr(args.sampler, 'replacements', False) is False \ - and getattr(args.sampler, 'generator', None) is None: - # 如果本来就是随机的,并且没有定制,直接替换掉吧。 - sampler = RandomSampler(args.sampler.data_source, shuffle=True) - logger.debug("Replace torch RandomSampler into fastNLP RandomSampler.") + if type(args.batch_sampler) is TorchBatchSampler: + if type(args.sampler) is TorchRandomSampler: + if getattr(args.sampler, 
'_num_samples', None) is None \ + and getattr(args.sampler, 'replacements', False) is False \ + and getattr(args.sampler, 'generator', None) is None: + # 如果本来就是随机的,并且没有定制,直接替换掉吧。 + sampler = RandomSampler(args.sampler.data_source, shuffle=True) + logger.debug("Replace torch RandomSampler into fastNLP RandomSampler.") + return replace_sampler(dataloader, sampler) + elif type(args.sampler) is TorchSequentialSampler: + # 需要替换为不要 shuffle 的。 + sampler = RandomSampler(args.sampler.data_source, shuffle=False) + logger.debug("Replace torch SequentialSampler into fastNLP RandomSampler.") return replace_sampler(dataloader, sampler) - elif isinstance(args.sampler, TorchSequentialSampler): - # 需要替换为不要 shuffle 的。 - sampler = RandomSampler(args.sampler.data_source, shuffle=False) - logger.debug("Replace torch SequentialSampler into fastNLP RandomSampler.") - return replace_sampler(dataloader, sampler) batch_sampler = ReproduceBatchSampler( batch_sampler=args.batch_sampler, batch_size=args.batch_size, diff --git a/fastNLP/core/drivers/torch_driver/utils.py b/fastNLP/core/drivers/torch_driver/utils.py index f0704dd5..300bf196 100644 --- a/fastNLP/core/drivers/torch_driver/utils.py +++ b/fastNLP/core/drivers/torch_driver/utils.py @@ -22,7 +22,11 @@ if _NEED_IMPORT_TORCH: import torch # import torch.nn as nn from torch.nn import Module - from torch.utils.data import DataLoader, BatchSampler + from torch.utils.data import DataLoader + from torch.utils.data import RandomSampler as TorchRandomSampler + from torch.utils.data import SequentialSampler as TorchSequentialSampler + from torch.utils.data import BatchSampler as TorchBatchSampler + else: from fastNLP.core.utils.dummy_class import DummyClass as Module @@ -200,6 +204,7 @@ def replace_sampler(dataloader: "DataLoader", sampler): non_default_params.add("dataset") reconstruct_args = {k: v for k, v in instance_attrs.items() if k in non_default_params} + reconstruct_args.update({"sampler": sampler, "shuffle": False, "batch_sampler": 
None}) batch_sampler = getattr(dataloader, "batch_sampler") if batch_sampler is not None and isinstance(batch_sampler, ReproducibleBatchSampler): @@ -277,5 +282,13 @@ def optimizer_state_to_device(state, device): return new_state - - +def _check_dataloader_args_for_distributed(args, controller='Trainer'): + if type(args.batch_sampler) is not TorchBatchSampler and (type(args.sampler) not in {TorchRandomSampler, + TorchSequentialSampler}): + mode = 'training' if controller == 'Trainer' else 'evaluation' + substitution = 'fastNLP.RandomSampler' if controller == 'Trainer' else 'fastNLP.UnrepeatedSequentialSampler' + raise TypeError(f"Using customized ``batch_sampler`` or ``sampler`` for distributed {mode} may cause " + f"unpredictable problems, because fastNLP will substitute the dataloader's sampler into " + f"``{substitution}``. The customized sampler should set for distributed running " + f"before initializing ``{controller}`` , and then set the " + f"parameter ``use_dist_sampler`` of ``{controller}`` to ``False``.") diff --git a/tests/core/drivers/torch_driver/test_ddp.py b/tests/core/drivers/torch_driver/test_ddp.py index 3f3dde74..74f44c04 100644 --- a/tests/core/drivers/torch_driver/test_ddp.py +++ b/tests/core/drivers/torch_driver/test_ddp.py @@ -12,8 +12,9 @@ from fastNLP.core.samplers import ( ) from tests.helpers.models.torch_model import TorchNormalModel_Classification_1 from tests.helpers.datasets.torch_data import TorchNormalDataset, TorchNormalXYDataset -from tests.helpers.utils import magic_argv_env_context +from tests.helpers.utils import magic_argv_env_context, recover_logger, Capturing from fastNLP.envs.distributed import rank_zero_rm +from fastNLP import logger from fastNLP.core.drivers.torch_driver.dist_utils import fastnlp_torch_all_gather from fastNLP.envs.imports import _NEED_IMPORT_TORCH if _NEED_IMPORT_TORCH: @@ -936,12 +937,118 @@ def test_batch_sampler_dataloader(shuffle, batch_size, drop_last, reproducible=T for i in range(1, len(data)-1): 
flags.append(data[i] Date: Mon, 13 Jun 2022 23:15:06 +0800 Subject: [PATCH 09/52] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E8=AE=BE=E7=BD=AE?= =?UTF-8?q?=E4=BA=86global=20seed=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../core/drivers/torch_driver/torch_driver.py | 24 ++++--------------- fastNLP/core/drivers/torch_driver/utils.py | 2 +- .../samplers/reproducible_batch_sampler.py | 9 ++++--- fastNLP/core/samplers/reproducible_sampler.py | 5 ++-- fastNLP/core/samplers/unrepeated_sampler.py | 5 ++-- tests/core/drivers/torch_driver/test_ddp.py | 23 +++++++++++------- 6 files changed, 28 insertions(+), 40 deletions(-) diff --git a/fastNLP/core/drivers/torch_driver/torch_driver.py b/fastNLP/core/drivers/torch_driver/torch_driver.py index 96529073..fb01b6c3 100644 --- a/fastNLP/core/drivers/torch_driver/torch_driver.py +++ b/fastNLP/core/drivers/torch_driver/torch_driver.py @@ -202,26 +202,12 @@ class TorchDriver(Driver): num_consumed_batches = states.pop('num_consumed_batches') if hasattr(sampler, 'state_dict') and callable(sampler.state_dict): sampler_states = sampler.state_dict() - # 需要针对 num_consumed_samples 做特殊的处理。因为DataLoader存在预取行为,直接使用sampler中的num_consumed_samples - # 会造成多余实际消耗的问题。因为 - num_consumed_samples_array = sampler_states.pop('num_consumed_samples_array', None) - if num_consumed_samples_array is not None: - if isinstance(sampler, ReproducibleSampler): # 如果是 sampler 的话,需要考虑 batch_size 。 - if dataloader_args.batch_size is not None: - num_consumed_batches = num_consumed_batches * dataloader_args.batch_size - else: # 有可能 batch_size 为 None,就只有损失精度了 - logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on `num_consumed_samples`, " - "it may cause missing some samples when reload.") - num_consumed_batches = sampler_states['num_consumed_samples'] - sampler_states['num_consumed_samples'] = num_consumed_samples_array[num_consumed_batches] - assert sampler_states['num_consumed_samples'] != 
-1, "This is a bug, please report." + if dataloader_args.batch_size is not None: + sampler_states['num_consumed_samples'] = sampler.num_replicas * dataloader_args.batch_size \ + * num_consumed_batches else: - if dataloader_args.batch_size is not None: - sampler_states['num_consumed_samples'] = sampler.num_replicas * dataloader_args.batch_size \ - * num_consumed_batches - else: - logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on `num_consumed_samples`, " - "it may cause missing some samples when reload.") + logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on sampler's " + "`num_consumed_samples`, it may cause missing some samples when reload.") states['sampler_states'] = sampler_states else: diff --git a/fastNLP/core/drivers/torch_driver/utils.py b/fastNLP/core/drivers/torch_driver/utils.py index 300bf196..f5a76a9e 100644 --- a/fastNLP/core/drivers/torch_driver/utils.py +++ b/fastNLP/core/drivers/torch_driver/utils.py @@ -283,7 +283,7 @@ def optimizer_state_to_device(state, device): def _check_dataloader_args_for_distributed(args, controller='Trainer'): - if type(args.batch_sampler) is not TorchBatchSampler and (type(args.sampler) not in {TorchRandomSampler, + if type(args.batch_sampler) is not TorchBatchSampler or (type(args.sampler) not in {TorchRandomSampler, TorchSequentialSampler}): mode = 'training' if controller == 'Trainer' else 'evaluation' substitution = 'fastNLP.RandomSampler' if controller == 'Trainer' else 'fastNLP.UnrepeatedSequentialSampler' diff --git a/fastNLP/core/samplers/reproducible_batch_sampler.py b/fastNLP/core/samplers/reproducible_batch_sampler.py index f522f997..50276ba1 100644 --- a/fastNLP/core/samplers/reproducible_batch_sampler.py +++ b/fastNLP/core/samplers/reproducible_batch_sampler.py @@ -13,7 +13,6 @@ from itertools import chain import numpy as np from fastNLP.core.dataset import DataSet -from fastNLP.envs.utils import get_global_seed from fastNLP.core.log import logger 
from .utils import create_array from abc import abstractmethod @@ -171,7 +170,7 @@ class RandomBatchSampler(ReproducibleBatchSampler): :param kwargs: fastNLP 保留使用 """ def __init__(self, dataset, batch_size:int = 32, shuffle: bool = True, - drop_last: bool = False, seed: int = None, **kwargs): + drop_last: bool = False, seed: int = 0, **kwargs): super().__init__() self.dataset = dataset @@ -179,7 +178,7 @@ class RandomBatchSampler(ReproducibleBatchSampler): self.batch_size = batch_size self.shuffle = shuffle self.drop_last = drop_last - self.seed = get_global_seed() if seed is None else seed + self.seed = int(seed) self.num_consumed_samples = kwargs.get("num_consumed_samples", 0) # 总共迭代了多少数据了,包括多卡情况下的其它卡上的输出的数量 @@ -398,7 +397,7 @@ class BucketedBatchSampler(ReproducibleBatchSampler): :param kwargs: fastNLP 保留使用 """ def __init__(self, dataset, length: Union[List[int], str], batch_size:int = 32, num_batch_per_bucket:int = 10, - shuffle: bool = True, drop_last: bool = False, seed: int = None, **kwargs): + shuffle: bool = True, drop_last: bool = False, seed: int = 0, **kwargs): super().__init__() if isinstance(dataset, DataSet) and isinstance(length, str): length = dataset.get_field(length).content @@ -423,7 +422,7 @@ class BucketedBatchSampler(ReproducibleBatchSampler): self.num_batch_per_bucket = num_batch_per_bucket self.shuffle = shuffle self.drop_last = drop_last - self.seed = get_global_seed() if seed is None else seed + self.seed = int(seed) self.num_consumed_samples = kwargs.get("num_consumed_samples", 0) # 总共迭代了多少数据了,包括多卡情况下的其它卡上的输出的数量 diff --git a/fastNLP/core/samplers/reproducible_sampler.py b/fastNLP/core/samplers/reproducible_sampler.py index dc396851..e1a06fa1 100644 --- a/fastNLP/core/samplers/reproducible_sampler.py +++ b/fastNLP/core/samplers/reproducible_sampler.py @@ -12,7 +12,6 @@ import numpy as np from fastNLP.core.log import logger from fastNLP.core.dataset import DataSet -from fastNLP.envs.utils import get_global_seed class ReproducibleSampler: 
@@ -66,11 +65,11 @@ class RandomSampler(ReproducibleSampler): :param seed: 随机数种子。 :param kwargs: 用户不需要使用,fastNLP 内部使用 """ - def __init__(self, dataset, shuffle: bool = True, seed: int = None, **kwargs): + def __init__(self, dataset, shuffle: bool = True, seed: int = 0, **kwargs): super(RandomSampler, self).__init__() self.dataset = dataset self.shuffle = shuffle - self.seed = get_global_seed() if seed is None else seed + self.seed = int(seed) self.num_consumed_samples = kwargs.get("num_consumed_samples", 0) # 总共迭代了多少数据了,包括多卡情况下的其它卡上的输出的数量 diff --git a/fastNLP/core/samplers/unrepeated_sampler.py b/fastNLP/core/samplers/unrepeated_sampler.py index 22207274..e959a4d0 100644 --- a/fastNLP/core/samplers/unrepeated_sampler.py +++ b/fastNLP/core/samplers/unrepeated_sampler.py @@ -7,7 +7,6 @@ __all__ = [ from typing import List, Union from fastNLP.core.dataset import DataSet -from fastNLP.envs.utils import get_global_seed import numpy as np @@ -28,10 +27,10 @@ class UnrepeatedRandomSampler(UnrepeatedSampler): :param seed: 设置的随机数种子 :param kwargs: fastNLP 保留使用 """ - def __init__(self, dataset, shuffle: bool = False, seed: int = None, **kwargs): + def __init__(self, dataset, shuffle: bool = False, seed: int = 0, **kwargs): self.dataset = dataset self.shuffle = shuffle - self.seed = get_global_seed() if seed is None else seed + self.seed = int(seed) # 多卡的相关的参数 self.num_replicas = kwargs.get('num_replicas', 1) diff --git a/tests/core/drivers/torch_driver/test_ddp.py b/tests/core/drivers/torch_driver/test_ddp.py index 74f44c04..46abd84c 100644 --- a/tests/core/drivers/torch_driver/test_ddp.py +++ b/tests/core/drivers/torch_driver/test_ddp.py @@ -1,3 +1,5 @@ +import os + import pytest from pathlib import Path @@ -185,7 +187,7 @@ class TestSetDistReproDataloader: cls.device = [0, 1] def setup_method(self): - self.dataset = TorchNormalDataset(40) + self.dataset = TorchNormalDataset(100) """ 传入的 `dist` 参数为具体的 ReproducibleSampler 或 ReproducibleBatchSampler 的情况 @@ -571,7 +573,7 @@ 
class TestSaveLoad: """ def setup_method(self): - self.dataset = TorchNormalXYDataset(20) + self.dataset = TorchNormalXYDataset(100) @magic_argv_env_context @pytest.mark.parametrize("only_state_dict", ([True, False])) @@ -641,7 +643,7 @@ class TestSaveLoad: rank=driver1.global_rank, pad=True ) - num_consumed_batches = 2 + num_consumed_batches = 4 already_seen_x_set = set() already_seen_y_set = set() @@ -686,7 +688,8 @@ class TestSaveLoad: assert not (replaced_loader is dataloader) assert replaced_loader.batch_sampler is dataloader.batch_sampler assert isinstance(replaced_loader.batch_sampler, BucketedBatchSampler) - assert replaced_loader.batch_sampler.seed == sampler_states["seed"] + if os.environ['FASTNLP_GLOBAL_RANK'] == '0': + assert replaced_loader.batch_sampler.seed == sampler_states["seed"] assert replaced_loader.batch_sampler.num_consumed_samples == num_consumed_batches * 4 * num_replicas # 3. 检查 fp16 是否被加载 @@ -753,7 +756,7 @@ class TestSaveLoad: rank=driver1.global_rank, pad=True ) - num_consumed_batches = 2 + num_consumed_batches = 4 already_seen_x_set = set() already_seen_y_set = set() @@ -792,11 +795,13 @@ class TestSaveLoad: # 2. 
检查 sampler 是否被正确地加载和替换 assert not (replaced_loader is dataloader) assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) - assert replaced_loader.batch_sampler.sampler.seed == sampler_states["seed"] - assert replaced_loader.batch_sampler.sampler.epoch == sampler_states["epoch"] + if os.environ['FASTNLP_GLOBAL_RANK'] == '0': + assert replaced_loader.batch_sampler.sampler.seed == sampler_states["seed"] + assert replaced_loader.batch_sampler.sampler.epoch == sampler_states["epoch"] + assert len(replaced_loader.batch_sampler.sampler.dataset) == sampler_states["length"] + assert replaced_loader.batch_sampler.sampler.shuffle == sampler_states["shuffle"] assert replaced_loader.batch_sampler.sampler.num_consumed_samples == 4 * num_consumed_batches * num_replicas - assert len(replaced_loader.batch_sampler.sampler.dataset) == sampler_states["length"] - assert replaced_loader.batch_sampler.sampler.shuffle == sampler_states["shuffle"] + # 3. 检查 fp16 是否被加载 if fp16: assert not isinstance(driver2.grad_scaler, torch.cuda.amp.GradScaler) From cd0957fb5b7f2646ed1a2e930ea76346cf6e4356 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Tue, 14 Jun 2022 13:19:56 +0000 Subject: [PATCH 10/52] =?UTF-8?q?=E8=B7=9F=E8=BF=9Bpaddle=20jittor=20?= =?UTF-8?q?=E5=85=B3=E4=BA=8E=20set=5Fdist=5Frepro=5Fdataloader=E5=87=BD?= =?UTF-8?q?=E6=95=B0=E4=B8=AD=E7=9A=84=E4=BF=AE=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../drivers/jittor_driver/jittor_driver.py | 24 +---- .../drivers/jittor_driver/single_device.py | 4 +- fastNLP/core/drivers/paddle_driver/fleet.py | 2 + .../drivers/paddle_driver/paddle_driver.py | 24 +---- .../drivers/paddle_driver/single_device.py | 31 +++--- fastNLP/core/drivers/paddle_driver/utils.py | 13 ++- .../core/drivers/paddle_driver/test_fleet.py | 95 ++++++++++++++++++- 7 files changed, 136 insertions(+), 57 deletions(-) diff --git 
a/fastNLP/core/drivers/jittor_driver/jittor_driver.py b/fastNLP/core/drivers/jittor_driver/jittor_driver.py index 63ac6ec4..c2e338bb 100644 --- a/fastNLP/core/drivers/jittor_driver/jittor_driver.py +++ b/fastNLP/core/drivers/jittor_driver/jittor_driver.py @@ -138,26 +138,12 @@ class JittorDriver(Driver): num_consumed_batches = states.pop('num_consumed_batches') if hasattr(sampler, 'state_dict') and callable(sampler.state_dict): sampler_states = sampler.state_dict() - # 需要针对 num_consumed_samples 做特殊的处理。因为DataLoader存在预取行为,直接使用sampler中的num_consumed_samples - # 会造成多余实际消耗的问题。因为 - num_consumed_samples_array = sampler_states.pop('num_consumed_samples_array', None) - if num_consumed_samples_array is not None: - if isinstance(sampler, ReproducibleSampler): # 如果是 sampler 的话,需要考虑 batch_size 。 - if dataloader_args.batch_size is not None: - num_consumed_batches = num_consumed_batches * dataloader_args.batch_size - else: # 有可能 batch_size 为 None,就只有损失精度了 - logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on `num_consumed_samples`, " - "it may cause missing some samples when reload.") - num_consumed_batches = sampler_states['num_consumed_samples'] - sampler_states['num_consumed_samples'] = num_consumed_samples_array[num_consumed_batches] - assert sampler_states['num_consumed_samples'] != -1, "This is a bug, please report." 
+ if dataloader_args.batch_size is not None: + sampler_states['num_consumed_samples'] = sampler.num_replicas * dataloader_args.batch_size \ + * num_consumed_batches else: - if dataloader_args.batch_size is not None: - sampler_states['num_consumed_samples'] = sampler.num_replicas * dataloader_args.batch_size \ - * num_consumed_batches - else: - logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on `num_consumed_samples`, " - "it may cause missing some samples when reload.") + logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on `num_consumed_samples`, " + "it may cause missing some samples when reload.") states['sampler_states'] = sampler_states else: diff --git a/fastNLP/core/drivers/jittor_driver/single_device.py b/fastNLP/core/drivers/jittor_driver/single_device.py index 4e9b3447..386f8694 100644 --- a/fastNLP/core/drivers/jittor_driver/single_device.py +++ b/fastNLP/core/drivers/jittor_driver/single_device.py @@ -118,14 +118,14 @@ class JittorSingleDriver(JittorDriver): if args.sampler is None: sampler = RandomSampler(args.dataset, args.shuffle) return replace_sampler(dataloader, sampler) - elif isinstance(args.sampler, JittorRandomSampler): + elif type(args.sampler) is JittorRandomSampler: if getattr(args.sampler, '_num_samples', None) is None \ and getattr(args.sampler, 'rep', False) is False: # 如果本来就是随机的,并且没有定制,直接替换掉吧。 sampler = RandomSampler(args.sampler.dataset, shuffle=True) logger.debug("Replace jittor RandomSampler into fastNLP RandomSampler.") return replace_sampler(dataloader, sampler) - elif isinstance(args.sampler, JittorSequentialSampler): + elif type(args.sampler) is JittorSequentialSampler: # 需要替换为不要 shuffle 的。 sampler = RandomSampler(args.sampler.dataset, shuffle=False) logger.debug("Replace jittor SequentialSampler into fastNLP RandomSampler.") diff --git a/fastNLP/core/drivers/paddle_driver/fleet.py b/fastNLP/core/drivers/paddle_driver/fleet.py index d19da9fe..9344f515 100644 --- 
a/fastNLP/core/drivers/paddle_driver/fleet.py +++ b/fastNLP/core/drivers/paddle_driver/fleet.py @@ -73,6 +73,7 @@ from .utils import ( _FleetWrappingModel, replace_sampler, replace_batch_sampler, + _check_dataloader_args_for_distributed ) from .dist_utils import fastnlp_paddle_all_gather, fastnlp_paddle_broadcast_object @@ -453,6 +454,7 @@ class PaddleFleetDriver(PaddleDriver): ) return replace_sampler(dataloader, sampler) else: + _check_dataloader_args_for_distributed(args, controller='Trainer') sampler = RandomSampler( dataset=args.dataset, shuffle=args.shuffle, diff --git a/fastNLP/core/drivers/paddle_driver/paddle_driver.py b/fastNLP/core/drivers/paddle_driver/paddle_driver.py index 4527f1ed..6ef0aaae 100644 --- a/fastNLP/core/drivers/paddle_driver/paddle_driver.py +++ b/fastNLP/core/drivers/paddle_driver/paddle_driver.py @@ -222,26 +222,12 @@ class PaddleDriver(Driver): num_consumed_batches = states.pop("num_consumed_batches") if hasattr(sampler, "state_dict") and callable(sampler.state_dict): sampler_states = sampler.state_dict() - # 如果有,需要针对 num_consumed_samples 做特殊的处理。因为DataLoader存在预取行为,直接使用sampler中的num_consumed_samples - # 会造成多余实际消耗的问题。 - num_consumed_samples_array = sampler_states.pop('num_consumed_samples_array', None) - if num_consumed_samples_array is not None: - if isinstance(sampler, ReproducibleSampler): # 如果是 sampler 的话,需要考虑 batch_size 。 - if dataloader_args.batch_size is not None: - num_consumed_batches = num_consumed_batches * dataloader_args.batch_size - else: # 有可能 batch_size 为 None,就只有损失精度了 - logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on `num_consumed_samples`, " - "it may cause missing some samples when reload.") - num_consumed_batches = sampler_states['num_consumed_samples'] - sampler_states['num_consumed_samples'] = num_consumed_samples_array[num_consumed_batches] - assert sampler_states['num_consumed_samples'] != -1, "This is a bug, please report." 
+ if dataloader_args.batch_size is not None: + sampler_states['num_consumed_samples'] = sampler.num_replicas * dataloader_args.batch_size \ + * num_consumed_batches else: - if dataloader_args.batch_size is not None: - sampler_states['num_consumed_samples'] = sampler.num_replicas * dataloader_args.batch_size \ - * num_consumed_batches - else: - logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on `num_consumed_samples`, " - "it may cause missing some samples when reload.") + logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on `num_consumed_samples`, " + "it may cause missing some samples when reload.") else: raise RuntimeError( "The sampler has no `state_dict()` method, it will fail to recover to the specific batch.") diff --git a/fastNLP/core/drivers/paddle_driver/single_device.py b/fastNLP/core/drivers/paddle_driver/single_device.py index ba404814..4105bf20 100644 --- a/fastNLP/core/drivers/paddle_driver/single_device.py +++ b/fastNLP/core/drivers/paddle_driver/single_device.py @@ -26,6 +26,11 @@ if _NEED_IMPORT_PADDLE: import paddle from paddle import DataParallel from paddle.fluid.reader import _DatasetKind + from paddle.io import ( + RandomSampler as PaddleRandomSampler, + SequenceSampler as PaddleSequenialSampler, + BatchSampler as PaddleBatchSampler, + ) __all__ = [ "PaddleSingleDriver", @@ -122,19 +127,21 @@ class PaddleSingleDriver(PaddleDriver): return replace_sampler(dataloader, sampler) if reproducible: - if isinstance(args.sampler, paddle.io.RandomSampler): - if getattr(args.sampler, '_num_samples', None) is None \ - and getattr(args.sampler, 'replacements', False) is False \ - and getattr(args.sampler, 'generator', None) is None: - # 如果本来就是随机的,并且没有定制,直接替换掉。 - sampler = RandomSampler(args.sampler.data_source, shuffle=True) - logger.debug("Replace paddle RandomSampler into fastNLP RandomSampler.") + if type(args.batch_sampler) is PaddleBatchSampler: + if type(args.sampler) is PaddleRandomSampler: 
+ if isinstance(args.sampler, PaddleRandomSampler): + if getattr(args.sampler, '_num_samples', None) is None \ + and getattr(args.sampler, 'replacements', False) is False \ + and getattr(args.sampler, 'generator', None) is None: + # 如果本来就是随机的,并且没有定制,直接替换掉。 + sampler = RandomSampler(args.sampler.data_source, shuffle=True) + logger.debug("Replace paddle RandomSampler into fastNLP RandomSampler.") + return replace_sampler(dataloader, sampler) + elif type(args.sampler) is PaddleSequenialSampler: + # 需要替换为不要 shuffle 的。 + sampler = RandomSampler(args.sampler.data_source, shuffle=False) + logger.debug("Replace paddle SequentialSampler into fastNLP RandomSampler.") return replace_sampler(dataloader, sampler) - elif isinstance(args.sampler, paddle.io.SequenceSampler): - # 需要替换为不要 shuffle 的。 - sampler = RandomSampler(args.sampler.data_source, shuffle=False) - logger.debug("Replace paddle SequentialSampler into fastNLP RandomSampler.") - return replace_sampler(dataloader, sampler) batch_sampler = ReproduceBatchSampler( batch_sampler=args.batch_sampler, batch_size=args.batch_size, diff --git a/fastNLP/core/drivers/paddle_driver/utils.py b/fastNLP/core/drivers/paddle_driver/utils.py index 9f35cf2a..1191b60c 100644 --- a/fastNLP/core/drivers/paddle_driver/utils.py +++ b/fastNLP/core/drivers/paddle_driver/utils.py @@ -23,7 +23,7 @@ if _NEED_IMPORT_PADDLE: import paddle from paddle import nn from paddle.nn import Layer - from paddle.io import DataLoader, BatchSampler + from paddle.io import DataLoader, BatchSampler, RandomSampler, SequenceSampler from paddle.amp import auto_cast, GradScaler else: from fastNLP.core.utils.dummy_class import DummyClass as Layer @@ -249,3 +249,14 @@ def optimizer_state_to_device(state, device): else: new_state[name] = param return new_state + +def _check_dataloader_args_for_distributed(args, controller='Trainer'): + if type(args.batch_sampler) is not BatchSampler or (type(args.sampler) not in {RandomSampler, + SequenceSampler}): + mode = 'training' if 
controller == 'Trainer' else 'evaluation' + substitution = 'fastNLP.RandomSampler' if controller == 'Trainer' else 'fastNLP.UnrepeatedSequentialSampler' + raise TypeError(f"Using customized ``batch_sampler`` or ``sampler`` for distributed {mode} may cause " + f"unpredictable problems, because fastNLP will substitute the dataloader's sampler into " + f"``{substitution}``. The customized sampler should set for distributed running " + f"before initializing ``{controller}`` , and then set the " + f"parameter ``use_dist_sampler`` of ``{controller}`` to ``False``.") diff --git a/tests/core/drivers/paddle_driver/test_fleet.py b/tests/core/drivers/paddle_driver/test_fleet.py index b303249c..80d494da 100644 --- a/tests/core/drivers/paddle_driver/test_fleet.py +++ b/tests/core/drivers/paddle_driver/test_fleet.py @@ -11,11 +11,12 @@ from fastNLP.core.samplers import ( ) from tests.helpers.models.paddle_model import PaddleNormalModel_Classification_1 from tests.helpers.datasets.paddle_data import PaddleNormalDataset, PaddleNormalXYDataset -from tests.helpers.utils import magic_argv_env_context +from tests.helpers.utils import magic_argv_env_context, recover_logger from fastNLP.envs.distributed import rank_zero_rm from fastNLP import prepare_paddle_dataloader from fastNLP.core.drivers.paddle_driver.dist_utils import fastnlp_paddle_all_gather from fastNLP.envs.imports import _NEED_IMPORT_PADDLE +from fastNLP import logger if _NEED_IMPORT_PADDLE: import paddle import paddle.distributed as dist @@ -532,7 +533,6 @@ class TestSetDistReproDataloader: num_samples = 200 dataset = PaddleNormalXYDataset(num_samples) dl = prepare_paddle_dataloader(dataset, shuffle=shuffle, batch_size=batch_size, drop_last=drop_last) - model = PaddleNormalModel_Classification_1(10, 32) self.driver.setup() dl = self.driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) @@ -581,8 +581,6 @@ class TestSetDistReproDataloader: sampler = BucketedBatchSampler(dataset, 
length=dataset._data, batch_size=batch_size, drop_last=drop_last, shuffle=shuffle, num_batch_per_bucket=2) dl = prepare_paddle_dataloader(dataset, batch_sampler=sampler) - model = PaddleNormalModel_Classification_1(10, 32) - device = [0, 1] self.driver.setup() dl = self.driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) @@ -619,6 +617,95 @@ class TestSetDistReproDataloader: finally: dist.barrier() + @magic_argv_env_context + @recover_logger + @pytest.mark.parametrize("inherit", ([True, False])) + def test_customized_batch_sampler_dataloader(self, inherit): + try: + logger.set_stdout('raw', level='info') + # 需要检验一下 set_dist_repro_dataloader 是否可以在定制 batch_sampler 的情况下正确运行 + num_samples = 10 + dataset = PaddleNormalXYDataset(num_samples) + if inherit: + class BatchSampler(paddle.io.BatchSampler): + def __init__(self, dataset, batch_size): + self.dataset = dataset + self.batch_size = batch_size + + def __iter__(self): + indices = list(range(len(dataset))) + for i in range(len(self)): + start = i * self.batch_size + end = (i + 1) * self.batch_size + return indices[start:end] + + def __len__(self): + return (len(self.dataset)+self.batch_size-1)//self.batch_size + else: + class BatchSampler: + def __init__(self, dataset, batch_size): + self.dataset = dataset + self.batch_size = batch_size + + def __iter__(self): + indices = list(range(len(dataset))) + for i in range(len(self)): + start = i * self.batch_size + end = (i + 1) * self.batch_size + return indices[start:end] + + def __len__(self): + return (len(self.dataset)+self.batch_size-1)//self.batch_size + + dl = prepare_paddle_dataloader(dataset, batch_sampler=BatchSampler(dataset, batch_size=4)) + self.driver.setup() + with pytest.raises(TypeError): + dl = self.driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=False) + finally: + pass + + @magic_argv_env_context + @recover_logger + @pytest.mark.parametrize("inherit", ([True, False])) + def 
test_customized_sampler_dataloader(self, inherit): + try: + logger.set_stdout('raw', level='info') + # 需要检验一下 set_dist_repro_dataloader 是否可以在定制 batch_sampler 的情况下正确运行 + num_samples = 10 + dataset = PaddleNormalXYDataset(num_samples) + if inherit: + class Sampler(paddle.io.RandomSampler): + def __init__(self, dataset, batch_size): + self.dataset = dataset + self.batch_size = batch_size + + def __iter__(self): + indices = list(range(len(dataset))) + return iter(indices) + + def __len__(self): + return len(self.dataset) + else: + class Sampler: + def __init__(self, dataset, batch_size): + self.dataset = dataset + self.batch_size = batch_size + + def __iter__(self): + indices = list(range(len(dataset))) + return iter(indices) + + def __len__(self): + return len(self.dataset) + + dl = prepare_paddle_dataloader(dataset, sampler=Sampler(dataset, batch_size=4)) + self.driver.setup() + # TODO 这里需要raise + with pytest.raises(TypeError): + dl = self.driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=False) + finally: + pass + ############################################################################ # From 55d8738def5f9c38241e44e718e06abe108b8390 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Tue, 14 Jun 2022 15:03:41 +0000 Subject: [PATCH 11/52] deepspeed driver init --- .../core/drivers/torch_driver/deepspeed.py | 165 ++++++++++++++++++ fastNLP/core/drivers/torch_driver/utils.py | 87 ++++++++- fastNLP/envs/imports.py | 1 + 3 files changed, 252 insertions(+), 1 deletion(-) create mode 100644 fastNLP/core/drivers/torch_driver/deepspeed.py diff --git a/fastNLP/core/drivers/torch_driver/deepspeed.py b/fastNLP/core/drivers/torch_driver/deepspeed.py new file mode 100644 index 00000000..0e0637a1 --- /dev/null +++ b/fastNLP/core/drivers/torch_driver/deepspeed.py @@ -0,0 +1,165 @@ +from typing import Optional, Union, Callable, Dict, Tuple, Sequence, List +from .torch_driver import TorchDriver +from .utils import _create_default_config 
+from fastNLP.core.utils import auto_param_call +from fastNLP.core.utils.utils import _get_fun_msg +from fastNLP.core.samplers import ReproducibleBatchSampler, ReproducibleSampler, re_instantiate_sampler, \ + ReproduceBatchSampler +from fastNLP.core.log import logger +from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_DEEPSPEED + +if _NEED_IMPORT_TORCH: + import pytorch_lightning + import torch + from torch.nn import DataParallel + +if _NEED_IMPORT_DEEPSPEED: + import deepspeed + +__all__ = [ + "DeepSpeedDriver", +] + +class DeepSpeedDriver(TorchDriver): + def __init__(self, model, fp16, strategy, **kwargs): + super(DeepSpeedDriver, self).__init__(model, fp16) + + self.strategy = strategy + + def setup(self): + + if self.strategy == "deepspeed": + self.config = _create_default_config(stage=2) + elif self.strategy == "deepspeed_stage_1": + self.config = _create_default_config(stage=1) + elif self.strategy == "deepspeed_stage_2": + self.config = _create_default_config(stage=2) + elif self.strategy == "deepspeed_stage_2_offload": + self.config = _create_default_config(stage=2, offload_optimizer=True) + elif self.strategy == "deepspeed_stage_3": + self.config = _create_default_config(stage=3) + elif self.strategy == "deepspeed_stage_3_offload": + self.config = _create_default_config( + stage=3, + offload_optimizer=True, + offload_parameters=True, + ) + elif self.strategy == "deepspeed_stage_3_offload_nvme": + self.config = _create_default_config( + stage=3, + offload_optimizer=True, + offload_parameters=True, + remote_device="nvme", + offload_params_device="nvme", + offload_optimizer_device="nvme", + ) + for i, optimizer in enumerate(self.optimizers): + # TODO 多个 optimizer + engine, optimizer_ds, _, _ = deepspeed.initialize( + model=self.model, + optimizer=optimizer, + config=self.config + ) + self._optimizers[i] = optimizer_ds + self.model = engine + + self._set_deepspeed_activation_checkpointing() + + def model_call(self, batch, fn: Callable, 
signature_fn: Optional[Callable]) -> Dict: + if isinstance(batch, Dict) and not self.wo_auto_param_call: + return auto_param_call(fn, batch, signature_fn=signature_fn) + else: + return fn(batch) + + def get_model_call_fn(self, fn: str) -> Tuple: + if hasattr(self.model, fn): + fn = getattr(self.model, fn) + if not callable(fn): + raise RuntimeError(f"The `{fn}` attribute is not `Callable`.") + logger.debug(f'Use {_get_fun_msg(fn, with_fp=False)}...') + return fn, None + elif fn in {"train_step", "evaluate_step"}: + logger.debug(f'Use {_get_fun_msg(self.model.forward, with_fp=False)}...') + return self.model, self.model.forward + else: + raise RuntimeError(f"There is no `{fn}` method in your {type(self.model)}.") + + def set_dist_repro_dataloader(self, dataloader, + dist: Union[str, ReproducibleBatchSampler, ReproducibleSampler] = None, + reproducible: bool = False): + return dataloader + # 如果 dist 为 ReproducibleBatchSampler, ReproducibleIterator 说明是在断点重训时 driver.load_checkpoint 函数调用; + if isinstance(dist, ReproducibleBatchSampler): + return replace_batch_sampler(dataloader, dist) + elif isinstance(dist, ReproducibleSampler): + return replace_sampler(dataloader, dist) + + # 如果 dist 为 str 或者 None,说明是在 trainer 初试化时调用; + args = self.get_dataloader_args(dataloader) + if isinstance(args.batch_sampler, ReproducibleBatchSampler): + batch_sampler = re_instantiate_sampler(args.batch_sampler) + return replace_batch_sampler(dataloader, batch_sampler) + elif isinstance(args.sampler, ReproducibleSampler): + sampler = re_instantiate_sampler(args.sampler) + return replace_sampler(dataloader, sampler) + + if reproducible: + if type(args.batch_sampler) is TorchBatchSampler: + if type(args.sampler) is TorchRandomSampler: + if getattr(args.sampler, '_num_samples', None) is None \ + and getattr(args.sampler, 'replacements', False) is False \ + and getattr(args.sampler, 'generator', None) is None: + # 如果本来就是随机的,并且没有定制,直接替换掉吧。 + sampler = RandomSampler(args.sampler.data_source, 
shuffle=True) + logger.debug("Replace torch RandomSampler into fastNLP RandomSampler.") + return replace_sampler(dataloader, sampler) + elif type(args.sampler) is TorchSequentialSampler: + # 需要替换为不要 shuffle 的。 + sampler = RandomSampler(args.sampler.data_source, shuffle=False) + logger.debug("Replace torch SequentialSampler into fastNLP RandomSampler.") + return replace_sampler(dataloader, sampler) + batch_sampler = ReproduceBatchSampler( + batch_sampler=args.batch_sampler, + batch_size=args.batch_size, + drop_last=args.drop_last + ) + return replace_batch_sampler(dataloader, batch_sampler) + else: + return dataloader + + def unwrap_model(self): + r""" + :return: 返回原本的模型,例如没有被 ``DataParallel`` 包裹; + """ + if isinstance(self.model, deepspeed.DeepSpeedEngine): + print(type(self.model.module), self.model.module) + return self.model.module + if isinstance(self.model, torch.nn.DataParallel) or \ + isinstance(self.model, torch.nn.parallel.DistributedDataParallel): + return self.model.module + else: + return self.model + + @property + def data_device(self): + r""" + 注意单卡模式下使用 ``driver.data_device`` 等价于使用 ``driver.model_device``; + """ + return self.model_device + + def is_distributed(self): + r""" + :return: 返回当前使用的 driver 是否是分布式的 driver,对于 ``TorchSingleDriver`` 来说直接返回 ``False``; + """ + return False + + def _set_deepspeed_activation_checkpointing(self): + if self.config.get("activation_checkpointing"): + checkpoint_config = self.config["activation_checkpointing"] + deepspeed.checkpointing.configure( + mpu_=None, + partition_activations=checkpoint_config.get("partition_activations"), + contiguous_checkpointing=checkpoint_config.get("contiguous_memory_optimization"), + checkpoint_in_cpu=checkpoint_config.get("cpu_checkpointing"), + profile=checkpoint_config.get("profile"), + ) \ No newline at end of file diff --git a/fastNLP/core/drivers/torch_driver/utils.py b/fastNLP/core/drivers/torch_driver/utils.py index f5a76a9e..da621b60 100644 --- 
a/fastNLP/core/drivers/torch_driver/utils.py +++ b/fastNLP/core/drivers/torch_driver/utils.py @@ -1,6 +1,6 @@ import os -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, Union from enum import IntEnum import contextlib import random @@ -292,3 +292,88 @@ def _check_dataloader_args_for_distributed(args, controller='Trainer'): f"``{substitution}``. The customized sampler should set for distributed running " f"before initializing ``{controller}`` , and then set the " f"parameter ``use_dist_sampler`` of ``{controller}`` to ``False``.") + +def _create_default_config( + zero_optimization: bool = True, + zero_allow_untested_optimizer: bool = True, + logging_batch_size_per_gpu: Union[str, int] = "auto", + partition_activations: bool = False, + cpu_checkpointing: bool = False, + contiguous_memory_optimization: bool = False, + synchronize_checkpoint_boundary: bool = False, + offload_optimizer: bool = False, + offload_parameters: bool = False, + offload_params_device: str = "cpu", + nvme_path: str = "/local_nvme", + params_buffer_count: int = 5, + params_buffer_size: int = 100_000_000, + max_in_cpu: int = 1_000_000_000, + offload_optimizer_device: str = "cpu", + optimizer_buffer_count: int = 4, + pin_memory: bool = False, + block_size: int = 1048576, + queue_depth: int = 8, + single_submit: bool = False, + overlap_events: bool = True, + thread_count: int = 1, + stage: int = 2, + contiguous_gradients: bool = True, + overlap_comm: bool = True, + allgather_partitions: bool = True, + reduce_scatter: bool = True, + allgather_bucket_size: int = 200_000_000, + reduce_bucket_size: int = 200_000_000, + sub_group_size: int = 1_000_000_000_000, +) -> Dict: + cfg = { + "activation_checkpointing": { + "partition_activations": partition_activations, + "cpu_checkpointing": cpu_checkpointing, + "contiguous_memory_optimization": contiguous_memory_optimization, + "synchronize_checkpoint_boundary": synchronize_checkpoint_boundary, + }, + "aio": { + "block_size": 
block_size, + "queue_depth": queue_depth, + "single_submit": single_submit, + "overlap_events": overlap_events, + "thread_count": thread_count, + }, + } + zero_kwargs = { + "stage": stage, + "contiguous_gradients": contiguous_gradients, + "overlap_comm": overlap_comm, + "allgather_partitions": allgather_partitions, + "reduce_scatter": reduce_scatter, + "allgather_bucket_size": allgather_bucket_size, + "reduce_bucket_size": reduce_bucket_size, + "sub_group_size": sub_group_size, + } + if zero_optimization: + zero_config = zero_kwargs + + if offload_optimizer: + zero_config["offload_optimizer"] = { + "device": offload_optimizer_device, + "nvme_path": nvme_path, + "buffer_count": optimizer_buffer_count, + "pin_memory": pin_memory, + } + if offload_parameters: + zero_config["offload_param"] = { + "device": offload_params_device, + "nvme_path": nvme_path, + "buffer_count": params_buffer_count, + "buffer_size": params_buffer_size, + "max_in_cpu": max_in_cpu, + "pin_memory": pin_memory, + } + cfg = { + "zero_allow_untested_optimizer": zero_allow_untested_optimizer, + "zero_optimization": zero_config, + **cfg, + } + if logging_batch_size_per_gpu != "auto": + cfg = {"train_micro_batch_size_per_gpu": logging_batch_size_per_gpu, **cfg} + return cfg \ No newline at end of file diff --git a/fastNLP/envs/imports.py b/fastNLP/envs/imports.py index 77b642c3..52f49e59 100644 --- a/fastNLP/envs/imports.py +++ b/fastNLP/envs/imports.py @@ -22,5 +22,6 @@ _NEED_IMPORT_FAIRSCALE = not _IS_WINDOWS and _module_available("fairscale") and _NEED_IMPORT_TORCH = _module_available("torch") and 'torch' in need_import _NEED_IMPORT_JITTOR = _module_available("jittor") and 'jittor' in need_import _NEED_IMPORT_PADDLE = _module_available("paddle") and 'paddle' in need_import +_NEED_IMPORT_DEEPSPEED = _module_available("deepspeed") and 'deepspeed' in need_import _TORCH_GREATER_EQUAL_1_8 = _NEED_IMPORT_TORCH and _compare_version("torch", operator.ge, "1.8.0") From 
024fecfbf3e6837ff19ba9e128e62df0f881c8aa Mon Sep 17 00:00:00 2001 From: YWMditto Date: Thu, 16 Jun 2022 22:30:02 +0800 Subject: [PATCH 12/52] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E4=BA=86=20overfit=20?= =?UTF-8?q?=E7=9A=84=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/controllers/trainer.py | 52 +++++++++++-------- fastNLP/core/dataloaders/__init__.py | 7 ++- fastNLP/core/dataloaders/utils.py | 41 ++++++++++++++- .../core/drivers/torch_driver/torch_driver.py | 3 +- .../test_trainer_w_evaluator_torch.py | 42 +++++++++++++++ .../test_trainer_wo_evaluator_torch.py | 27 ++++++++++ 6 files changed, 146 insertions(+), 26 deletions(-) diff --git a/fastNLP/core/controllers/trainer.py b/fastNLP/core/controllers/trainer.py index f92611dd..00a18f1d 100644 --- a/fastNLP/core/controllers/trainer.py +++ b/fastNLP/core/controllers/trainer.py @@ -35,6 +35,7 @@ from fastNLP.envs import rank_zero_call from fastNLP.core.log import logger from fastNLP.envs import FASTNLP_MODEL_FILENAME, FASTNLP_CHECKPOINT_FILENAME from fastNLP.core.utils.exceptions import EarlyStopException +from fastNLP.core.dataloaders import OverfitDataLoader class Trainer(TrainerEventTrigger): @@ -356,6 +357,7 @@ class Trainer(TrainerEventTrigger): optimizers, device: Optional[Union[int, List[int], str]] = "cpu", n_epochs: int = 20, + overfit_batches: int = 0, evaluate_dataloaders=None, batch_step_fn: Optional[Callable] = None, evaluate_batch_step_fn: Optional[Callable] = None, @@ -469,9 +471,6 @@ class Trainer(TrainerEventTrigger): n_batches=n_batches ) - if metrics is None and evaluate_dataloaders is not None: - raise ValueError("You have set 'evaluate_dataloaders' but forget to set 'metrics'.") - if metrics is not None and evaluate_dataloaders is None: raise ValueError("You have set 'metrics' but forget to set 'evaluate_dataloaders'.") @@ -495,33 +494,44 @@ class Trainer(TrainerEventTrigger): else: _dist_sampler = None + self.dataloader = 
self.train_dataloader + self.driver.set_deterministic_dataloader(self.dataloader) + + self.dataloader = self.driver.set_dist_repro_dataloader(dataloader=self.train_dataloader, dist=_dist_sampler, + reproducible=self.callback_manager._need_reproducible_sampler) + # 进行 overfit 相关的设置; + if overfit_batches != 0: + self.dataloader = OverfitDataLoader(self.dataloader, overfit_batches) + self.overfit_batches = overfit_batches + self.evaluator = None self.monitor = monitor self.larger_better = larger_better - if metrics is not None and evaluate_dataloaders is not None: - check_evaluate_every(evaluate_every) - progress_bar = kwargs.get('progress_bar', 'auto') # 如果不为 - if not (isinstance(progress_bar, str) or progress_bar is None): # 应该是ProgressCallback,获取其名称。 - progress_bar = progress_bar.name - self.evaluator = Evaluator(model=model, dataloaders=evaluate_dataloaders, metrics=metrics, - driver=self.driver, evaluate_batch_step_fn=evaluate_batch_step_fn, - evaluate_fn=evaluate_fn, input_mapping=evaluate_input_mapping, - output_mapping=evaluate_output_mapping, fp16=fp16, verbose=0, - use_dist_sampler=kwargs.get("evaluate_use_dist_sampler", use_dist_sampler), - progress_bar=progress_bar, - check_dataloader_legality=kwargs.get('check_dataloader_legality', True)) + if metrics is not None: + if overfit_batches != 0: + logger.warning("Notice you are trying to 'overfit' the model and also using 'metrics', it may cause error " + "because 'metrics' are prepared for 'evaluate_dataloaders', but now 'train_dataloader'.") + evaluate_dataloaders = self.dataloader + if evaluate_dataloaders is not None: + check_evaluate_every(evaluate_every) + progress_bar = kwargs.get('progress_bar', 'auto') # 如果不为 + if not (isinstance(progress_bar, str) or progress_bar is None): # 应该是ProgressCallback,获取其名称。 + progress_bar = progress_bar.name + self.evaluator = Evaluator(model=model, dataloaders=evaluate_dataloaders, metrics=metrics, + driver=self.driver, evaluate_batch_step_fn=evaluate_batch_step_fn, + 
evaluate_fn=evaluate_fn, input_mapping=evaluate_input_mapping, + output_mapping=evaluate_output_mapping, fp16=fp16, verbose=0, + use_dist_sampler=kwargs.get("evaluate_use_dist_sampler", use_dist_sampler), + progress_bar=progress_bar, + check_dataloader_legality=kwargs.get('check_dataloader_legality', True)) + else: + raise ValueError("You have set 'evaluate_dataloaders' but forget to set 'metrics'.") if train_fn is not None and not isinstance(train_fn, str): raise TypeError("Parameter `train_fn` can only be `str` type when it is not None.") self._train_step, self._train_step_signature_fn = self.driver.get_model_call_fn("train_step" if train_fn is None else train_fn) self.train_fn = train_fn - self.dataloader = self.train_dataloader - self.driver.set_deterministic_dataloader(self.dataloader) - - self.dataloader = self.driver.set_dist_repro_dataloader(dataloader=self.train_dataloader, dist=_dist_sampler, - reproducible=self.callback_manager._need_reproducible_sampler) - self.evaluate_batch_step_fn = evaluate_batch_step_fn self.kwargs = kwargs diff --git a/fastNLP/core/dataloaders/__init__.py b/fastNLP/core/dataloaders/__init__.py index 84f8b288..b18e371c 100644 --- a/fastNLP/core/dataloaders/__init__.py +++ b/fastNLP/core/dataloaders/__init__.py @@ -7,10 +7,13 @@ __all__ = [ 'prepare_paddle_dataloader', 'prepare_torch_dataloader', - "prepare_dataloader" + "prepare_dataloader", + + "OverfitDataLoader" ] from .jittor_dataloader import JittorDataLoader, prepare_jittor_dataloader from .torch_dataloader import TorchDataLoader, prepare_torch_dataloader, MixDataLoader from .paddle_dataloader import PaddleDataLoader, prepare_paddle_dataloader -from .prepare_dataloader import prepare_dataloader \ No newline at end of file +from .prepare_dataloader import prepare_dataloader +from .utils import OverfitDataLoader \ No newline at end of file diff --git a/fastNLP/core/dataloaders/utils.py b/fastNLP/core/dataloaders/utils.py index d905101f..9f8b608c 100644 --- 
a/fastNLP/core/dataloaders/utils.py +++ b/fastNLP/core/dataloaders/utils.py @@ -1,4 +1,4 @@ -from typing import Callable, Any, Union +from typing import Callable, Any, Union, Sequence from abc import ABC import inspect import ast @@ -6,7 +6,8 @@ import ast from ..log import logger from ..utils.cache_results import get_func_calls, truncate_start_blanks __all__ = [ - "indice_collate_wrapper" + "indice_collate_wrapper", + "OverfitDataLoader" ] @@ -111,6 +112,42 @@ class HasLenGetitemType(ABC): return NotImplemented +class OverfitDataLoader: + """ + 实现一个简单的迭代器来模拟实际的 dataloader,从给定的 dataloader 中取出部分数据,来让 Trainer 实现 overfit 的功能; + """ + + def __init__(self, dataloader, overfit_batches: int): + self.dataloader = dataloader # 需要将实际的 dataloader 挂载到该对象上,从而应付一些对于实际的 dataloader 的操作; + self.batches = [] + + if isinstance(overfit_batches, int): + if overfit_batches < 0 and overfit_batches != -1: + raise ValueError("Parameter 'overfit_batches' can only be '-1' when it is smaller than 0, and it means" + "that you use all the data to check whether it could be overfitted.") + else: + raise TypeError("Parameter 'overfit_batches' can only be 'int' type, check the parameter you input into 'Trainer'.") + + if overfit_batches > len(dataloader): + logger.warning("Parameter 'overfit_batches' is bigger than the real length of 'train dataloader'.") + + for idx, batch in enumerate(dataloader): + + if idx < overfit_batches or overfit_batches == -1: + self.batches.append(batch) + + def __len__(self): + return len(self.batches) + + def __iter__(self): + for batch in self.batches: + yield batch + + def __getattr__(self, item): + return getattr(self.dataloader, item) + + + if __name__ == '__main__': def demo(*args, **kwargs): pass diff --git a/fastNLP/core/drivers/torch_driver/torch_driver.py b/fastNLP/core/drivers/torch_driver/torch_driver.py index fb01b6c3..84e4aa70 100644 --- a/fastNLP/core/drivers/torch_driver/torch_driver.py +++ b/fastNLP/core/drivers/torch_driver/torch_driver.py @@ -31,6 
+31,7 @@ from fastNLP.envs import rank_zero_call from fastNLP.envs import FASTNLP_GLOBAL_RANK, FASTNLP_MODEL_FILENAME, FASTNLP_CHECKPOINT_FILENAME from fastNLP.core.log import logger from fastNLP.core.samplers import ReproducibleBatchSampler, ReproducibleSampler, ReproduceBatchSampler, RandomSampler +from fastNLP.core.dataloaders import OverfitDataLoader class TorchDriver(Driver): @@ -92,7 +93,7 @@ class TorchDriver(Driver): self.grad_scaler.update() def check_dataloader_legality(self, dataloader): - if not isinstance(dataloader, DataLoader): + if not isinstance(dataloader, DataLoader) and not isinstance(dataloader, OverfitDataLoader): raise TypeError(f"{DataLoader} is expected, instead of `{type(dataloader)}`") if len(dataloader) == 0: logger.rank_zero_warning("Your dataloader is empty, which is not recommended because it " diff --git a/tests/core/controllers/test_trainer_w_evaluator_torch.py b/tests/core/controllers/test_trainer_w_evaluator_torch.py index 752e06d8..a70766f5 100644 --- a/tests/core/controllers/test_trainer_w_evaluator_torch.py +++ b/tests/core/controllers/test_trainer_w_evaluator_torch.py @@ -286,6 +286,9 @@ def test_trainer_specific_params_1( assert trainer.driver.non_blocking is False assert trainer.driver.wo_auto_param_call is True + if dist.is_initialized(): + dist.destroy_process_group() + @pytest.mark.torch @pytest.mark.parametrize("driver,device", [("torch", [0, 1])]) # ("torch", [0, 1]),("torch", 1) @@ -332,5 +335,44 @@ def test_trainer_specific_params_2( assert _ddp_kwargs.get("broadcast_buffers") is True assert _ddp_kwargs.get("find_unused_parameters") is True + if dist.is_initialized(): + dist.destroy_process_group() +@pytest.mark.torch +@pytest.mark.parametrize("overfit_batches,num_train_batch_per_epoch", [(-1, -1), (0, -1), (3, 10), (6, -1)]) +@magic_argv_env_context +def test_trainer_w_evaluator_overfit_torch( + model_and_optimizers: TrainerParameters, + overfit_batches, + num_train_batch_per_epoch +): + """ + 测试一些特殊的参数是否能够正确地传递; + 
""" + trainer = Trainer( + model=model_and_optimizers.model, + driver="torch", + device=0, + overfit_batches=overfit_batches, + optimizers=model_and_optimizers.optimizers, + train_dataloader=model_and_optimizers.train_dataloader, + evaluate_dataloaders={"dl": model_and_optimizers.evaluate_dataloaders}, + input_mapping=model_and_optimizers.input_mapping, + output_mapping=model_and_optimizers.output_mapping, + metrics=model_and_optimizers.metrics, + n_epochs=2, + output_from_new_proc="all", + evaluate_every=-1, + + torch_kwargs={ + "non_blocking": False, + "set_grad_to_none": True + } + + ) + + trainer.run(num_train_batch_per_epoch=num_train_batch_per_epoch) + + if dist.is_initialized(): + dist.destroy_process_group() \ No newline at end of file diff --git a/tests/core/controllers/test_trainer_wo_evaluator_torch.py b/tests/core/controllers/test_trainer_wo_evaluator_torch.py index be04bcd3..a7eeeda6 100644 --- a/tests/core/controllers/test_trainer_wo_evaluator_torch.py +++ b/tests/core/controllers/test_trainer_wo_evaluator_torch.py @@ -361,5 +361,32 @@ def test_torch_wo_auto_param_call( dist.destroy_process_group() +# 测试 accumulation_steps; +@pytest.mark.torch +@pytest.mark.parametrize("overfit_batches,num_train_batch_per_epoch", [(-1, -1), (0, -1), (3, 10), (6, -1)]) +@magic_argv_env_context +def test_trainer_overfit_torch( + model_and_optimizers: TrainerParameters, + overfit_batches, + num_train_batch_per_epoch +): + trainer = Trainer( + model=model_and_optimizers.model, + driver="torch", + device=0, + overfit_batches=overfit_batches, + optimizers=model_and_optimizers.optimizers, + train_dataloader=model_and_optimizers.train_dataloader, + evaluate_dataloaders=model_and_optimizers.evaluate_dataloaders, + input_mapping=model_and_optimizers.input_mapping, + output_mapping=model_and_optimizers.output_mapping, + metrics=model_and_optimizers.metrics, + output_from_new_proc="all", + n_epochs=2, + ) + + trainer.run(num_train_batch_per_epoch=num_train_batch_per_epoch) + if 
dist.is_initialized(): + dist.destroy_process_group() From a6fc5225cd20638af77756456881c21f53e9cd44 Mon Sep 17 00:00:00 2001 From: YWMditto Date: Thu, 16 Jun 2022 22:37:43 +0800 Subject: [PATCH 13/52] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E4=BA=86=20overfit=5Fb?= =?UTF-8?q?atches=20=E7=9A=84=E6=B3=A8=E9=87=8A?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/controllers/trainer.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/fastNLP/core/controllers/trainer.py b/fastNLP/core/controllers/trainer.py index 00a18f1d..1259a38e 100644 --- a/fastNLP/core/controllers/trainer.py +++ b/fastNLP/core/controllers/trainer.py @@ -119,6 +119,19 @@ class Trainer(TrainerEventTrigger): 对于使用 ``TorchDDPDriver`` 的更多细节,请见 :class:`~fastNLP.core.drivers.torch_driver.TorchDDPDriver`。 :param n_epochs: 训练总共的 epoch 的数量,默认为 20;也可以通过 ``n_batches`` 参数设置总共迭代多少个 ``batch`` 。 + :param overfit_batches: 使用该参数来支持 '过拟合' 的功能;支持的值为 ``-1``、``0`` 或者 大于 0 的整数,表示使用多少 batch 的数据 + 来进行过拟合训练;其中 0 为 默认值表示不进行过拟合;-1 表示使用所有的数据进行训练; + + .. note:: + + 您可以使用该参数来简单地查看您的模型是否是 '正确的',即您的模型是否能够在少量的数据上快速进行收敛,从而说明损失函数以及优化器等 + 没有问题。当使用该参数时,我们会直接从 ``train_dataloader`` 中提取固定大小的 batch,然后在之后的所有 epoch 中都是用这些数据来进行过拟合训练; + + .. 
warning:: + + 在使用该参数时,您同样可以指定 ``metrics`` 参数来进行简单的验证,当该参数和 ``metrics`` 同时出现时,我们会将 evaluate_dataloaders + 直接替换为在过拟合中所使用的训练数据;因此您需要保证您的 ``metrics`` 是能够在 ``train_dataloader`` 上使用的; + :param evaluate_dataloaders: 验证数据集,其可以是单独的一个数据集,也可以是多个数据集;当为多个数据集时,注意其必须是 Dict;默认 为 None; :param batch_step_fn: 定制每次训练时前向运行一个 batch 的数据所执行的函数。该函数应接受两个参数为 ``trainer`` 和 ``batch``, From 399065ae04652b2ef8c71c91b78d1053b853067d Mon Sep 17 00:00:00 2001 From: yhcc Date: Fri, 17 Jun 2022 00:12:53 +0800 Subject: [PATCH 14/52] update overfit_batches --- fastNLP/core/callbacks/progress_callback.py | 19 +++++---- fastNLP/core/controllers/trainer.py | 32 +++++++------- fastNLP/core/dataloaders/utils.py | 15 ++----- fastNLP/core/drivers/torch_driver/ddp.py | 3 -- fastNLP/core/drivers/torch_driver/utils.py | 46 +++++++++++---------- fastNLP/core/metrics/metric.py | 10 ++++- 6 files changed, 64 insertions(+), 61 deletions(-) diff --git a/fastNLP/core/callbacks/progress_callback.py b/fastNLP/core/callbacks/progress_callback.py index 890864ec..2f1d2b17 100644 --- a/fastNLP/core/callbacks/progress_callback.py +++ b/fastNLP/core/callbacks/progress_callback.py @@ -20,6 +20,7 @@ class ProgressCallback(HasMonitorCallback): must_have_monitor=must_have_monitor) self.best_monitor_epoch = -1 self.best_monitor_step = -1 + self.best_results = None def record_better_monitor(self, trainer): self.best_monitor_step = trainer.global_forward_batches @@ -29,6 +30,8 @@ class ProgressCallback(HasMonitorCallback): if self.best_monitor_epoch != -1: msg = f"The best performance for monitor {self._real_monitor}:{self.monitor_value} was achieved in" \ f" Epoch:{self.best_monitor_epoch}, Global Batch:{self.best_monitor_step}." 
+ if self.best_results is not None: + msg = msg + ' The evaluation result: \n' + str(self.best_results) logger.info(msg) @property @@ -147,9 +150,11 @@ class RichCallback(ProgressCallback): results = {key:trainer.driver.tensor_to_numeric(value) for key, value in results.items() if not key.startswith('_')} if self.format_json: - self.progress_bar.console.print_json(json.dumps(results)) + results = json.dumps(results) + self.progress_bar.console.print_json(results) else: self.progress_bar.print(results) + self.best_results = results def clear_tasks(self): for key, taskid in self.task2id.items(): @@ -227,9 +232,9 @@ class RawTextCallback(ProgressCallback): results = {key:trainer.driver.tensor_to_numeric(value) for key, value in results.items() if not key.startswith('_')} if self.format_json: - logger.info(json.dumps(results)) - else: - logger.info(results) + results = json.dumps(results) + logger.info(results) + self.best_results = results @property def name(self): # progress bar的名称 @@ -316,9 +321,9 @@ class TqdmCallback(ProgressCallback): results = {key:trainer.driver.tensor_to_numeric(value) for key, value in results.items() if not key.startswith('_')} if self.format_json: - logger.info(json.dumps(results)) - else: - logger.info(results) + results = json.dumps(results) + logger.info(results) + self.best_results = results def clear_tasks(self): for key, taskid in self.task2id.items(): diff --git a/fastNLP/core/controllers/trainer.py b/fastNLP/core/controllers/trainer.py index 1259a38e..0f22e63c 100644 --- a/fastNLP/core/controllers/trainer.py +++ b/fastNLP/core/controllers/trainer.py @@ -119,19 +119,6 @@ class Trainer(TrainerEventTrigger): 对于使用 ``TorchDDPDriver`` 的更多细节,请见 :class:`~fastNLP.core.drivers.torch_driver.TorchDDPDriver`。 :param n_epochs: 训练总共的 epoch 的数量,默认为 20;也可以通过 ``n_batches`` 参数设置总共迭代多少个 ``batch`` 。 - :param overfit_batches: 使用该参数来支持 '过拟合' 的功能;支持的值为 ``-1``、``0`` 或者 大于 0 的整数,表示使用多少 batch 的数据 - 来进行过拟合训练;其中 0 为 默认值表示不进行过拟合;-1 表示使用所有的数据进行训练; - - .. 
note:: - - 您可以使用该参数来简单地查看您的模型是否是 '正确的',即您的模型是否能够在少量的数据上快速进行收敛,从而说明损失函数以及优化器等 - 没有问题。当使用该参数时,我们会直接从 ``train_dataloader`` 中提取固定大小的 batch,然后在之后的所有 epoch 中都是用这些数据来进行过拟合训练; - - .. warning:: - - 在使用该参数时,您同样可以指定 ``metrics`` 参数来进行简单的验证,当该参数和 ``metrics`` 同时出现时,我们会将 evaluate_dataloaders - 直接替换为在过拟合中所使用的训练数据;因此您需要保证您的 ``metrics`` 是能够在 ``train_dataloader`` 上使用的; - :param evaluate_dataloaders: 验证数据集,其可以是单独的一个数据集,也可以是多个数据集;当为多个数据集时,注意其必须是 Dict;默认 为 None; :param batch_step_fn: 定制每次训练时前向运行一个 batch 的数据所执行的函数。该函数应接受两个参数为 ``trainer`` 和 ``batch``, @@ -258,7 +245,20 @@ class Trainer(TrainerEventTrigger): 注意该参数仅当 ``Trainer`` 内置的 ``Evaluator`` 不为 None 时且有需要该参数但是没有设置该参数的 *callback* 实例才有效; - :param n_batches: 迭代多少个 ``batch`` 的训练结束。当该值不为 -1 时,将直接忽略 ``n_epochs`` 的值。 + :param n_batches: 总共迭代多少个 ``batch`` 的训练结束。当该值不为 -1 时,将直接忽略 ``n_epochs`` 的值。 + :param overfit_batches: 使用该参数来支持 '过拟合' 的功能;支持的值为 ``-1``、``0`` 或者 大于 0 的整数,表示使用多少个 batch 的数据 + 来进行过拟合训练;其中 0 为表示不进行任何操作;-1 表示使用所有的数据进行训练; + + .. note:: + + 您可以使用该参数来简单地查看您的模型是否是 '正确的',即您的模型是否能够在少量的数据上快速进行收敛,从而说明损失函数以及优化器等 + 没有问题。当使用该参数时,我们会直接从 ``train_dataloader`` 中提取固定数量的 batch,然后在所有 epoch 中都是用这些数据 + 来进行训练; + + .. 
warning:: + + 在使用该参数时,您同样可以指定 ``metrics`` 参数来进行简单的验证,当该参数和 ``metrics`` 同时出现时,我们会将 evaluate_dataloaders + 直接替换为在过拟合中所使用的训练数据;因此您需要保证您的 ``metrics`` 是能够在 ``train_dataloader`` 上使用的; :param marker: 用于标记一个 ``Trainer`` 实例,从而在用户调用 ``Trainer.on`` 函数时,标记该函数属于哪一个具体的 ``Trainer`` 实例;默认为 None; @@ -370,7 +370,6 @@ class Trainer(TrainerEventTrigger): optimizers, device: Optional[Union[int, List[int], str]] = "cpu", n_epochs: int = 20, - overfit_batches: int = 0, evaluate_dataloaders=None, batch_step_fn: Optional[Callable] = None, evaluate_batch_step_fn: Optional[Callable] = None, @@ -387,6 +386,7 @@ class Trainer(TrainerEventTrigger): monitor: Union[str, Callable] = None, larger_better: bool = True, n_batches: int = -1, + overfit_batches: int = 0, marker: Optional[str] = None, **kwargs ): @@ -522,8 +522,6 @@ class Trainer(TrainerEventTrigger): self.larger_better = larger_better if metrics is not None: if overfit_batches != 0: - logger.warning("Notice you are trying to 'overfit' the model and also using 'metrics', it may cause error " - "because 'metrics' are prepared for 'evaluate_dataloaders', but now 'train_dataloader'.") evaluate_dataloaders = self.dataloader if evaluate_dataloaders is not None: check_evaluate_every(evaluate_every) diff --git a/fastNLP/core/dataloaders/utils.py b/fastNLP/core/dataloaders/utils.py index 9f8b608c..7ce9f153 100644 --- a/fastNLP/core/dataloaders/utils.py +++ b/fastNLP/core/dataloaders/utils.py @@ -120,20 +120,13 @@ class OverfitDataLoader: def __init__(self, dataloader, overfit_batches: int): self.dataloader = dataloader # 需要将实际的 dataloader 挂载到该对象上,从而应付一些对于实际的 dataloader 的操作; self.batches = [] + self.overfit_batches = int(overfit_batches) - if isinstance(overfit_batches, int): - if overfit_batches < 0 and overfit_batches != -1: - raise ValueError("Parameter 'overfit_batches' can only be '-1' when it is smaller than 0, and it means" - "that you use all the data to check whether it could be overfitted.") - else: - raise TypeError("Parameter 
'overfit_batches' can only be 'int' type, check the parameter you input into 'Trainer'.") - - if overfit_batches > len(dataloader): - logger.warning("Parameter 'overfit_batches' is bigger than the real length of 'train dataloader'.") + if self.overfit_batches > len(dataloader): + logger.warning("Parameter 'overfit_batches' is bigger than the length of 'train_dataloader'.") for idx, batch in enumerate(dataloader): - - if idx < overfit_batches or overfit_batches == -1: + if idx < self.overfit_batches or self.overfit_batches < -1: self.batches.append(batch) def __len__(self): diff --git a/fastNLP/core/drivers/torch_driver/ddp.py b/fastNLP/core/drivers/torch_driver/ddp.py index 45a1a61a..affb5ded 100644 --- a/fastNLP/core/drivers/torch_driver/ddp.py +++ b/fastNLP/core/drivers/torch_driver/ddp.py @@ -140,9 +140,6 @@ if _NEED_IMPORT_TORCH: import torch.distributed as dist from torch.nn.parallel import DistributedDataParallel from torch.utils.data import BatchSampler - from torch.utils.data import RandomSampler as TorchRandomSampler - from torch.utils.data import SequentialSampler as TorchSequentialSampler - from torch.utils.data import BatchSampler as TorchBatchSampler __all__ = [ 'TorchDDPDriver' diff --git a/fastNLP/core/drivers/torch_driver/utils.py b/fastNLP/core/drivers/torch_driver/utils.py index f5a76a9e..9bf0da2d 100644 --- a/fastNLP/core/drivers/torch_driver/utils.py +++ b/fastNLP/core/drivers/torch_driver/utils.py @@ -181,18 +181,16 @@ def replace_sampler(dataloader: "DataLoader", sampler): instance_attrs = {k: v for k, v in vars(dataloader).items() if not k.startswith('_')} # 'multiprocessing_context' 是 user-defined function; - instance_attrs["multiprocessing_context"] = dataloader.multiprocessing_context + if getattr(dataloader, 'multiprocessing_context', None) is not None: + instance_attrs["multiprocessing_context"] = dataloader.multiprocessing_context # 拿到 dataloader '__init__' 函数的默认函数签名; init_params = dict(inspect.signature(dataloader.__init__).parameters) 
- # 这里为什么要单独弄的原因在于,用户在定制自己的 dataloader 的同时可能为了方便只设定一些参数,而后面直接使用 **kwargs 的方式,这时如果 - # 其在初始化自己的 dataloader 实例的时候加入了一些其它的新的参数(首先这一步是必要的,因为我们只能通过这样加 sampler;另一方面,用户 - # 可能确实通过 **kwargs 加入了一些新的参数),如果假设用户是这样使用的: "super().__init__(**kwargs)",那么我们就只能去 DataLoader - # 中寻找; + # 防止用户的 DataLoader 是继承了 pytorch 的 DataLoader,然后还是使用了 **kwargs 的方式对父类传参数 has_variadic_kwargs = any(v.kind is v.VAR_KEYWORD for k, v in init_params.items()) - if has_variadic_kwargs: - # 这里之所以这样写是因为用户自己定制的 Dataloader 中名字一样的参数所设置的默认值可能不同;因此不能直接使用 update 覆盖掉了; + if has_variadic_kwargs and isinstance(dataloader, DataLoader): + # 防止用户写入了 super().__init__(**kwargs) for key, value in dict(inspect.signature(DataLoader.__init__).parameters).items(): if key not in init_params and key != 'self': init_params[key] = value @@ -204,7 +202,8 @@ def replace_sampler(dataloader: "DataLoader", sampler): non_default_params.add("dataset") reconstruct_args = {k: v for k, v in instance_attrs.items() if k in non_default_params} - reconstruct_args.update({"sampler": sampler, "shuffle": False, "batch_sampler": None}) + if isinstance(dataloader, DataLoader): + reconstruct_args.update({"sampler": sampler, "shuffle": False, "batch_sampler": None}) batch_sampler = getattr(dataloader, "batch_sampler") if batch_sampler is not None and isinstance(batch_sampler, ReproducibleBatchSampler): @@ -218,35 +217,31 @@ def replace_sampler(dataloader: "DataLoader", sampler): and p.name not in reconstruct_args } - # 这种错误针对的是 __init__ 中的参数没有用同样名字的 self 挂上; + # 在 attribute 中没有找到这些参数,导致了没有办法重新初始化 if required_args: required_args = sorted(required_args) dataloader_self_name = dataloader.__class__.__name__ raise Exception( - f"Trying to inject `DistributedSampler` into the `{dataloader_self_name}` instance. " - "This would fail as some of the `__init__` arguments are not available as instance attributes. " - f"The missing attributes are {required_args}. 
" - f"HINT: If you wrote the `{dataloader_self_name}` class, define `self.missing_arg_name` or " - "manually add the `DistributedSampler` as: " - f"`{dataloader_self_name}(dataset, sampler=DistributedSampler(dataset))`." + f"Need to inject arguments {required_args} into the __init__ of `{dataloader_self_name}`. " + f"But they are not found in the attribute of `{dataloader_self_name}`, fastNLP cannot determine its " + f"value when try to reinitialize `{dataloader_self_name}`, please add `{required_args}` to be " + f"`{dataloader_self_name}`'s attribute." ) # 这种错误针对的是传入的 dataloader 不是直接的 DataLoader,而是定制了 DataLoader,但是 __init__ 中没有 **kwargs; if not has_variadic_kwargs: - # the dataloader signature does not allow keyword arguments that need to be passed missing_kwargs = reconstruct_args.keys() - init_params.keys() if missing_kwargs: missing_kwargs = sorted(missing_kwargs) dataloader_self_name = dataloader.__class__.__name__ raise Exception( - f"Trying to inject `DistributedSampler` into the `{dataloader_self_name}` instance. " - "This would fail as it doesn't expose all its attributes in the `__init__` signature. " - f"The missing arguments are {missing_kwargs}. " - f"HINT: If you wrote the `{dataloader_self_name}` class, add the `__init__` arguments or " - "manually add the `DistributedSampler` as: " - f"`{dataloader_self_name}(dataset, sampler=DistributedSampler(dataset))`." + f"The parameter:{missing_kwargs} needed to reinitialize `{dataloader_self_name}` is not found." 
) + # 如果没有kwargs,则保证一下只传入需要的参数 + if not isinstance(dataloader, DataLoader): + reconstruct_args = {key:value for key,value in reconstruct_args.items() if key in init_params} + return type(dataloader)(**reconstruct_args) @@ -260,6 +255,13 @@ def replace_batch_sampler(dataloader, new_batch_sampler): params_keys.remove(k) params = {k: getattr(dataloader, k) for k in params_keys} params["batch_sampler"] = new_batch_sampler + + if not isinstance(dataloader, DataLoader): + init_params = dict(inspect.signature(dataloader.__init__).parameters) + has_variadic_kwargs = any(v.kind is v.VAR_KEYWORD for k, v in init_params.items()) + if not has_variadic_kwargs: + params = {key:value for key,value in params.items() if key in init_params} + return type(dataloader)(**params) diff --git a/fastNLP/core/metrics/metric.py b/fastNLP/core/metrics/metric.py index 1a69e80c..e2dc3dda 100644 --- a/fastNLP/core/metrics/metric.py +++ b/fastNLP/core/metrics/metric.py @@ -98,7 +98,7 @@ class Metric: return _wrap_get_metric def __setattr__(self, key, value): - if hasattr(self, '_cannot_change_element') and self._cannot_change_element is True: + if getattr(self, '_cannot_change_element', False): if key in self.elements and isinstance(value, (float, int, bool)): self.elements[key].fill_value(value) return @@ -109,6 +109,14 @@ class Metric: raise RuntimeError("Please use register_element() function to add Element.") object.__setattr__(self, key, value) + # 当调用 __getattribute__ 没有找到时才会触发这个, 保留这个的目的只是为了防止 ide 的 warning + def __getattr__(self, name: str) -> Element: + if 'elements' in self.__dict__: + elements = self.__dict__['elements'] + if name in elements: + return elements[name] + raise AttributeError("`{}` object has no attribute `{}`.".format(type(self).__name__, name)) + def _wrap_update(self, update): @functools.wraps(update) def _wrap_update(*args, **kwargs): From bb68856f85483cada933c8903f8264e8638c3e53 Mon Sep 17 00:00:00 2001 From: YWMditto Date: Fri, 17 Jun 2022 00:53:13 +0800 Subject: 
[PATCH 15/52] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E5=AF=B9=20overfit=20?= =?UTF-8?q?=E5=A4=9A=E5=8D=A1=E7=9A=84=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/dataloaders/utils.py | 3 ++- tests/core/controllers/test_trainer_w_evaluator_torch.py | 7 +++++-- tests/core/controllers/test_trainer_wo_evaluator_torch.py | 7 +++++-- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/fastNLP/core/dataloaders/utils.py b/fastNLP/core/dataloaders/utils.py index 7ce9f153..06f09da3 100644 --- a/fastNLP/core/dataloaders/utils.py +++ b/fastNLP/core/dataloaders/utils.py @@ -1,3 +1,4 @@ +import os from typing import Callable, Any, Union, Sequence from abc import ABC import inspect @@ -126,7 +127,7 @@ class OverfitDataLoader: logger.warning("Parameter 'overfit_batches' is bigger than the length of 'train_dataloader'.") for idx, batch in enumerate(dataloader): - if idx < self.overfit_batches or self.overfit_batches < -1: + if idx < self.overfit_batches or self.overfit_batches <= -1: self.batches.append(batch) def __len__(self): diff --git a/tests/core/controllers/test_trainer_w_evaluator_torch.py b/tests/core/controllers/test_trainer_w_evaluator_torch.py index a70766f5..78eff36c 100644 --- a/tests/core/controllers/test_trainer_w_evaluator_torch.py +++ b/tests/core/controllers/test_trainer_w_evaluator_torch.py @@ -340,10 +340,13 @@ def test_trainer_specific_params_2( @pytest.mark.torch +@pytest.mark.parametrize("driver,device", [("torch", 1), ("torch", [0, 1])]) # ("torch", [0, 1]),("torch", 1) @pytest.mark.parametrize("overfit_batches,num_train_batch_per_epoch", [(-1, -1), (0, -1), (3, 10), (6, -1)]) @magic_argv_env_context def test_trainer_w_evaluator_overfit_torch( model_and_optimizers: TrainerParameters, + driver, + device, overfit_batches, num_train_batch_per_epoch ): @@ -352,8 +355,8 @@ def test_trainer_w_evaluator_overfit_torch( """ trainer = Trainer( model=model_and_optimizers.model, - 
driver="torch", - device=0, + driver=driver, + device=device, overfit_batches=overfit_batches, optimizers=model_and_optimizers.optimizers, train_dataloader=model_and_optimizers.train_dataloader, diff --git a/tests/core/controllers/test_trainer_wo_evaluator_torch.py b/tests/core/controllers/test_trainer_wo_evaluator_torch.py index a7eeeda6..ce67814e 100644 --- a/tests/core/controllers/test_trainer_wo_evaluator_torch.py +++ b/tests/core/controllers/test_trainer_wo_evaluator_torch.py @@ -363,17 +363,20 @@ def test_torch_wo_auto_param_call( # 测试 accumulation_steps; @pytest.mark.torch +@pytest.mark.parametrize("driver,device", [("torch", 1), ("torch", [0, 1])]) @pytest.mark.parametrize("overfit_batches,num_train_batch_per_epoch", [(-1, -1), (0, -1), (3, 10), (6, -1)]) @magic_argv_env_context def test_trainer_overfit_torch( model_and_optimizers: TrainerParameters, + driver, + device, overfit_batches, num_train_batch_per_epoch ): trainer = Trainer( model=model_and_optimizers.model, - driver="torch", - device=0, + driver=driver, + device=device, overfit_batches=overfit_batches, optimizers=model_and_optimizers.optimizers, train_dataloader=model_and_optimizers.train_dataloader, From 1a2eb93ab4113717b47169f3eebe1f32bcf1496d Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Fri, 17 Jun 2022 21:55:59 +0800 Subject: [PATCH 16/52] =?UTF-8?q?deepspeed=E5=9F=BA=E6=9C=AC=E5=8A=9F?= =?UTF-8?q?=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../core/drivers/torch_driver/deepspeed.py | 308 +++++++++++------- fastNLP/envs/imports.py | 2 +- 2 files changed, 194 insertions(+), 116 deletions(-) diff --git a/fastNLP/core/drivers/torch_driver/deepspeed.py b/fastNLP/core/drivers/torch_driver/deepspeed.py index 0e0637a1..298945ed 100644 --- a/fastNLP/core/drivers/torch_driver/deepspeed.py +++ b/fastNLP/core/drivers/torch_driver/deepspeed.py @@ -1,17 +1,16 @@ +import os + from typing import Optional, Union, 
Callable, Dict, Tuple, Sequence, List from .torch_driver import TorchDriver -from .utils import _create_default_config -from fastNLP.core.utils import auto_param_call -from fastNLP.core.utils.utils import _get_fun_msg -from fastNLP.core.samplers import ReproducibleBatchSampler, ReproducibleSampler, re_instantiate_sampler, \ - ReproduceBatchSampler +from .ddp import TorchDDPDriver +from .utils import _create_default_config, _DDPWrappingModel from fastNLP.core.log import logger +from fastNLP.envs.env import FASTNLP_DISTRIBUTED_CHECK from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_DEEPSPEED if _NEED_IMPORT_TORCH: - import pytorch_lightning import torch - from torch.nn import DataParallel + import torch.distributed as dist if _NEED_IMPORT_DEEPSPEED: import deepspeed @@ -20,13 +19,160 @@ __all__ = [ "DeepSpeedDriver", ] -class DeepSpeedDriver(TorchDriver): - def __init__(self, model, fp16, strategy, **kwargs): - super(DeepSpeedDriver, self).__init__(model, fp16) - +class DeepSpeedDriver(TorchDDPDriver): + # TODO fp16 load_config + def __init__( + self, + model, + parallel_device: Union[List["torch.device"], "torch.device"], + is_pull_by_torch_run = False, + fp16: bool = False, + strategy= "deepspeed", + **kwargs + ): + assert _NEED_IMPORT_DEEPSPEED, "deepspeed is not imported." + assert not dist.is_initialized(), "DeepSpeedDriver does not support initialize distributed by user." + TorchDriver.__init__(self, model=model, fp16=False, **kwargs) + self.fp16 = fp16 + + # 如果用户自己在外面初始化 DDP,那么其一定是通过 python -m torch.distributed.launch 拉起的; + self.is_pull_by_torch_run = is_pull_by_torch_run + self.parallel_device = parallel_device + if not is_pull_by_torch_run and parallel_device is None: + raise ValueError( + "Parameter `parallel_device` can not be None when using `TorchDDPDriver`. 
This error is caused " + "when your value of parameter `device` is `None` in your `Trainer` instance.") + + # 注意我们在 initialize_torch_driver 中的逻辑就是如果是 is_pull_by_torch_run,那么我们就直接把 parallel_device 置为当前进程的gpu; + if is_pull_by_torch_run: + self.model_device = parallel_device + else: + # 我们的 model_device 一定是 torch.device,而不是一个 list; + self.model_device = parallel_device[self.local_rank] + + # 暂时不允许在外面初始化 + self.outside_ddp = False + self._data_device = kwargs.get("data_device", None) + if isinstance(self._data_device, int): + if self._data_device < 0: + raise ValueError("Parameter `data_device` can not be smaller than 0.") + _could_use_device_num = torch.cuda.device_count() + if self._data_device >= _could_use_device_num: + raise ValueError("The gpu device that parameter `device` specifies is not existed.") + self._data_device = torch.device(f"cuda:{self._data_device}") + elif isinstance(self._data_device, str): + self._data_device = torch.device(self._data_device) + elif self._data_device is not None and not isinstance(self._data_device, torch.device): + raise ValueError("Parameter `device` is wrong type, please check our documentation for the right use.") + + self._master_port = None + # world_size 表示的就是全局的显卡的数量; + self.world_size = None # int(os.environ.get("WORLD_SIZE")) len(self.parallel_device) + self.global_rank = 0 + + self.output_from_new_proc = kwargs.get("output_from_new_proc", "only_error") + assert isinstance(self.output_from_new_proc, str), "Parameter `output_from_new_proc` can only be `str` type." 
+ if self.output_from_new_proc not in {"all", "ignore", "only_error"}: + os.makedirs(name=self.output_from_new_proc, exist_ok=True) + self.output_from_new_proc = os.path.abspath(self.output_from_new_proc) + + self._has_setup = False # 设置这一参数是因为 evaluator 中也会进行 setup 操作,但是显然是不需要的也不应该的; + self._has_ddpwrapped = False # 判断传入的模型是否经过 _has_ddpwrapped 包裹; self.strategy = strategy + self._ds_kwargs = kwargs.get("deepspeed_kwargs", {}) + def setup(self): + r""" + 准备分布式环境,该函数主要做以下两件事情: + + 1. 开启多进程,每个 gpu 设备对应单独的一个进程; + 2. 每个进程将模型迁移到自己对应的 ``gpu`` 设备上;然后使用 ``DistributedDataParallel`` 包裹模型; + """ + if len(self.optimizers) != 1: + raise ValueError("Multi optimizers is not supported for DeepSpeedDriver right now.") + if self._has_setup: + return + self.setup_config() + self._has_setup = True + # 如果用户需要使用多机模式,那么一定进入到这里; + if self.is_pull_by_torch_run: + # dist.get_world_size() 只能在 dist.init_process_group 初始化之后进行调用; + self.world_size = int(os.environ.get("WORLD_SIZE")) + self.global_rank = int(os.environ.get("RANK")) + logger.info(f"World size: {self.world_size}, Global rank: {self.global_rank}") + + if not dist.is_initialized(): + deepspeed.init_distributed("nccl", distributed_port=self.master_port) + + os.environ["fastnlp_torch_launch_not_ddp"] = "yes" + + # 进入到这里的情况时: + # dist.is_initialized 一定为 False; + # 一定是单机; + # self.parallel_device 一定是 List[torch.device]; + else: + if not dist.is_initialized(): + # 这里主要的问题在于要区分 rank0 和其它 rank 的情况; + self.world_size = len(self.parallel_device) + self.open_subprocess() + self.global_rank = self.local_rank # rank 一定是通过环境变量去获取的; + deepspeed.init_distributed("nccl", distributed_port=self.master_port) + # 用户在这个 trainer 前面又初始化了一个 trainer,并且使用的是 TorchDDPDriver; + else: + # 如果 `dist.is_initialized() == True`,那么说明 TorchDDPDriver 在之前已经初始化并且已经 setup 过一次,那么我们需要保证现在 + # 使用的(即之后的)TorchDDPDriver 的设置和第一个 TorchDDPDriver 是完全一样的; + pre_num_processes = int(os.environ[FASTNLP_DISTRIBUTED_CHECK]) + if pre_num_processes != len(self.parallel_device): + raise 
RuntimeError( + "Notice you are using `TorchDDPDriver` after one instantiated `TorchDDPDriver`, it is not" + "allowed that your second `TorchDDPDriver` has a new setting of parameters " + "`num_nodes` and `num_processes`.") + self.world_size = dist.get_world_size() + self.global_rank = dist.get_rank() + + torch.cuda.set_device(self.model_device) + self.configure_ddp() + + self.barrier() + # 初始化 self._pids,从而使得每一个进程都能接受到 rank0 的 send 操作; + self._pids = [torch.tensor(0, dtype=torch.int).to(self.data_device) for _ in range(dist.get_world_size())] + dist.all_gather(self._pids, torch.tensor(os.getpid(), dtype=torch.int).to(self.data_device)) + local_world_size = int(os.environ.get("LOCAL_WORLD_SIZE")) if "LOCAL_WORLD_SIZE" in os.environ else None + if local_world_size is None: + local_world_size = torch.tensor(int(os.environ.get("LOCAL_RANK")), dtype=torch.int).to(self.data_device) + dist.all_reduce(local_world_size, op=dist.ReduceOp.MAX) + local_world_size = local_world_size.tolist() + 1 + + node_rank = self.global_rank // local_world_size + self._pids = self._pids[node_rank * local_world_size: (node_rank + 1) * local_world_size] + self._pids = self.tensor_to_numeric(self._pids) + + def configure_ddp(self): + + # 设置 deepspeed + if not isinstance(self.model, deepspeed.DeepSpeedEngine): + self.model, ds_optimizer, _, _ = deepspeed.initialize( + model=_DDPWrappingModel(self.model), + optimizer=self.optimizers[0], + config=self.config + ) + # TODO 是否有必要 + self._optimizers = [ds_optimizer] + + if self.config.get("activation_checkpointing"): + checkpoint_config = self.config["activation_checkpointing"] + deepspeed.checkpointing.configure( + mpu_=None, + partition_activations=checkpoint_config.get("partition_activations"), + contiguous_checkpointing=checkpoint_config.get("contiguous_memory_optimization"), + checkpoint_in_cpu=checkpoint_config.get("cpu_checkpointing"), + profile=checkpoint_config.get("profile"), + ) + + self._has_ddpwrapped = True + + def setup_config(self): 
if self.strategy == "deepspeed": self.config = _create_default_config(stage=2) @@ -53,113 +199,45 @@ class DeepSpeedDriver(TorchDriver): offload_params_device="nvme", offload_optimizer_device="nvme", ) - for i, optimizer in enumerate(self.optimizers): - # TODO 多个 optimizer - engine, optimizer_ds, _, _ = deepspeed.initialize( - model=self.model, - optimizer=optimizer, - config=self.config - ) - self._optimizers[i] = optimizer_ds - self.model = engine - - self._set_deepspeed_activation_checkpointing() - - def model_call(self, batch, fn: Callable, signature_fn: Optional[Callable]) -> Dict: - if isinstance(batch, Dict) and not self.wo_auto_param_call: - return auto_param_call(fn, batch, signature_fn=signature_fn) - else: - return fn(batch) - - def get_model_call_fn(self, fn: str) -> Tuple: - if hasattr(self.model, fn): - fn = getattr(self.model, fn) - if not callable(fn): - raise RuntimeError(f"The `{fn}` attribute is not `Callable`.") - logger.debug(f'Use {_get_fun_msg(fn, with_fp=False)}...') - return fn, None - elif fn in {"train_step", "evaluate_step"}: - logger.debug(f'Use {_get_fun_msg(self.model.forward, with_fp=False)}...') - return self.model, self.model.forward else: - raise RuntimeError(f"There is no `{fn}` method in your {type(self.model)}.") - - def set_dist_repro_dataloader(self, dataloader, - dist: Union[str, ReproducibleBatchSampler, ReproducibleSampler] = None, - reproducible: bool = False): - return dataloader - # 如果 dist 为 ReproducibleBatchSampler, ReproducibleIterator 说明是在断点重训时 driver.load_checkpoint 函数调用; - if isinstance(dist, ReproducibleBatchSampler): - return replace_batch_sampler(dataloader, dist) - elif isinstance(dist, ReproducibleSampler): - return replace_sampler(dataloader, dist) - - # 如果 dist 为 str 或者 None,说明是在 trainer 初试化时调用; - args = self.get_dataloader_args(dataloader) - if isinstance(args.batch_sampler, ReproducibleBatchSampler): - batch_sampler = re_instantiate_sampler(args.batch_sampler) - return replace_batch_sampler(dataloader, 
batch_sampler) - elif isinstance(args.sampler, ReproducibleSampler): - sampler = re_instantiate_sampler(args.sampler) - return replace_sampler(dataloader, sampler) - - if reproducible: - if type(args.batch_sampler) is TorchBatchSampler: - if type(args.sampler) is TorchRandomSampler: - if getattr(args.sampler, '_num_samples', None) is None \ - and getattr(args.sampler, 'replacements', False) is False \ - and getattr(args.sampler, 'generator', None) is None: - # 如果本来就是随机的,并且没有定制,直接替换掉吧。 - sampler = RandomSampler(args.sampler.data_source, shuffle=True) - logger.debug("Replace torch RandomSampler into fastNLP RandomSampler.") - return replace_sampler(dataloader, sampler) - elif type(args.sampler) is TorchSequentialSampler: - # 需要替换为不要 shuffle 的。 - sampler = RandomSampler(args.sampler.data_source, shuffle=False) - logger.debug("Replace torch SequentialSampler into fastNLP RandomSampler.") - return replace_sampler(dataloader, sampler) - batch_sampler = ReproduceBatchSampler( - batch_sampler=args.batch_sampler, - batch_size=args.batch_size, - drop_last=args.drop_last - ) - return replace_batch_sampler(dataloader, batch_sampler) - else: - return dataloader + raise ValueError(f"Unknown deepspeed strategy {self.strategy}.") + + self.config.setdefault("train_micro_batch_size_per_gpu", 1) + self.config.setdefault("steps_per_print", 2147483647) + + # TODO 梯度裁剪的设置,这里需要用到trainer + # 从kwargs 获取 + # 精度设置 + # _format_precision_config + if self.fp16: + if "fp16" not in self.config: + # FP16 is a DeepSpeed standalone AMP implementation + logger.debug("Enabling DeepSpeed FP16.") + # TODO 这部分是否可以像 pytorch-lightning 那样给用户定制 + self.config["fp16"] = { + "enabled": True, + "loss_scale": 0, + "initial_scale_power": True, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1, + } + elif "amp" not in self.config: + logger.debug("Enabling DeepSpeed APEX Implementation.") + self.config["amp"] = {"enabled": True, "opt_level": "O1"} + + def zero_grad(self): + # DeepSpeedEngine.step 
包含了 zero_grad 功能 + pass + + def backward(self, loss): + self.model.backward(loss) + + def step(self): + self.model.step() def unwrap_model(self): r""" :return: 返回原本的模型,例如没有被 ``DataParallel`` 包裹; """ - if isinstance(self.model, deepspeed.DeepSpeedEngine): - print(type(self.model.module), self.model.module) - return self.model.module - if isinstance(self.model, torch.nn.DataParallel) or \ - isinstance(self.model, torch.nn.parallel.DistributedDataParallel): - return self.model.module - else: - return self.model - - @property - def data_device(self): - r""" - 注意单卡模式下使用 ``driver.data_device`` 等价于使用 ``driver.model_device``; - """ - return self.model_device - - def is_distributed(self): - r""" - :return: 返回当前使用的 driver 是否是分布式的 driver,对于 ``TorchSingleDriver`` 来说直接返回 ``False``; - """ - return False - - def _set_deepspeed_activation_checkpointing(self): - if self.config.get("activation_checkpointing"): - checkpoint_config = self.config["activation_checkpointing"] - deepspeed.checkpointing.configure( - mpu_=None, - partition_activations=checkpoint_config.get("partition_activations"), - contiguous_checkpointing=checkpoint_config.get("contiguous_memory_optimization"), - checkpoint_in_cpu=checkpoint_config.get("cpu_checkpointing"), - profile=checkpoint_config.get("profile"), - ) \ No newline at end of file + return self.model.module.model diff --git a/fastNLP/envs/imports.py b/fastNLP/envs/imports.py index 52f49e59..485a9dbf 100644 --- a/fastNLP/envs/imports.py +++ b/fastNLP/envs/imports.py @@ -22,6 +22,6 @@ _NEED_IMPORT_FAIRSCALE = not _IS_WINDOWS and _module_available("fairscale") and _NEED_IMPORT_TORCH = _module_available("torch") and 'torch' in need_import _NEED_IMPORT_JITTOR = _module_available("jittor") and 'jittor' in need_import _NEED_IMPORT_PADDLE = _module_available("paddle") and 'paddle' in need_import -_NEED_IMPORT_DEEPSPEED = _module_available("deepspeed") and 'deepspeed' in need_import +_NEED_IMPORT_DEEPSPEED = _module_available("deepspeed") and 'torch' in 
need_import _TORCH_GREATER_EQUAL_1_8 = _NEED_IMPORT_TORCH and _compare_version("torch", operator.ge, "1.8.0") From dca3377129c834c1d1aeb7e8038dcda819d58a90 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Fri, 17 Jun 2022 21:58:50 +0800 Subject: [PATCH 17/52] =?UTF-8?q?ddp=E6=B7=BB=E5=8A=A0=E7=8E=AF=E5=A2=83?= =?UTF-8?q?=E5=8F=98=E9=87=8FRANK=E7=9A=84=E8=AE=BE=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/drivers/torch_driver/ddp.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fastNLP/core/drivers/torch_driver/ddp.py b/fastNLP/core/drivers/torch_driver/ddp.py index 45a1a61a..ae85f0b6 100644 --- a/fastNLP/core/drivers/torch_driver/ddp.py +++ b/fastNLP/core/drivers/torch_driver/ddp.py @@ -421,6 +421,7 @@ class TorchDDPDriver(TorchDriver): os.environ['MASTER_ADDR'] = self.master_address os.environ['MASTER_PORT'] = self.master_port + os.environ["RANK"] = "0" os.environ["LOCAL_RANK"] = str(self.local_rank) os.environ["WORLD_SIZE"] = f"{self.world_size}" @@ -433,6 +434,7 @@ class TorchDDPDriver(TorchDriver): for rank in range(1, len(self.parallel_device)): env_copy = os.environ.copy() env_copy["LOCAL_RANK"] = f"{rank}" + env_copy["RANK"] = f"{rank}" # 如果是多机,一定需要用户自己拉起,因此我们自己使用 open_subprocesses 开启的进程的 FASTNLP_GLOBAL_RANK 一定是 LOCAL_RANK; env_copy[FASTNLP_GLOBAL_RANK] = str(rank) From cbee1c6cbc36f17ef4a1f9e331494e2470b009f5 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Fri, 17 Jun 2022 22:12:38 +0800 Subject: [PATCH 18/52] deepspeed test init --- .../controllers/test_trainer_deepspeed.py | 0 .../drivers/torch_driver/test_deepspeed.py | 475 ++++++++++++++++++ 2 files changed, 475 insertions(+) create mode 100644 tests/core/controllers/test_trainer_deepspeed.py create mode 100644 tests/core/drivers/torch_driver/test_deepspeed.py diff --git a/tests/core/controllers/test_trainer_deepspeed.py b/tests/core/controllers/test_trainer_deepspeed.py new file 
mode 100644 index 00000000..e69de29b diff --git a/tests/core/drivers/torch_driver/test_deepspeed.py b/tests/core/drivers/torch_driver/test_deepspeed.py new file mode 100644 index 00000000..8f28c332 --- /dev/null +++ b/tests/core/drivers/torch_driver/test_deepspeed.py @@ -0,0 +1,475 @@ +import os + +import pytest +from pathlib import Path + +from fastNLP.core.drivers.torch_driver.deepspeed import DeepSpeedDriver +from fastNLP.core.samplers import ( + RandomSampler, + UnrepeatedSampler, + BucketedBatchSampler, + UnrepeatedRandomSampler, + UnrepeatedSequentialSampler, +) +from tests.helpers.models.torch_model import TorchNormalModel_Classification_1 +from tests.helpers.datasets.torch_data import TorchNormalDataset, TorchNormalXYDataset +from tests.helpers.utils import magic_argv_env_context +from fastNLP.envs.distributed import rank_zero_rm +from fastNLP import logger + +from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_DEEPSPEED + +if _NEED_IMPORT_TORCH: + import torch + import torch.distributed as dist + from torch.utils.data import DataLoader, BatchSampler + +if _NEED_IMPORT_DEEPSPEED: + import deepspeed + +def generate_driver(labels, features, device=[0,1], fp16=False, output_from_new_proc="all"): + torch_model = TorchNormalModel_Classification_1(labels, features) + torch_opt = torch.optim.Adam(params=torch_model.parameters(), lr=0.01) + device = [torch.device(i) for i in device] + driver = DeepSpeedDriver( + model=torch_model, + parallel_device=device, + fp16=fp16, + output_from_new_proc=output_from_new_proc + ) + driver.set_optimizers(torch_opt) + driver.setup() + + return driver + +def dataloader_with_bucketedbatchsampler(dataset, length, batch_size, shuffle, drop_last): + """ + 建立一个 batch_sampler 为 BucketedBatchSampler 的 dataloader + """ + dataloader = DataLoader( + dataset=dataset, + batch_sampler=BucketedBatchSampler( + dataset, + length, + batch_size, + shuffle=shuffle, + drop_last=drop_last, + ), + ) + + return dataloader + +def 
dataloader_with_randomsampler(dataset, batch_size, shuffle, drop_last, seed=0, unrepeated=False): + """ + 建立一个 sampler 为 RandomSampler 的 dataloader + """ + if unrepeated: + sampler = UnrepeatedRandomSampler(dataset, shuffle, seed) + else: + sampler = RandomSampler(dataset, shuffle, seed=seed) + dataloader = DataLoader( + dataset, + sampler=sampler, + drop_last=drop_last, + batch_size=batch_size + ) + return dataloader + +############################################################################ +# +# 测试 TorchDDPDriver 的一些函数 +# +############################################################################ + +@pytest.mark.torch +@magic_argv_env_context +def test_multi_drivers(): + """ + 测试使用了多个 TorchDDPDriver 的情况。 + """ + generate_driver(10, 10) + generate_driver(20, 10) + + with pytest.raises(RuntimeError): + # 设备设置不同,应该报错 + generate_driver(20, 3, device=[0,1,2]) + assert False + dist.barrier() + + if dist.is_initialized(): + dist.destroy_process_group() + +@magic_argv_env_context +def test_multi_optimizers(): + torch_model = TorchNormalModel_Classification_1(10, 10) + torch_opt = torch.optim.Adam(params=torch_model.parameters(), lr=0.01) + device = [torch.device(i) for i in device] + driver = DeepSpeedDriver( + model=torch_model, + parallel_device=device, + ) + driver.set_optimizers([torch_opt, torch_opt]) + with pytest.raises(ValueError): + driver.setup() + + if dist.is_initialized(): + dist.destroy_process_group() + +@pytest.mark.torch +class TestDeepSpeedDriverFunction: + """ + 测试 TorchDeepSpeedDriver 一些简单函数的测试类,基本都是测试能否运行、是否存在 import 错误等问题 + """ + + @magic_argv_env_context + def test_simple_functions(self): + """ + 简单测试多个函数 + """ + driver = generate_driver(10, 10) + + """ + 测试 move_data_to_device 函数。这个函数仅调用了 torch_move_data_to_device ,测试例在 + tests/core/utils/test_torch_utils.py中,就不重复测试了 + """ + driver.move_data_to_device(torch.rand((32, 64))) + dist.barrier() + + """ + 测试 is_distributed 函数 + """ + assert driver.is_distributed() == True + dist.barrier() + + """ 
+ 测试 get_no_sync_context 函数 + """ + res = driver.get_model_no_sync_context() + dist.barrier() + + """ + 测试 is_global_zero 函数 + """ + driver.is_global_zero() + dist.barrier() + + """ + 测试 unwrap_model 函数 + """ + driver.unwrap_model() + dist.barrier() + + """ + 测试 get_local_rank 函数 + """ + driver.get_local_rank() + dist.barrier() + + """ + 测试 all_gather 函数 + 详细的测试在 test_dist_utils.py 中完成 + """ + obj = { + "rank": driver.global_rank + } + obj_list = driver.all_gather(obj, group=None) + for i, res in enumerate(obj_list): + assert res["rank"] == i + + """ + 测试 broadcast_object 函数 + 详细的函数在 test_dist_utils.py 中完成 + """ + if driver.global_rank == 0: + obj = { + "rank": driver.global_rank + } + else: + obj = None + res = driver.broadcast_object(obj, src=0) + assert res["rank"] == 0 + + if dist.is_initialized(): + dist.destroy_process_group() + +############################################################################ +# +# 测试 save 和 load 相关的功能 +# +############################################################################ +@pytest.mark.torch +class TestSaveLoad: + """ + 测试多卡情况下 save 和 load 相关函数的表现 + """ + + def setup_method(self): + self.dataset = TorchNormalXYDataset(100) + + @magic_argv_env_context + @pytest.mark.parametrize("only_state_dict", ([True, False])) + def test_save_and_load_model(self, only_state_dict): + """ + 测试 save_model 和 load_model 函数 + """ + try: + path = "model" + + dataloader = DataLoader(self.dataset, batch_size=2) + driver1, driver2 = generate_driver(20, 1), generate_driver(20, 1) + + driver1.save_model(path, only_state_dict) + + # 同步 + dist.barrier() + driver2.load_model(path, only_state_dict) + + for idx, batch in enumerate(dataloader): + batch = driver1.move_data_to_device(batch) + res1 = driver1.model( + batch, + fastnlp_fn=driver1.model.module.model.evaluate_step, + # Driver.model -> DataParallel.module -> _FleetWrappingModel.model + fastnlp_signature_fn=None, + wo_auto_param_call=False, + ) + res2 = driver2.model( + batch, + 
fastnlp_fn=driver2.model.module.model.evaluate_step, + fastnlp_signature_fn=None, + wo_auto_param_call=False, + ) + + assert torch.equal(res1["preds"], res2["preds"]) + finally: + rank_zero_rm(path) + + if dist.is_initialized(): + dist.destroy_process_group() + + @magic_argv_env_context + @pytest.mark.parametrize("only_state_dict", ([True, False])) + @pytest.mark.parametrize("fp16", ([True, False])) + @pytest.mark.parametrize("device", ([[0,1]])) + def test_save_and_load_with_bucketedbatchsampler(self, device, only_state_dict, fp16): + """ + 测试save和load函数,主要测试 dataloader 被替换了 sampler 之后的情况 + """ + + try: + path = "model.ckp" + num_replicas = len(device) + + driver1, driver2 = generate_driver(20, 1, device=device, fp16=fp16), \ + generate_driver(20, 1, device=device, fp16=False) + dataloader = dataloader_with_bucketedbatchsampler( + self.dataset, + length=[10 for i in range(len(self.dataset))], + batch_size=4, + shuffle=True, + drop_last=False + ) + dataloader.batch_sampler.set_distributed( + num_replicas=driver1.world_size, + rank=driver1.global_rank, + pad=True + ) + num_consumed_batches = 4 + + already_seen_x_set = set() + already_seen_y_set = set() + driver1.set_sampler_epoch(dataloader, 4) + for idx, batch in enumerate(dataloader): + if idx >= num_consumed_batches: + break + already_seen_x_set.update(batch["x"].reshape(-1, ).tolist()) + already_seen_y_set.update(batch["y"].reshape(-1, ).tolist()) + + # 同步 + dist.barrier() + + # 保存状态 + sampler_states = dataloader.batch_sampler.state_dict() + save_states = {"num_consumed_batches": num_consumed_batches} + driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) + dist.barrier() + # 加载 + # 更改 batch_size + dataloader = dataloader_with_bucketedbatchsampler( + self.dataset, + length=[10 for i in range(len(self.dataset))], + batch_size=2, + shuffle=True, + drop_last=False + ) + dataloader.batch_sampler.set_distributed( + num_replicas=driver2.world_size, + 
rank=driver2.global_rank, + pad=True + ) + dist.barrier() + load_states = driver2.load_checkpoint(Path(path), dataloader, only_state_dict, should_load_model=True) + dist.barrier() + replaced_loader = load_states.pop("dataloader") + + # 1. 检查 optimizer 的状态 + # TODO optimizer 的 state_dict 总是为空 + + # 2. 检查 batch_sampler 是否被正确地加载和替换 + assert not (replaced_loader is dataloader) + assert replaced_loader.batch_sampler is dataloader.batch_sampler + assert isinstance(replaced_loader.batch_sampler, BucketedBatchSampler) + if os.environ['FASTNLP_GLOBAL_RANK'] == '0': + assert replaced_loader.batch_sampler.seed == sampler_states["seed"] + assert replaced_loader.batch_sampler.num_consumed_samples == num_consumed_batches * 4 * num_replicas + + # 3. 检查 fp16 是否被加载 + if fp16: + assert not isinstance(driver2.grad_scaler, torch.cuda.amp.GradScaler) + + # 4. 检查 model 的参数是否正确 + # 5. 检查 batch_idx + start_batch = load_states.pop('batch_idx_in_epoch') + assert start_batch == 2 * num_consumed_batches + left_x_batches = set() + left_y_batches = set() + driver2.set_sampler_epoch(replaced_loader, 4) + for idx, batch in enumerate(replaced_loader): + + left_x_batches.update(batch["x"].reshape(-1, ).tolist()) + left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + res1 = driver1.model( + batch, + fastnlp_fn=driver1.model.module.model.evaluate_step, + # Driver.model -> DataParallel.module -> _FleetWrappingModel.model + fastnlp_signature_fn=None, + wo_auto_param_call=False, + ) + res2 = driver2.model( + batch, + fastnlp_fn=driver2.model.module.model.evaluate_step, + fastnlp_signature_fn=None, + wo_auto_param_call=False, + ) + assert torch.equal(res1["preds"], res2["preds"]) + + assert len(left_x_batches) + len(already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_x_batches | already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_y_batches) + len(already_seen_y_set) == len(self.dataset) / num_replicas + assert len(left_y_batches | already_seen_y_set) == 
len(self.dataset) / num_replicas + dist.barrier() + finally: + rank_zero_rm(path) + + if dist.is_initialized(): + dist.destroy_process_group() + + @magic_argv_env_context + @pytest.mark.parametrize("only_state_dict", ([True, False])) + @pytest.mark.parametrize("fp16", ([True, False])) + @pytest.mark.parametrize("device", ([[0,1]])) + def test_save_and_load_with_randomsampler(self, device, only_state_dict, fp16): + """ + 测试save和load函数,主要测试 dataloader 被替换了 batch_sampler 的情况 + """ + + try: + path = "checkpoints/" + + num_replicas = len(device) + + driver1 = generate_driver(20, 1, device=device, fp16=fp16) + driver2 = generate_driver(20, 1, device=device, fp16=False) + + dataloader = dataloader_with_randomsampler(self.dataset, 4, True, False, unrepeated=False) + dataloader.batch_sampler.sampler.set_distributed( + num_replicas=driver1.world_size, + rank=driver1.global_rank, + pad=True + ) + num_consumed_batches = 4 + + already_seen_x_set = set() + already_seen_y_set = set() + driver1.set_sampler_epoch(dataloader, 4) + for idx, batch in enumerate(dataloader): + if idx >= num_consumed_batches: + break + already_seen_x_set.update(batch["x"].reshape(-1, ).tolist()) + already_seen_y_set.update(batch["y"].reshape(-1, ).tolist()) + + # 同步 + dist.barrier() + + # 保存状态 + sampler_states = dataloader.batch_sampler.sampler.state_dict() + save_states = {"num_consumed_batches": num_consumed_batches} + if only_state_dict: + driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) + else: + driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True, input_spec=[torch.ones((16, 10))]) + dist.barrier() # 等待save成功 + # 加载 + # 更改 batch_size + dataloader = dataloader_with_randomsampler(self.dataset, 2, True, False, unrepeated=False) + dataloader.batch_sampler.sampler.set_distributed( + num_replicas=driver2.world_size, + rank=driver2.global_rank, + pad=True + ) + load_states = 
driver2.load_checkpoint(Path(path), dataloader, only_state_dict, should_load_model=True) + replaced_loader = load_states.pop("dataloader") + + # 1. 检查 optimizer 的状态 + # TODO optimizer 的 state_dict 总是为空 + + # 2. 检查 sampler 是否被正确地加载和替换 + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + if os.environ['FASTNLP_GLOBAL_RANK'] == '0': + assert replaced_loader.batch_sampler.sampler.seed == sampler_states["seed"] + assert replaced_loader.batch_sampler.sampler.epoch == sampler_states["epoch"] + assert len(replaced_loader.batch_sampler.sampler.dataset) == sampler_states["length"] + assert replaced_loader.batch_sampler.sampler.shuffle == sampler_states["shuffle"] + assert replaced_loader.batch_sampler.sampler.num_consumed_samples == 4 * num_consumed_batches * num_replicas + + # 3. 检查 fp16 是否被加载 + if fp16: + assert not isinstance(driver2.grad_scaler, torch.cuda.amp.GradScaler) + + # 4. 检查 model 的参数是否正确 + # 5. 检查 batch_idx + start_batch = load_states.pop('batch_idx_in_epoch') + assert start_batch == 2 * num_consumed_batches + left_x_batches = set() + left_y_batches = set() + driver2.set_sampler_epoch(replaced_loader, 4) + for idx, batch in enumerate(replaced_loader): + + left_x_batches.update(batch["x"].reshape(-1, ).tolist()) + left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + res1 = driver1.model( + batch, + fastnlp_fn=driver1.model.module.model.evaluate_step, + # Driver.model -> DataParallel.module -> _FleetWrappingModel.model + fastnlp_signature_fn=None, + wo_auto_param_call=False, + ) + res2 = driver2.model( + batch, + fastnlp_fn=driver2.model.module.model.evaluate_step, + fastnlp_signature_fn=None, + wo_auto_param_call=False, + ) + assert torch.equal(res1["preds"], res2["preds"]) + + assert len(left_x_batches) + len(already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_x_batches | already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_y_batches) + 
len(already_seen_y_set) == len(self.dataset) / num_replicas + assert len(left_y_batches | already_seen_y_set) == len(self.dataset) / num_replicas + + finally: + rank_zero_rm(path) + + if dist.is_initialized(): + dist.destroy_process_group() \ No newline at end of file From d26d0ad17f1fa896d31d446bed54b58c028b450c Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Fri, 17 Jun 2022 22:16:55 +0800 Subject: [PATCH 19/52] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E9=80=89=E6=8B=A9deeps?= =?UTF-8?q?peed=20driver=E7=9A=84=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/drivers/choose_driver.py | 2 +- .../torch_driver/initialize_torch_driver.py | 14 +++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/fastNLP/core/drivers/choose_driver.py b/fastNLP/core/drivers/choose_driver.py index 4be1e502..56d30e6f 100644 --- a/fastNLP/core/drivers/choose_driver.py +++ b/fastNLP/core/drivers/choose_driver.py @@ -17,7 +17,7 @@ def choose_driver(model, driver: Union[str, Driver], device: Optional[Union[int, if isinstance(driver, Driver): return driver - if driver in {"torch", "fairscale"}: + if driver in {"torch", "fairscale", "deepspeed"}: from fastNLP.core.drivers.torch_driver.initialize_torch_driver import initialize_torch_driver return initialize_torch_driver(driver, device, model, **kwargs) elif driver in {"jittor"}: diff --git a/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py b/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py index 0deac4dc..5d4d2ab5 100644 --- a/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py +++ b/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py @@ -8,6 +8,7 @@ from .torch_driver import TorchDriver from .single_device import TorchSingleDriver from .ddp import TorchDDPDriver from .fairscale import FairScaleDriver +from .deepspeed import DeepSpeedDriver from fastNLP.core.log import logger from fastNLP.envs 
import FASTNLP_BACKEND_LAUNCH from pkg_resources import parse_version @@ -20,7 +21,7 @@ def initialize_torch_driver(driver: str, device: Optional[Union[str, "torch.devi r""" 用来根据参数 ``driver` 和 ``device`` 来确定并且初始化一个具体的 ``Driver`` 实例然后返回回去; - :param driver: 该参数的值应为以下之一:``["torch", "fairscale"]``; + :param driver: 该参数的值应为以下之一:``["torch", "fairscale", "deepspeed"]``; :param device: 该参数的格式与 ``Trainer`` 对参数 ``device`` 的要求一致; :param model: 训练或者评测的具体的模型; @@ -41,7 +42,7 @@ def initialize_torch_driver(driver: str, device: Optional[Union[str, "torch.devi return TorchDDPDriver(model, torch.device(f"cuda:{os.environ['LOCAL_RANK']}"), is_pull_by_torch_run=True, **kwargs) - if driver not in {"torch", "fairscale"}: + if driver not in {"torch", "fairscale", "deepspeed"}: raise ValueError("Parameter `driver` can only be one of these values: ['torch', 'fairscale'].") _could_use_device_num = torch.cuda.device_count() @@ -83,4 +84,11 @@ def initialize_torch_driver(driver: str, device: Optional[Union[str, "torch.devi logger.warning_once("Notice you are using `fairscale`, but the `device` is only one gpu.") return FairScaleDriver(model, [device], **kwargs) else: - return FairScaleDriver(model, device, **kwargs) \ No newline at end of file + return FairScaleDriver(model, device, **kwargs) + elif driver == "deepspeed": + if not isinstance(device, List): + if device.type == 'cpu': + raise ValueError("You are using `deepspeed` driver, but your chosen `device` is 'cpu'.") + logger.warning_once("Notice you are using `deepspeed`, but the `device` is only one gpu.") + return DeepSpeedDriver(model, [device], **kwargs) + return DeepSpeedDriver(model, device, **kwargs) \ No newline at end of file From 64da46b613547a5768e6b56ffe83ab11ac1caf60 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Fri, 17 Jun 2022 23:23:33 +0800 Subject: [PATCH 20/52] =?UTF-8?q?paddle=20replace=5Fbatch=5Fsampler?= =?UTF-8?q?=E5=92=8Ccheck=5Fdataloader=20=E8=B7=9F=E8=BF=9B?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../drivers/jittor_driver/jittor_driver.py | 3 +- fastNLP/core/drivers/jittor_driver/utils.py | 4 +++ .../drivers/paddle_driver/paddle_driver.py | 3 +- fastNLP/core/drivers/paddle_driver/utils.py | 29 ++++++++++++------- 4 files changed, 26 insertions(+), 13 deletions(-) diff --git a/fastNLP/core/drivers/jittor_driver/jittor_driver.py b/fastNLP/core/drivers/jittor_driver/jittor_driver.py index c2e338bb..312f0d83 100644 --- a/fastNLP/core/drivers/jittor_driver/jittor_driver.py +++ b/fastNLP/core/drivers/jittor_driver/jittor_driver.py @@ -6,6 +6,7 @@ from dataclasses import dataclass from fastNLP.envs.imports import _NEED_IMPORT_JITTOR from fastNLP.core.drivers.driver import Driver from fastNLP.core.dataloaders import JittorDataLoader +from fastNLP.core.dataloaders import OverfitDataLoader from fastNLP.core.samplers import ReproducibleSampler, RandomSampler from fastNLP.core.log import logger from fastNLP.core.utils import apply_to_collection, nullcontext @@ -69,7 +70,7 @@ class JittorDriver(Driver): self.wo_auto_param_call = kwargs.get("model_wo_auto_param_call", False) def check_dataloader_legality(self, dataloader): - if not isinstance(dataloader, (Dataset, JittorDataLoader)): + if not isinstance(dataloader, (Dataset, JittorDataLoader, OverfitDataLoader)): raise TypeError(f"{Dataset} or {JittorDataLoader} is expected, instead of `{type(dataloader)}`") if len(dataloader) == 0: logger.rank_zero_warning("Your dataloader is empty, which is not recommended because it " diff --git a/fastNLP/core/drivers/jittor_driver/utils.py b/fastNLP/core/drivers/jittor_driver/utils.py index c75526df..af840a09 100644 --- a/fastNLP/core/drivers/jittor_driver/utils.py +++ b/fastNLP/core/drivers/jittor_driver/utils.py @@ -14,6 +14,7 @@ from fastNLP.envs import ( FASTNLP_BACKEND_LAUNCH, FASTNLP_GLOBAL_SEED, ) +from fastNLP.core.samplers import ReproducibleBatchSampler from fastNLP.core.log import logger if 
_NEED_IMPORT_JITTOR: @@ -63,6 +64,9 @@ def replace_batch_sampler(dataloader, batch_sampler): "or report this bug to us.") def replace_sampler(dataloader: Union["Dataset", "JittorDataLoader"], sampler): + batch_sampler = getattr(dataloader, "sampler") + if batch_sampler is not None and isinstance(batch_sampler, ReproducibleBatchSampler): + raise RuntimeError("It should not be running here, please report a bug to us.") if isinstance(dataloader, JittorDataLoader): init_params = dict(inspect.signature(dataloader.__init__).parameters) reconstruct_args = {name: getattr(dataloader, name, p.default) for name, p in init_params.items()} diff --git a/fastNLP/core/drivers/paddle_driver/paddle_driver.py b/fastNLP/core/drivers/paddle_driver/paddle_driver.py index 6ef0aaae..bfc26350 100644 --- a/fastNLP/core/drivers/paddle_driver/paddle_driver.py +++ b/fastNLP/core/drivers/paddle_driver/paddle_driver.py @@ -19,6 +19,7 @@ from fastNLP.envs import ( rank_zero_call, ) from fastNLP.core.log import logger +from fastNLP.core.dataloaders import OverfitDataLoader from fastNLP.core.samplers import ( ReproducibleBatchSampler, ReproducibleSampler, @@ -93,7 +94,7 @@ class PaddleDriver(Driver): self.grad_scaler.update() def check_dataloader_legality(self, dataloader): - if not isinstance(dataloader, DataLoader): + if not isinstance(dataloader, DataLoader) and not isinstance(dataloader, OverfitDataLoader): raise TypeError(f"{DataLoader} is expected, instead of `{type(dataloader)}`") if dataloader.batch_size is None and dataloader.batch_sampler is None: raise ValueError("Please ensure at least one of your dataloader's batch_size and batch_sampler" diff --git a/fastNLP/core/drivers/paddle_driver/utils.py b/fastNLP/core/drivers/paddle_driver/utils.py index 1191b60c..be83e5fe 100644 --- a/fastNLP/core/drivers/paddle_driver/utils.py +++ b/fastNLP/core/drivers/paddle_driver/utils.py @@ -15,6 +15,7 @@ from fastNLP.envs import ( FASTNLP_BACKEND_LAUNCH, FASTNLP_GLOBAL_SEED, ) +from 
fastNLP.core.samplers import ReproducibleBatchSampler from fastNLP.core.utils import auto_param_call, paddle_to from fastNLP.core.log import logger @@ -129,7 +130,7 @@ def _build_fp16_env(dummy=False): "NOTE: your device does NOT support faster training with fp16, " "please switch to FP32 which is likely to be faster" ) - return auto_cast, GradScaler + return auto_cast, GradScaler def find_free_ports(num): """ @@ -189,10 +190,11 @@ def replace_batch_sampler(dataloader: "DataLoader", batch_sampler: "BatchSampler non_default_params.add("dataset") reconstruct_args = {k: v for k, v in instance_attrs.items() if k in non_default_params} - reconstruct_args.update({ - "batch_sampler": batch_sampler, "shuffle": False, "drop_last": False, "batch_size": 1, - "persistent_workers": dataloader._persistent_workers, - }) + if isinstance(dataloader, DataLoader): + reconstruct_args.update({ + "batch_sampler": batch_sampler, "shuffle": False, "drop_last": False, "batch_size": 1, + "persistent_workers": dataloader._persistent_workers, + }) # POSITIONAL_OR_KEYWORD 代表一般的参数 # 收集初始化函数中出现的、一般形式的、不带默认值且不在 reconstruct_args 中的参数 @@ -210,9 +212,10 @@ def replace_batch_sampler(dataloader: "DataLoader", batch_sampler: "BatchSampler required_args = sorted(required_args) dataloader_self_name = dataloader.__class__.__name__ raise Exception( - f"Trying to inject `BatchSampler` into the `{dataloader_self_name}` instance. " - "This would fail as some of the `__init__` arguments are not available as instance attributes. " - f"The missing attributes are {required_args}. " + f"Need to inject arguments {required_args} into the __init__ of `{dataloader_self_name}`. " + f"But they are not found in the attribute of `{dataloader_self_name}`, fastNLP cannot determine its " + f"value when try to reinitialize `{dataloader_self_name}`, please add `{required_args}` to be " + f"`{dataloader_self_name}`'s attribute." 
) # 这种错误针对的是传入的 dataloader 不是直接的 DataLoader,而是定制了 DataLoader,但是 __init__ 中没有 **kwargs; @@ -224,10 +227,11 @@ def replace_batch_sampler(dataloader: "DataLoader", batch_sampler: "BatchSampler missing_kwargs = sorted(missing_kwargs) dataloader_self_name = dataloader.__class__.__name__ raise Exception( - f"Trying to inject `BatchSampler` into the `{dataloader_self_name}` instance. " - "This would fail as it doesn't expose all its attributes in the `__init__` signature. " - f"The missing arguments are {missing_kwargs}. " + f"The parameter:{missing_kwargs} needed to reinitialize `{dataloader_self_name}` is not found." ) + # 如果没有kwargs,则保证一下只传入需要的参数 + if not isinstance(dataloader, DataLoader): + reconstruct_args = {key:value for key,value in reconstruct_args.items() if key in init_params} return type(dataloader)(**reconstruct_args) @@ -235,6 +239,9 @@ def replace_sampler(dataloader, new_sampler): """ 使用 ``new_sampler`` 重新构建一个 ``BatchSampler``,并替换到 ``dataloader`` 中 """ + batch_sampler = getattr(dataloader, "batch_sampler") + if batch_sampler is not None and isinstance(batch_sampler, ReproducibleBatchSampler): + raise RuntimeError("It should not be running here, please report a bug to us.") new_batch_sampler = deepcopy(dataloader.batch_sampler) new_batch_sampler.sampler = new_sampler return replace_batch_sampler(dataloader, new_batch_sampler) From b60621f3d1ddace2588535050f9855ba92fde068 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Fri, 17 Jun 2022 23:23:43 +0800 Subject: [PATCH 21/52] small --- .../paddle_driver/test_initialize_paddle_driver.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/core/drivers/paddle_driver/test_initialize_paddle_driver.py b/tests/core/drivers/paddle_driver/test_initialize_paddle_driver.py index 7e567c84..63124cdc 100644 --- a/tests/core/drivers/paddle_driver/test_initialize_paddle_driver.py +++ b/tests/core/drivers/paddle_driver/test_initialize_paddle_driver.py @@ -1,3 +1,5 @@ +import 
os + import pytest from fastNLP.core.drivers import PaddleSingleDriver, PaddleFleetDriver @@ -40,9 +42,14 @@ def test_get_fleet(device): """ 测试 fleet 多卡的初始化情况 """ - + flag = False + if "USER_CUDA_VISIBLE_DEVICES" not in os.environ: + os.environ["USER_CUDA_VISIBLE_DEVICES"] = "0,1,2,3" + flag = True model = PaddleNormalModel_Classification_1(20, 10) driver = initialize_paddle_driver("paddle", device, model) + if flag: + del os.environ["USER_CUDA_VISIBLE_DEVICES"] assert isinstance(driver, PaddleFleetDriver) From 2d2bf421fdc6e97fb14346e21511c3e91e1933d5 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Sat, 18 Jun 2022 22:28:57 +0800 Subject: [PATCH 22/52] =?UTF-8?q?deepspeed=20checkpoint=E7=9B=B8=E5=85=B3?= =?UTF-8?q?=E5=87=BD=E6=95=B0=EF=BC=88=C3=AF=E5=BE=AE=E6=9C=AA=E6=B5=8B?= =?UTF-8?q?=E8=AF=95=EF=BC=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../core/drivers/torch_driver/deepspeed.py | 120 ++++++++++++++++-- 1 file changed, 108 insertions(+), 12 deletions(-) diff --git a/fastNLP/core/drivers/torch_driver/deepspeed.py b/fastNLP/core/drivers/torch_driver/deepspeed.py index 298945ed..bb4df495 100644 --- a/fastNLP/core/drivers/torch_driver/deepspeed.py +++ b/fastNLP/core/drivers/torch_driver/deepspeed.py @@ -1,11 +1,16 @@ import os +from pathlib import Path -from typing import Optional, Union, Callable, Dict, Tuple, Sequence, List +from typing import Union, Dict, List from .torch_driver import TorchDriver from .ddp import TorchDDPDriver from .utils import _create_default_config, _DDPWrappingModel +from fastNLP.core.utils import nullcontext from fastNLP.core.log import logger -from fastNLP.envs.env import FASTNLP_DISTRIBUTED_CHECK +from fastNLP.envs import( + FASTNLP_DISTRIBUTED_CHECK, + FASTNLP_CHECKPOINT_FILENAME +) from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_DEEPSPEED if _NEED_IMPORT_TORCH: @@ -79,6 +84,15 @@ class DeepSpeedDriver(TorchDDPDriver): self._has_setup = 
False # 设置这一参数是因为 evaluator 中也会进行 setup 操作,但是显然是不需要的也不应该的; self._has_ddpwrapped = False # 判断传入的模型是否经过 _has_ddpwrapped 包裹; self.strategy = strategy + self.accumulation_steps = kwargs.get("accumulation_steps", 1) + # 获取 batch_size 以设置 train_micro_batch_size_per_gpu 参数 + train_dl = kwargs.get("train_dataloader", None) + if train_dl is not None: + self.train_micro_batch_size = self.get_dataloader_args(train_dl) + else: + logger.warn("No `train_dataloader` found, and we will set `train_micro_batch_size_per_gpu`" + "to 1 for deepspeed configuration.") + self.train_micro_batch_size = 1 self._ds_kwargs = kwargs.get("deepspeed_kwargs", {}) @@ -93,8 +107,8 @@ class DeepSpeedDriver(TorchDDPDriver): raise ValueError("Multi optimizers is not supported for DeepSpeedDriver right now.") if self._has_setup: return - self.setup_config() self._has_setup = True + self.setup_config() # 如果用户需要使用多机模式,那么一定进入到这里; if self.is_pull_by_torch_run: # dist.get_world_size() 只能在 dist.init_process_group 初始化之后进行调用; @@ -152,12 +166,14 @@ class DeepSpeedDriver(TorchDDPDriver): # 设置 deepspeed if not isinstance(self.model, deepspeed.DeepSpeedEngine): + model=_DDPWrappingModel(self.model) + model_parameters = filter(lambda p: p.requires_grad, model.parameters()) self.model, ds_optimizer, _, _ = deepspeed.initialize( - model=_DDPWrappingModel(self.model), + model=model, optimizer=self.optimizers[0], - config=self.config + model_parameters=model_parameters, + config=self.config, ) - # TODO 是否有必要 self._optimizers = [ds_optimizer] if self.config.get("activation_checkpointing"): @@ -174,6 +190,13 @@ class DeepSpeedDriver(TorchDDPDriver): def setup_config(self): + self.config = self._ds_kwargs.get("config") + if self.config is not None: + # TODO 究竟哪些参数按照config,哪些按照trainer参数 + logger.warn("Notice that you have defined a configuration for deepspeed and parameters like" + "`optimizers`, `strategy` and `fp16` may not take effects.") + return + if self.strategy == "deepspeed": self.config = 
_create_default_config(stage=2) elif self.strategy == "deepspeed_stage_1": @@ -202,13 +225,11 @@ class DeepSpeedDriver(TorchDDPDriver): else: raise ValueError(f"Unknown deepspeed strategy {self.strategy}.") - self.config.setdefault("train_micro_batch_size_per_gpu", 1) + # 设置成 max_int 防止 deepspeed 的输出干扰 fastnlp 的输出 self.config.setdefault("steps_per_print", 2147483647) + self.config["gradient_accumulation_steps"] = self.accumulation_steps + self.config.setdefault("train_micro_batch_size_per_gpu", self.train_micro_batch_size) - # TODO 梯度裁剪的设置,这里需要用到trainer - # 从kwargs 获取 - # 精度设置 - # _format_precision_config if self.fp16: if "fp16" not in self.config: # FP16 is a DeepSpeed standalone AMP implementation @@ -238,6 +259,81 @@ class DeepSpeedDriver(TorchDDPDriver): def unwrap_model(self): r""" - :return: 返回原本的模型,例如没有被 ``DataParallel`` 包裹; + :return: 返回原本的模型; """ return self.model.module.model + + def get_model_no_sync_context(self): + r""" + :return: 返回一个 ``context`` 上下文环境,用于关闭各个进程之间的同步;在 ``deepspeed`` 中,返回一个空的上下文 + """ + # 注意此时的 model 是 "DistributedDataParallel" 对象; + return nullcontext + + def save_model(self, filepath: Union[str, Path], only_state_dict: bool = False, **kwargs): + """ + 保存当前 driver 的模型到 folder 下。 + + :param filepath: 保存到哪个文件夹; + :param only_state_dict: 是否只保存权重; + :return: + """ + # deepspeed engine 要求在每个 rank 都调用 save_checkpoint,故去掉了 rank_zero_call 装饰器 + if self.zero_stage_3: + logger.rank_zero_warning( + "When saving the DeepSpeed Stage 3 checkpoint, " + "each worker will save a shard of the checkpoint within a directory. " + # TODO check一下 + # "If a single file is required after training, " + # "see https://pytorch-lightning.readthedocs.io/en/latest/advanced/advanced_gpu.html#" + # "deepspeed-zero-stage-3-single-file for instructions." + ) + if not only_state_dict: + logger.rank_zero_warning("Only saving state dict is not allowed for `DeepSpeedDriver`. 
We will save its " + "checkpoint for you instead.") + self.model.save_checkpoint(filepath, **kwargs) + + def load_model(self, filepath: Union[Path, str], only_state_dict: bool = False, **kwargs): + """ + 从 folder 中加载权重并赋值到当前 driver 的模型上。 + + :param filepath: 加载权重或模型的路径 + :param load_state_dict: 保存的内容是否只是权重。 + :param kwargs: + :return: + """ + if not only_state_dict: + logger.warn("Only loading state dict is not allowed for `DeepSpeedDriver`. We will load its " + "checkpoint for you instead.") + self.model.load_checkpoint(filepath, **kwargs) + + def save_checkpoint(self, folder: Path, states: Dict, dataloader, only_state_dict: bool = True, should_save_model: bool = True, **kwargs): + # deepspeed engine 要求在每个 rank 都调用 save_checkpoint,故去掉了 rank_zero_call 装饰器 + # 1. 保存 sampler 的状态 + sampler_state_dict = self.get_sampler_state_dict() + + # 2. 保存模型的状态; + if not should_save_model: + logger.rank_zero_warning("Saving checkpoint without model is not allowed for `DeepSpeedDriver`, " + "so we will still save the model for you.") + + self.model.save_checkpoint(Path(folder).joinpath(FASTNLP_CHECKPOINT_FILENAME), + client_state=sampler_state_dict) + + def load_checkpoint(self, folder: Path, dataloader, only_state_dict: bool = True, should_load_model: bool = True, **kwargs) -> Dict: + # 1. 
加载模型状态; + if not should_load_model: + logger.rank_zero_warning("Loading checkpoint without model is not allowed for `DeepSpeedDriver`, " + "so we will still load the model for you.") + load_path, states = self.model.load_checkpoint(folder.joinpath(FASTNLP_CHECKPOINT_FILENAME)) + if load_path is None: + raise RuntimeError(f"Failed to load checkpoint from path: {str(folder)}") + + # 2.恢复 sampler 的状态 + states = self.load_sampler_state_dict(states) + + return states + + @property + def stage_3(self) -> bool: + return self.config.get("zero_optimization") and self.config.get("zero_optimization").get("stage") == 3 \ No newline at end of file From 9903d2eec15bd6f5107b1316dc26165710d1cf55 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Sat, 18 Jun 2022 23:04:50 +0800 Subject: [PATCH 23/52] =?UTF-8?q?TorchDriver=E7=9A=84sampler=E5=8A=A0?= =?UTF-8?q?=E8=BD=BD=E5=92=8C=E4=BF=9D=E5=AD=98=E6=8B=86=E5=88=86=E4=B8=BA?= =?UTF-8?q?=E5=8D=95=E7=8B=AC=E7=9A=84=E5=87=BD=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../core/drivers/torch_driver/torch_driver.py | 119 ++++++++++-------- 1 file changed, 65 insertions(+), 54 deletions(-) diff --git a/fastNLP/core/drivers/torch_driver/torch_driver.py b/fastNLP/core/drivers/torch_driver/torch_driver.py index 84e4aa70..1594a903 100644 --- a/fastNLP/core/drivers/torch_driver/torch_driver.py +++ b/fastNLP/core/drivers/torch_driver/torch_driver.py @@ -190,7 +190,30 @@ class TorchDriver(Driver): # 传入的 dataloader 参数是 trainer 的 dataloader 属性,因为 driver 的所有 dataloader 我们是不会去改变它的,而是通过改变 # trainer.dataloader 来改变 dataloader 的状态,从而适配训练或者评测环境; - # 1. sampler 的状态,因为我们支持 resume training,即精确恢复到具体的一个 batch; + # 1. sampler 的状态; + num_consumed_batches = states.pop('num_consumed_batches') + states['sampler_states'] = self.get_sampler_state(dataloader, num_consumed_batches) + + # 2. 
保存模型的状态; + if should_save_model: + if not os.path.exists(folder): + os.mkdir(folder) + model_path = folder.joinpath(FASTNLP_MODEL_FILENAME) + self.save_model(model_path, only_state_dict=only_state_dict) + + # 3. 保存 optimizers 的状态; + states["optimizers_state_dict"] = self.get_optimizer_state() + logger.debug("Save optimizer state dict.") + + # 4. 保存fp16的状态 + if not isinstance(self.grad_scaler, DummyGradScaler): + grad_scaler_state_dict = self.grad_scaler.state_dict() + states['grad_scaler_state_dict'] = grad_scaler_state_dict + + torch.save(states, Path(folder).joinpath(FASTNLP_CHECKPOINT_FILENAME)) + + def get_sampler_state(self, dataloader, num_consumed_batches): + # 因为我们支持 resume training,即精确恢复到具体的一个 batch; # 首先 pytorch 的 DataLoader 一定会有 sampler;另一方面,我们在断点重训的时候一定会在 `set_` 中将 dataloader 的 # sampler 替换为 `ReproducibleSampler`;否则就是在单卡情况下将 batch_sampler 替换为 `ReproducibleBatchSampler`; dataloader_args = self.get_dataloader_args(dataloader) @@ -200,7 +223,7 @@ class TorchDriver(Driver): sampler = dataloader_args.sampler else: raise RuntimeError("This condition is not supposed to appear. Please report a bug to us.") - num_consumed_batches = states.pop('num_consumed_batches') + if hasattr(sampler, 'state_dict') and callable(sampler.state_dict): sampler_states = sampler.state_dict() if dataloader_args.batch_size is not None: @@ -209,30 +232,49 @@ class TorchDriver(Driver): else: logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on sampler's " "`num_consumed_samples`, it may cause missing some samples when reload.") - - states['sampler_states'] = sampler_states else: raise RuntimeError('The sampler has no `state_dict()` method, fastNLP cannot save the training ' 'state.') - # 2. 保存模型的状态; - if should_save_model: - if not os.path.exists(folder): - os.mkdir(folder) - model_path = folder.joinpath(FASTNLP_MODEL_FILENAME) - self.save_model(model_path, only_state_dict=only_state_dict) + return sampler_states - # 3. 
保存 optimizers 的状态; - optimizers_state_dict = self.get_optimizer_state() + def load_sampler_state(self, dataloader, sampler_states): + states = {} + dataloader_args = self.get_dataloader_args(dataloader) + if isinstance(dataloader_args.batch_sampler, ReproducibleBatchSampler): + sampler = dataloader_args.batch_sampler + elif isinstance(dataloader_args.sampler, ReproducibleSampler): + sampler = dataloader_args.sampler + elif isinstance(dataloader_args.sampler, TorchRandomSampler): + sampler = RandomSampler(dataloader_args.sampler.data_source) + logger.debug("Replace torch RandomSampler into fastNLP RandomSampler.") + elif self.is_distributed(): + raise RuntimeError("It is not allowed to use checkpoint retraining when you do not use our" + "`ReproducibleSampler`.") + else: + sampler = ReproduceBatchSampler( + batch_sampler=dataloader_args.batch_sampler if dataloader_args.batch_sampler is not None else dataloader_args.sampler, + batch_size=dataloader_args.batch_size, + drop_last=dataloader_args.drop_last + ) + sampler.load_state_dict(sampler_states) + states["dataloader"] = self.set_dist_repro_dataloader(dataloader, sampler) - # 4. 
保存fp16的状态 - if not isinstance(self.grad_scaler, DummyGradScaler): - grad_scaler_state_dict = self.grad_scaler.state_dict() - states['grad_scaler_state_dict'] = grad_scaler_state_dict + # 修改 trainer_state.batch_idx_in_epoch + # sampler 是类似 RandomSampler 的sampler,不是 batch_sampler; + if not isinstance(sampler, ReproducibleBatchSampler): + if dataloader_args.drop_last: + batch_idx_in_epoch = len( + sampler) // dataloader_args.batch_size - sampler.num_left_samples // dataloader_args.batch_size + else: + batch_idx_in_epoch = (len(sampler) + dataloader_args.batch_size - 1) // dataloader_args.batch_size - \ + (sampler.num_left_samples + dataloader_args.batch_size - 1) // dataloader_args.batch_size + # sampler 是 batch_sampler; + else: + batch_idx_in_epoch = sampler.batch_idx_in_epoch - logger.debug("Save optimizer state dict") - states["optimizers_state_dict"] = optimizers_state_dict - torch.save(states, Path(folder).joinpath(FASTNLP_CHECKPOINT_FILENAME)) + states["batch_idx_in_epoch"] = batch_idx_in_epoch + return states def get_optimizer_state(self): optimizers_state_dict = {} @@ -262,7 +304,7 @@ class TorchDriver(Driver): if should_load_model: self.load_model(filepath=folder.joinpath(FASTNLP_MODEL_FILENAME), only_state_dict=only_state_dict) - # 3. 加载fp16的状态 + # 3. 加载 fp16 的状态 if "grad_scaler_state_dict" in states: grad_scaler_state_dict = states.pop("grad_scaler_state_dict") if not isinstance(self.grad_scaler, DummyGradScaler): @@ -273,40 +315,9 @@ class TorchDriver(Driver): f"the training process may be unstable.") # 4. 
恢复 sampler 的状态; - dataloader_args = self.get_dataloader_args(dataloader) - if isinstance(dataloader_args.batch_sampler, ReproducibleBatchSampler): - sampler = dataloader_args.batch_sampler - elif isinstance(dataloader_args.sampler, ReproducibleSampler): - sampler = dataloader_args.sampler - elif isinstance(dataloader_args.sampler, TorchRandomSampler): - sampler = RandomSampler(dataloader_args.sampler.data_source) - logger.debug("Replace torch RandomSampler into fastNLP RandomSampler.") - elif self.is_distributed(): - raise RuntimeError("It is not allowed to use checkpoint retraining when you do not use our" - "`ReproducibleSampler`.") - else: - sampler = ReproduceBatchSampler( - batch_sampler=dataloader_args.batch_sampler if dataloader_args.batch_sampler is not None else dataloader_args.sampler, - batch_size=dataloader_args.batch_size, - drop_last=dataloader_args.drop_last - ) - sampler.load_state_dict(states.pop('sampler_states')) - states["dataloader"] = self.set_dist_repro_dataloader(dataloader, sampler) - - # 4. 
修改 trainer_state.batch_idx_in_epoch - # sampler 是类似 RandomSampler 的sampler,不是 batch_sampler; - if not isinstance(sampler, ReproducibleBatchSampler): - if dataloader_args.drop_last: - batch_idx_in_epoch = len( - sampler) // dataloader_args.batch_size - sampler.num_left_samples // dataloader_args.batch_size - else: - batch_idx_in_epoch = (len(sampler) + dataloader_args.batch_size - 1) // dataloader_args.batch_size - \ - (sampler.num_left_samples + dataloader_args.batch_size - 1) // dataloader_args.batch_size - # sampler 是 batch_sampler; - else: - batch_idx_in_epoch = sampler.batch_idx_in_epoch - - states["batch_idx_in_epoch"] = batch_idx_in_epoch + sampler_states = states.pop('sampler_states') + states_ret = self.load_sampler_state(dataloader, sampler_states) + states.update(states_ret) return states From 8467cb6e416d641a5129b61b1930a5bf47f9682e Mon Sep 17 00:00:00 2001 From: yhcc Date: Sun, 19 Jun 2022 17:16:39 +0800 Subject: [PATCH 24/52] fix bug and make shuffle automatic --- .../core/callbacks/load_best_model_callback.py | 18 ++++++++++-------- fastNLP/core/callbacks/progress_callback.py | 12 +++++------- .../core/dataloaders/jittor_dataloader/fdl.py | 15 ++++++++------- .../core/dataloaders/paddle_dataloader/fdl.py | 15 +++++++++------ fastNLP/core/dataloaders/prepare_dataloader.py | 5 +++-- .../core/dataloaders/torch_dataloader/fdl.py | 15 ++++++++------- 6 files changed, 43 insertions(+), 37 deletions(-) diff --git a/fastNLP/core/callbacks/load_best_model_callback.py b/fastNLP/core/callbacks/load_best_model_callback.py index ec6579a6..b0fa83c4 100644 --- a/fastNLP/core/callbacks/load_best_model_callback.py +++ b/fastNLP/core/callbacks/load_best_model_callback.py @@ -105,14 +105,16 @@ class LoadBestModelCallback(HasMonitorCallback): def on_train_end(self, trainer): if abs(self.monitor_value) != float('inf'): # 如果是 inf 说明从来没有运行过。 - if self.real_save_folder: - logger.info(f"Loading best model from {self.real_save_folder} with {self._real_monitor}: 
{self.monitor_value}...") - trainer.load_model(folder=self.real_save_folder, only_state_dict=self.only_state_dict, - model_load_fn=self.model_load_fn) - else: - logger.info(f"Loading best model from buffer with {self._real_monitor}: {self.monitor_value}...") - self.buffer.seek(0) - trainer.load_model(folder=self.buffer, only_state_dict=self.only_state_dict) + # 如果是分布式且报错了,就不要加载了,防止barrier的问题 + if not (trainer.driver.is_distributed() and self.encounter_exception): + if self.real_save_folder: + logger.info(f"Loading best model from {self.real_save_folder} with {self._real_monitor}: {self.monitor_value}...") + trainer.load_model(folder=self.real_save_folder, only_state_dict=self.only_state_dict, + model_load_fn=self.model_load_fn) + else: + logger.info(f"Loading best model from buffer with {self._real_monitor}: {self.monitor_value}...") + self.buffer.seek(0) + trainer.load_model(folder=self.buffer, only_state_dict=self.only_state_dict) if self.delete_after_after: if not self.encounter_exception: # 防止出现死锁。 trainer.driver.barrier() diff --git a/fastNLP/core/callbacks/progress_callback.py b/fastNLP/core/callbacks/progress_callback.py index 2f1d2b17..d1295682 100644 --- a/fastNLP/core/callbacks/progress_callback.py +++ b/fastNLP/core/callbacks/progress_callback.py @@ -22,9 +22,10 @@ class ProgressCallback(HasMonitorCallback): self.best_monitor_step = -1 self.best_results = None - def record_better_monitor(self, trainer): + def record_better_monitor(self, trainer, results): self.best_monitor_step = trainer.global_forward_batches self.best_monitor_epoch = trainer.cur_epoch_idx + self.best_results = results def on_train_end(self, trainer): if self.best_monitor_epoch != -1: @@ -138,7 +139,7 @@ class RichCallback(ProgressCallback): characters = '-' if self.monitor is not None: if self.is_better_results(results, keep_if_better=True): - self.record_better_monitor(trainer) + self.record_better_monitor(trainer, results) if abs(self.monitor_value) != float('inf'): rule_style = 
'spring_green3' text_style = '[bold]' @@ -154,7 +155,6 @@ class RichCallback(ProgressCallback): self.progress_bar.console.print_json(results) else: self.progress_bar.print(results) - self.best_results = results def clear_tasks(self): for key, taskid in self.task2id.items(): @@ -222,7 +222,7 @@ class RawTextCallback(ProgressCallback): text = '' if self.monitor is not None: if self.is_better_results(results, keep_if_better=True): - self.record_better_monitor(trainer) + self.record_better_monitor(trainer, results) if abs(self.monitor_value) != float('inf'): text = '+'*self.num_signs + base_text + '+'*self.num_signs if len(text) == 0: @@ -234,7 +234,6 @@ class RawTextCallback(ProgressCallback): if self.format_json: results = json.dumps(results) logger.info(results) - self.best_results = results @property def name(self): # progress bar的名称 @@ -311,7 +310,7 @@ class TqdmCallback(ProgressCallback): text = '' if self.monitor is not None: if self.is_better_results(results, keep_if_better=True): - self.record_better_monitor(trainer) + self.record_better_monitor(trainer, results) if abs(self.monitor_value) != float('inf'): text = '+'*self.num_signs + base_text + '+'*self.num_signs if len(text) == 0: @@ -323,7 +322,6 @@ class TqdmCallback(ProgressCallback): if self.format_json: results = json.dumps(results) logger.info(results) - self.best_results = results def clear_tasks(self): for key, taskid in self.task2id.items(): diff --git a/fastNLP/core/dataloaders/jittor_dataloader/fdl.py b/fastNLP/core/dataloaders/jittor_dataloader/fdl.py index 83555f6e..4631ba7b 100644 --- a/fastNLP/core/dataloaders/jittor_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/jittor_dataloader/fdl.py @@ -200,7 +200,7 @@ class JittorDataLoader: return self.cur_batch_indices -def prepare_jittor_dataloader(ds_or_db, batch_size: int = 16, shuffle: bool = False, +def prepare_jittor_dataloader(ds_or_db, batch_size: int = 16, shuffle: bool = None, drop_last: bool = False, num_workers: int = 0, buffer_size: int 
= 512 * 1024 * 1024, stop_grad: bool = True, keep_numpy_array: bool = False, endless: bool = False, collate_fn: Union[None, str, Callable] = "auto", @@ -230,7 +230,8 @@ def prepare_jittor_dataloader(ds_or_db, batch_size: int = 16, shuffle: bool = Fa :param non_train_batch_size: 如果传入的 ``ds_or_db`` 为 :class:`Dict` 或 :class:`~fastNLP.io.DataBundle` 对象,可以通过改参数 设置名称不为 `train` 的其他 ``dataset`` 的 ``batch_size``。 默认为 ``16``。 :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param shuffle: 是否打乱数据集, 默认为 ``False``。 + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 'train' 则设置其 shuffle 为 True , + 其它的为 False 。 :param drop_last: 当 ``drop_last=True`` 时,:class:`JittorDataLoader` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 :param num_workers: 当 ``num_workers > 0`` 时, :class:`JittorDataLoader` 会开启 num_workers 个子进程来处理数据, 可以加快 @@ -258,7 +259,7 @@ def prepare_jittor_dataloader(ds_or_db, batch_size: int = 16, shuffle: bool = Fa dl_bundle = {} for name, ds in ds_or_db.iter_datasets(): if 'train' in name: - dl_bundle[name] = JittorDataLoader(ds, batch_size=batch_size, shuffle=shuffle, + dl_bundle[name] = JittorDataLoader(ds, batch_size=batch_size, shuffle=True if shuffle is None else shuffle, drop_last=drop_last, num_workers=num_workers, buffer_size=buffer_size, stop_grad=stop_grad, keep_numpy_array=keep_numpy_array, @@ -267,7 +268,7 @@ def prepare_jittor_dataloader(ds_or_db, batch_size: int = 16, shuffle: bool = Fa else: dl_bundle[name] = JittorDataLoader(ds, batch_size=non_train_batch_size if non_train_batch_size else batch_size, - shuffle=shuffle, + shuffle=False if shuffle is None else shuffle, drop_last=drop_last, num_workers=num_workers, buffer_size=buffer_size, stop_grad=stop_grad, keep_numpy_array=keep_numpy_array, @@ -279,14 +280,14 @@ def prepare_jittor_dataloader(ds_or_db, batch_size: int = 16, shuffle: bool = Fa ds_dict = {} for name, ds in ds_or_db.items(): if 'train' in name: - dl = 
JittorDataLoader(ds, batch_size=batch_size, shuffle=shuffle, + dl = JittorDataLoader(ds, batch_size=batch_size, shuffle=True if shuffle is None else shuffle, drop_last=drop_last, num_workers=num_workers, buffer_size=buffer_size, stop_grad=stop_grad, keep_numpy_array=keep_numpy_array, endless=endless, collate_fn=collate_fn) else: dl = JittorDataLoader(ds, batch_size=non_train_batch_size if non_train_batch_size else batch_size, - shuffle=shuffle, + shuffle=False if shuffle is None else shuffle, drop_last=drop_last, num_workers=num_workers, buffer_size=buffer_size, stop_grad=stop_grad, keep_numpy_array=keep_numpy_array, @@ -296,7 +297,7 @@ def prepare_jittor_dataloader(ds_or_db, batch_size: int = 16, shuffle: bool = Fa return ds_dict elif isinstance(ds_or_db, HasLenGetitemType): - dl = JittorDataLoader(ds_or_db, batch_size=batch_size, shuffle=shuffle, + dl = JittorDataLoader(ds_or_db, batch_size=batch_size, shuffle=False if shuffle is None else shuffle, drop_last=drop_last, num_workers=num_workers, buffer_size=buffer_size, stop_grad=stop_grad, keep_numpy_array=keep_numpy_array, endless=endless, collate_fn=collate_fn) diff --git a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py index c84c1aaf..8999322b 100644 --- a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py @@ -293,7 +293,8 @@ def prepare_paddle_dataloader(ds_or_db, feed_list=None, places=None, :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,,其__iter__() 方法每次都会返回一个 List 对象, List中的值为 dataset 的下标 index ;默认为 None,当其不为 None 时,bacth_size, shuffle 参数均失效。 :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param shuffle: 是否将数据打乱,若``shuffle=True``则会将dataset打乱;若否则什么也不做。 + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 'train' 则设置其 shuffle 为 True , + 其它的为 False 。 :param drop_last: 当 ``drop_last=True`` 时,``PaddleDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; 若 
``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. @@ -326,7 +327,7 @@ def prepare_paddle_dataloader(ds_or_db, feed_list=None, places=None, dl_bundle[name] = PaddleDataLoader(ds, feed_list=feed_list, places=places, return_list=return_list, batch_sampler=batch_sampler, batch_size=batch_size, - shuffle=shuffle, + shuffle=True if shuffle is None else shuffle, drop_last=drop_last, collate_fn=collate_fn, num_workers=num_workers, use_shared_memory=use_shared_memory, use_buffer_reader=use_buffer_reader, @@ -337,7 +338,7 @@ def prepare_paddle_dataloader(ds_or_db, feed_list=None, places=None, return_list=return_list, batch_sampler=batch_sampler, batch_size=non_train_batch_size if non_train_batch_size else batch_size, - shuffle=shuffle, + shuffle=False if shuffle is None else shuffle, drop_last=drop_last, collate_fn=collate_fn, num_workers=num_workers, use_shared_memory=use_shared_memory, use_buffer_reader=use_buffer_reader, @@ -350,7 +351,8 @@ def prepare_paddle_dataloader(ds_or_db, feed_list=None, places=None, for name, ds in ds_or_db.items(): if 'train' in name: dl = PaddleDataLoader(ds, feed_list=feed_list, places=places, return_list=return_list, - batch_sampler=batch_sampler, batch_size=batch_size, shuffle=shuffle, + batch_sampler=batch_sampler, batch_size=batch_size, + shuffle=False if shuffle is None else shuffle, drop_last=drop_last, collate_fn=collate_fn, num_workers=num_workers, use_shared_memory=use_shared_memory, use_buffer_reader=use_buffer_reader, timeout=timeout, worker_init_fn=worker_init_fn, @@ -359,7 +361,7 @@ def prepare_paddle_dataloader(ds_or_db, feed_list=None, places=None, dl = PaddleDataLoader(ds, feed_list=feed_list, places=places, return_list=return_list, batch_sampler=batch_sampler, batch_size=non_train_batch_size if non_train_batch_size else batch_size, - shuffle=shuffle, + shuffle=False if shuffle is None else shuffle, drop_last=drop_last, 
collate_fn=collate_fn, num_workers=num_workers, use_shared_memory=use_shared_memory, use_buffer_reader=use_buffer_reader, timeout=timeout, worker_init_fn=worker_init_fn, @@ -369,7 +371,8 @@ def prepare_paddle_dataloader(ds_or_db, feed_list=None, places=None, elif isinstance(ds_or_db, HasLenGetitemType): dl = PaddleDataLoader(ds_or_db, feed_list=feed_list, places=places, return_list=return_list, - batch_sampler=batch_sampler, batch_size=batch_size, shuffle=shuffle, + batch_sampler=batch_sampler, batch_size=batch_size, + shuffle=False if shuffle is None else shuffle, drop_last=drop_last, collate_fn=collate_fn, num_workers=num_workers, use_shared_memory=use_shared_memory, use_buffer_reader=use_buffer_reader, timeout=timeout, worker_init_fn=worker_init_fn, persistent_workers=persistent_workers) diff --git a/fastNLP/core/dataloaders/prepare_dataloader.py b/fastNLP/core/dataloaders/prepare_dataloader.py index 5f469f2b..9cda2bd3 100644 --- a/fastNLP/core/dataloaders/prepare_dataloader.py +++ b/fastNLP/core/dataloaders/prepare_dataloader.py @@ -13,7 +13,7 @@ from ...envs import FASTNLP_BACKEND, SUPPORT_BACKENDS from ..log import logger -def prepare_dataloader(dataset, batch_size: int = 16, shuffle: bool = False, drop_last: bool = False, +def prepare_dataloader(dataset, batch_size: int = 16, shuffle: bool = None, drop_last: bool = False, collate_fn: Union[Callable, str, None] = 'auto', num_workers: int = 0, backend: str = 'auto'): """ @@ -28,7 +28,8 @@ def prepare_dataloader(dataset, batch_size: int = 16, shuffle: bool = False, dro * 为字典型 或 :class:`~fastNLP.io.DataBundle` 数据时,返回 `Dict` 类型的数据。 :param batch_size: 批次大小。 - :param shuffle: 是否打乱数据集。 + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 'train' 则设置其 shuffle 为 True , + 其它的为 False 。 :param drop_last: 当最后一个 batch 不足 batch_size 数量的是否,是否丢弃。 :param collate_fn: 用于处理一个 batch 的函数,一般包括 padding 和转为 tensor。有以下三种取值: diff --git a/fastNLP/core/dataloaders/torch_dataloader/fdl.py 
b/fastNLP/core/dataloaders/torch_dataloader/fdl.py index 2a119260..9b0ab8d3 100644 --- a/fastNLP/core/dataloaders/torch_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/torch_dataloader/fdl.py @@ -218,7 +218,7 @@ class TorchDataLoader(DataLoader): def prepare_torch_dataloader(ds_or_db, batch_size: int = 16, - shuffle: bool = False, + shuffle: bool = None, sampler: Union["Sampler[int]", ReproducibleSampler, UnrepeatedSampler] = None, batch_sampler: Union["Sampler[Sequence[int]]", ReproducibleBatchSampler] = None, num_workers: int = 0, collate_fn: Union[Callable, str, None] = 'auto', @@ -252,7 +252,8 @@ def prepare_torch_dataloader(ds_or_db, :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 :param non_train_batch_size: 非 'train' 数据集的 ``TorchDataLoader`` 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param shuffle: 是否打乱数据集, 默认为 ``False``。 + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 'train' 则设置其 shuffle 为 True , + 其它的为 False 。 :param sampler: 实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , 默认为None, 当其不为 None 时, shuffle 参数无效。 :param non_train_sampler: 非 'train' 数据集的的实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , @@ -290,7 +291,7 @@ def prepare_torch_dataloader(ds_or_db, for name, ds in ds_or_db.iter_datasets(): if 'train' in name: dl_bundle[name] = TorchDataLoader(dataset=ds, batch_size=batch_size, - shuffle=shuffle, sampler=sampler, batch_sampler=batch_sampler, + shuffle=True if shuffle is None else shuffle, sampler=sampler, batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, multiprocessing_context=multiprocessing_context, generator=generator, @@ -300,7 +301,7 @@ def prepare_torch_dataloader(ds_or_db, else: dl_bundle[name] = TorchDataLoader(dataset=ds, batch_size=non_train_batch_size if non_train_batch_size else batch_size, - shuffle=shuffle, + 
shuffle=False if shuffle is None else shuffle, sampler=non_train_sampler if non_train_sampler else sampler, batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, @@ -316,7 +317,7 @@ def prepare_torch_dataloader(ds_or_db, for name, ds in ds_or_db.items(): if 'train' in name: dl_bundle[name] = TorchDataLoader(dataset=ds, batch_size=batch_size, - shuffle=shuffle, sampler=sampler, batch_sampler=batch_sampler, + shuffle=True if shuffle is None else shuffle, sampler=sampler, batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, multiprocessing_context=multiprocessing_context, generator=generator, @@ -326,7 +327,7 @@ def prepare_torch_dataloader(ds_or_db, else: dl_bundle[name] = TorchDataLoader(dataset=ds, batch_size=non_train_batch_size if non_train_batch_size else batch_size, - shuffle=shuffle, + shuffle=False if shuffle is None else shuffle, sampler=non_train_sampler if non_train_sampler else sampler, batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, @@ -340,7 +341,7 @@ def prepare_torch_dataloader(ds_or_db, elif isinstance(ds_or_db, HasLenGetitemType): dl = TorchDataLoader(dataset=ds_or_db, batch_size=batch_size, - shuffle=shuffle, sampler=sampler, batch_sampler=batch_sampler, + shuffle=False if shuffle is None else shuffle, sampler=sampler, batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory, drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn, multiprocessing_context=multiprocessing_context, generator=generator, From 7023ea550cc53d288805e0ca8c2b2716139bb0b7 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Mon, 20 Jun 2022 02:26:34 +0800 Subject: [PATCH 25/52] =?UTF-8?q?deepspeed=E7=9A=84save=20load=E5=8A=9F?= =?UTF-8?q?=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../core/drivers/torch_driver/deepspeed.py | 22 +-- fastNLP/core/drivers/torch_driver/utils.py | 25 +++- .../drivers/torch_driver/test_deepspeed.py | 131 +++++++++--------- tests/pytest.ini | 3 +- 4 files changed, 106 insertions(+), 75 deletions(-) diff --git a/fastNLP/core/drivers/torch_driver/deepspeed.py b/fastNLP/core/drivers/torch_driver/deepspeed.py index bb4df495..79451b13 100644 --- a/fastNLP/core/drivers/torch_driver/deepspeed.py +++ b/fastNLP/core/drivers/torch_driver/deepspeed.py @@ -4,7 +4,7 @@ from pathlib import Path from typing import Union, Dict, List from .torch_driver import TorchDriver from .ddp import TorchDDPDriver -from .utils import _create_default_config, _DDPWrappingModel +from .utils import _create_default_config, _DeepSpeedWrappingModel from fastNLP.core.utils import nullcontext from fastNLP.core.log import logger from fastNLP.envs import( @@ -14,6 +14,7 @@ from fastNLP.envs import( from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_DEEPSPEED if _NEED_IMPORT_TORCH: + import pytorch_lightning import torch import torch.distributed as dist @@ -35,8 +36,8 @@ class DeepSpeedDriver(TorchDDPDriver): strategy= "deepspeed", **kwargs ): - assert _NEED_IMPORT_DEEPSPEED, "deepspeed is not imported." - assert not dist.is_initialized(), "DeepSpeedDriver does not support initialize distributed by user." + assert _NEED_IMPORT_DEEPSPEED, "Deepspeed is not imported." + # assert not dist.is_initialized(), "DeepSpeedDriver does not support initialize distributed by user." 
TorchDriver.__init__(self, model=model, fp16=False, **kwargs) self.fp16 = fp16 @@ -88,7 +89,7 @@ class DeepSpeedDriver(TorchDDPDriver): # 获取 batch_size 以设置 train_micro_batch_size_per_gpu 参数 train_dl = kwargs.get("train_dataloader", None) if train_dl is not None: - self.train_micro_batch_size = self.get_dataloader_args(train_dl) + self.train_micro_batch_size = self.get_dataloader_args(train_dl).batch_size else: logger.warn("No `train_dataloader` found, and we will set `train_micro_batch_size_per_gpu`" "to 1 for deepspeed configuration.") @@ -166,7 +167,7 @@ class DeepSpeedDriver(TorchDDPDriver): # 设置 deepspeed if not isinstance(self.model, deepspeed.DeepSpeedEngine): - model=_DDPWrappingModel(self.model) + model=_DeepSpeedWrappingModel(self.model, self.fp16) model_parameters = filter(lambda p: p.requires_grad, model.parameters()) self.model, ds_optimizer, _, _ = deepspeed.initialize( model=model, @@ -279,7 +280,7 @@ class DeepSpeedDriver(TorchDDPDriver): :return: """ # deepspeed engine 要求在每个 rank 都调用 save_checkpoint,故去掉了 rank_zero_call 装饰器 - if self.zero_stage_3: + if self.stage_3: logger.rank_zero_warning( "When saving the DeepSpeed Stage 3 checkpoint, " "each worker will save a shard of the checkpoint within a directory. " @@ -310,7 +311,8 @@ class DeepSpeedDriver(TorchDDPDriver): def save_checkpoint(self, folder: Path, states: Dict, dataloader, only_state_dict: bool = True, should_save_model: bool = True, **kwargs): # deepspeed engine 要求在每个 rank 都调用 save_checkpoint,故去掉了 rank_zero_call 装饰器 # 1. 保存 sampler 的状态 - sampler_state_dict = self.get_sampler_state_dict() + num_consumed_batches = states.pop('num_consumed_batches') + states['sampler_states'] = self.get_sampler_state(dataloader, num_consumed_batches) # 2. 
保存模型的状态; if not should_save_model: @@ -318,7 +320,7 @@ class DeepSpeedDriver(TorchDDPDriver): "so we will still save the model for you.") self.model.save_checkpoint(Path(folder).joinpath(FASTNLP_CHECKPOINT_FILENAME), - client_state=sampler_state_dict) + client_state=states) def load_checkpoint(self, folder: Path, dataloader, only_state_dict: bool = True, should_load_model: bool = True, **kwargs) -> Dict: # 1. 加载模型状态; @@ -330,7 +332,9 @@ class DeepSpeedDriver(TorchDDPDriver): raise RuntimeError(f"Failed to load checkpoint from path: {str(folder)}") # 2.恢复 sampler 的状态 - states = self.load_sampler_state_dict(states) + sampler_states = states.pop('sampler_states') + states_ret = self.load_sampler_state(dataloader, sampler_states) + states.update(states_ret) return states diff --git a/fastNLP/core/drivers/torch_driver/utils.py b/fastNLP/core/drivers/torch_driver/utils.py index e2b00aa6..8c44ea37 100644 --- a/fastNLP/core/drivers/torch_driver/utils.py +++ b/fastNLP/core/drivers/torch_driver/utils.py @@ -15,7 +15,7 @@ from fastNLP.envs import ( FASTNLP_GLOBAL_SEED, ) from fastNLP.core.samplers import re_instantiate_sampler, ReproducibleBatchSampler -from fastNLP.core.utils import auto_param_call +from fastNLP.core.utils import auto_param_call, apply_to_collection from fastNLP.core.log import logger if _NEED_IMPORT_TORCH: @@ -107,6 +107,29 @@ class _DDPWrappingModel(Module): else: return fn(batch) +class _DeepSpeedWrappingModel(_DDPWrappingModel): + """ + 继承 ``_DDPWrappingModel``,区别在于进行 forward 之前先将 float 数据转换为 float16 + """ + + def __init__(self, model: Module, fp16): + super(_DeepSpeedWrappingModel, self).__init__(model) + self.fp16 = fp16 + + def forward(self, batch, **kwargs): + if self.fp16: + batch = self._move_float_tensors_to_half(batch) + + return super().forward(batch, **kwargs) + + @staticmethod + def batch_to(data): + return data.half() + + def _move_float_tensors_to_half(self, batch: Any): + batch = apply_to_collection(batch, (torch.FloatTensor, 
torch.cuda.FloatTensor), function=self.batch_to) + return batch + class DummyGradScaler: """ diff --git a/tests/core/drivers/torch_driver/test_deepspeed.py b/tests/core/drivers/torch_driver/test_deepspeed.py index 8f28c332..462648bd 100644 --- a/tests/core/drivers/torch_driver/test_deepspeed.py +++ b/tests/core/drivers/torch_driver/test_deepspeed.py @@ -1,33 +1,30 @@ import os +from pathlib import Path import pytest -from pathlib import Path from fastNLP.core.drivers.torch_driver.deepspeed import DeepSpeedDriver from fastNLP.core.samplers import ( RandomSampler, - UnrepeatedSampler, BucketedBatchSampler, UnrepeatedRandomSampler, - UnrepeatedSequentialSampler, ) from tests.helpers.models.torch_model import TorchNormalModel_Classification_1 -from tests.helpers.datasets.torch_data import TorchNormalDataset, TorchNormalXYDataset +from tests.helpers.datasets.torch_data import TorchNormalXYDataset from tests.helpers.utils import magic_argv_env_context from fastNLP.envs.distributed import rank_zero_rm from fastNLP import logger - from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_DEEPSPEED if _NEED_IMPORT_TORCH: import torch import torch.distributed as dist - from torch.utils.data import DataLoader, BatchSampler + from torch.utils.data import DataLoader if _NEED_IMPORT_DEEPSPEED: import deepspeed -def generate_driver(labels, features, device=[0,1], fp16=False, output_from_new_proc="all"): +def generate_driver(labels, features, device=[0,1], fp16=False, output_from_new_proc="all", train_dataloader=None): torch_model = TorchNormalModel_Classification_1(labels, features) torch_opt = torch.optim.Adam(params=torch_model.parameters(), lr=0.01) device = [torch.device(i) for i in device] @@ -35,7 +32,8 @@ def generate_driver(labels, features, device=[0,1], fp16=False, output_from_new_ model=torch_model, parallel_device=device, fp16=fp16, - output_from_new_proc=output_from_new_proc + output_from_new_proc=output_from_new_proc, + train_dataloader=train_dataloader ) 
driver.set_optimizers(torch_opt) driver.setup() @@ -77,33 +75,33 @@ def dataloader_with_randomsampler(dataset, batch_size, shuffle, drop_last, seed= ############################################################################ # -# 测试 TorchDDPDriver 的一些函数 +# 测试 TorchDeepSpeedDriver 的一些函数 # ############################################################################ -@pytest.mark.torch -@magic_argv_env_context -def test_multi_drivers(): - """ - 测试使用了多个 TorchDDPDriver 的情况。 - """ - generate_driver(10, 10) - generate_driver(20, 10) +# @pytest.mark.deepspeed +# @magic_argv_env_context +# def test_multi_drivers(): +# """ +# 测试使用了多个 TorchDeepSpeedDriver 的情况。 +# """ +# generate_driver(10, 10) +# generate_driver(20, 10) - with pytest.raises(RuntimeError): - # 设备设置不同,应该报错 - generate_driver(20, 3, device=[0,1,2]) - assert False - dist.barrier() +# with pytest.raises(RuntimeError): +# # 设备设置不同,应该报错 +# generate_driver(20, 3, device=[0,1,2]) +# assert False +# dist.barrier() - if dist.is_initialized(): - dist.destroy_process_group() +# if dist.is_initialized(): +# dist.destroy_process_group() @magic_argv_env_context def test_multi_optimizers(): torch_model = TorchNormalModel_Classification_1(10, 10) torch_opt = torch.optim.Adam(params=torch_model.parameters(), lr=0.01) - device = [torch.device(i) for i in device] + device = [torch.device(i) for i in [0, 1]] driver = DeepSpeedDriver( model=torch_model, parallel_device=device, @@ -112,57 +110,59 @@ def test_multi_optimizers(): with pytest.raises(ValueError): driver.setup() - if dist.is_initialized(): - dist.destroy_process_group() + # if dist.is_initialized(): + # dist.destroy_process_group() -@pytest.mark.torch +@pytest.mark.deepspeed class TestDeepSpeedDriverFunction: """ 测试 TorchDeepSpeedDriver 一些简单函数的测试类,基本都是测试能否运行、是否存在 import 错误等问题 """ + @classmethod + def setup_class(cls): + cls.driver = generate_driver(10, 10) @magic_argv_env_context def test_simple_functions(self): """ 简单测试多个函数 """ - driver = generate_driver(10, 10) """ 测试 
move_data_to_device 函数。这个函数仅调用了 torch_move_data_to_device ,测试例在 tests/core/utils/test_torch_utils.py中,就不重复测试了 """ - driver.move_data_to_device(torch.rand((32, 64))) + self.driver.move_data_to_device(torch.rand((32, 64))) dist.barrier() """ 测试 is_distributed 函数 """ - assert driver.is_distributed() == True + assert self.driver.is_distributed() == True dist.barrier() """ 测试 get_no_sync_context 函数 """ - res = driver.get_model_no_sync_context() + res = self.driver.get_model_no_sync_context() dist.barrier() """ 测试 is_global_zero 函数 """ - driver.is_global_zero() + self.driver.is_global_zero() dist.barrier() """ 测试 unwrap_model 函数 """ - driver.unwrap_model() + self.driver.unwrap_model() dist.barrier() """ 测试 get_local_rank 函数 """ - driver.get_local_rank() + self.driver.get_local_rank() dist.barrier() """ @@ -170,9 +170,9 @@ class TestDeepSpeedDriverFunction: 详细的测试在 test_dist_utils.py 中完成 """ obj = { - "rank": driver.global_rank + "rank": self.driver.global_rank } - obj_list = driver.all_gather(obj, group=None) + obj_list = self.driver.all_gather(obj, group=None) for i, res in enumerate(obj_list): assert res["rank"] == i @@ -180,28 +180,32 @@ class TestDeepSpeedDriverFunction: 测试 broadcast_object 函数 详细的函数在 test_dist_utils.py 中完成 """ - if driver.global_rank == 0: + if self.driver.global_rank == 0: obj = { - "rank": driver.global_rank + "rank": self.driver.global_rank } else: obj = None - res = driver.broadcast_object(obj, src=0) + res = self.driver.broadcast_object(obj, src=0) assert res["rank"] == 0 - if dist.is_initialized(): - dist.destroy_process_group() + # if dist.is_initialized(): + # dist.destroy_process_group() ############################################################################ # # 测试 save 和 load 相关的功能 # ############################################################################ -@pytest.mark.torch +@pytest.mark.deepspeed class TestSaveLoad: """ 测试多卡情况下 save 和 load 相关函数的表现 """ + @classmethod + def setup_class(cls): + # 不在这里 setup 的话会报错 + cls.driver = 
generate_driver(10, 10, device=[0,1]) def setup_method(self): self.dataset = TorchNormalXYDataset(100) @@ -216,7 +220,8 @@ class TestSaveLoad: path = "model" dataloader = DataLoader(self.dataset, batch_size=2) - driver1, driver2 = generate_driver(20, 1), generate_driver(20, 1) + driver1, driver2 = generate_driver(20, 1, train_dataloader=dataloader), \ + generate_driver(20, 1, train_dataloader=dataloader) driver1.save_model(path, only_state_dict) @@ -244,8 +249,8 @@ class TestSaveLoad: finally: rank_zero_rm(path) - if dist.is_initialized(): - dist.destroy_process_group() + # if dist.is_initialized(): + # dist.destroy_process_group() @magic_argv_env_context @pytest.mark.parametrize("only_state_dict", ([True, False])) @@ -260,8 +265,6 @@ class TestSaveLoad: path = "model.ckp" num_replicas = len(device) - driver1, driver2 = generate_driver(20, 1, device=device, fp16=fp16), \ - generate_driver(20, 1, device=device, fp16=False) dataloader = dataloader_with_bucketedbatchsampler( self.dataset, length=[10 for i in range(len(self.dataset))], @@ -270,11 +273,13 @@ class TestSaveLoad: drop_last=False ) dataloader.batch_sampler.set_distributed( - num_replicas=driver1.world_size, - rank=driver1.global_rank, - pad=True + num_replicas=int(os.getenv("WORLD_SIZE", "1")), + rank=int(os.getenv("RANK", "0")), + pad=True, ) num_consumed_batches = 4 + driver1, driver2 = generate_driver(20, 1, device=device, fp16=fp16, train_dataloader=dataloader), \ + generate_driver(20, 1, device=device, fp16=False, train_dataloader=dataloader) already_seen_x_set = set() already_seen_y_set = set() @@ -323,10 +328,6 @@ class TestSaveLoad: assert replaced_loader.batch_sampler.seed == sampler_states["seed"] assert replaced_loader.batch_sampler.num_consumed_samples == num_consumed_batches * 4 * num_replicas - # 3. 检查 fp16 是否被加载 - if fp16: - assert not isinstance(driver2.grad_scaler, torch.cuda.amp.GradScaler) - # 4. 检查 model 的参数是否正确 # 5. 
检查 batch_idx start_batch = load_states.pop('batch_idx_in_epoch') @@ -338,6 +339,7 @@ class TestSaveLoad: left_x_batches.update(batch["x"].reshape(-1, ).tolist()) left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + batch = driver1.move_data_to_device(batch) res1 = driver1.model( batch, fastnlp_fn=driver1.model.module.model.evaluate_step, @@ -361,8 +363,8 @@ class TestSaveLoad: finally: rank_zero_rm(path) - if dist.is_initialized(): - dist.destroy_process_group() + # if dist.is_initialized(): + # dist.destroy_process_group() @magic_argv_env_context @pytest.mark.parametrize("only_state_dict", ([True, False])) @@ -378,16 +380,16 @@ class TestSaveLoad: num_replicas = len(device) - driver1 = generate_driver(20, 1, device=device, fp16=fp16) - driver2 = generate_driver(20, 1, device=device, fp16=False) - dataloader = dataloader_with_randomsampler(self.dataset, 4, True, False, unrepeated=False) dataloader.batch_sampler.sampler.set_distributed( - num_replicas=driver1.world_size, - rank=driver1.global_rank, + num_replicas=int(os.getenv("WORLD_SIZE", "1")), + rank=int(os.getenv("RANK", "0")), pad=True ) num_consumed_batches = 4 + + driver1 = generate_driver(20, 1, device=device, fp16=fp16, train_dataloader=dataloader) + driver2 = generate_driver(20, 1, device=device, fp16=False, train_dataloader=dataloader) already_seen_x_set = set() already_seen_y_set = set() @@ -448,6 +450,7 @@ class TestSaveLoad: left_x_batches.update(batch["x"].reshape(-1, ).tolist()) left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + batch = driver1.move_data_to_device(batch) res1 = driver1.model( batch, fastnlp_fn=driver1.model.module.model.evaluate_step, @@ -471,5 +474,5 @@ class TestSaveLoad: finally: rank_zero_rm(path) - if dist.is_initialized(): - dist.destroy_process_group() \ No newline at end of file + # if dist.is_initialized(): + # dist.destroy_process_group() \ No newline at end of file diff --git a/tests/pytest.ini b/tests/pytest.ini index 27076810..e2cac8d9 100644 --- 
a/tests/pytest.ini +++ b/tests/pytest.ini @@ -5,4 +5,5 @@ markers = paddledist jittor torchpaddle - torchjittor \ No newline at end of file + torchjittor + deepspeed \ No newline at end of file From 8d23253318d94add0a0cc681b435bd1f2be6547e Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Mon, 20 Jun 2022 06:33:14 +0800 Subject: [PATCH 26/52] import DeepSpeedDriver --- fastNLP/core/__init__.py | 1 + fastNLP/core/drivers/__init__.py | 3 ++- fastNLP/core/drivers/torch_driver/__init__.py | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/fastNLP/core/__init__.py b/fastNLP/core/__init__.py index 6cf73d3b..052bed5b 100644 --- a/fastNLP/core/__init__.py +++ b/fastNLP/core/__init__.py @@ -59,6 +59,7 @@ __all__ = [ # drivers "TorchSingleDriver", "TorchDDPDriver", + "DeepSpeedDriver", "PaddleSingleDriver", "PaddleFleetDriver", "JittorSingleDriver", diff --git a/fastNLP/core/drivers/__init__.py b/fastNLP/core/drivers/__init__.py index f9be3180..127e723a 100644 --- a/fastNLP/core/drivers/__init__.py +++ b/fastNLP/core/drivers/__init__.py @@ -3,6 +3,7 @@ __all__ = [ 'TorchDriver', "TorchSingleDriver", "TorchDDPDriver", + "DeepSpeedDriver", "PaddleDriver", "PaddleSingleDriver", "PaddleFleetDriver", @@ -14,7 +15,7 @@ __all__ = [ 'optimizer_state_to_device' ] -from .torch_driver import TorchDriver, TorchSingleDriver, TorchDDPDriver, torch_seed_everything, optimizer_state_to_device +from .torch_driver import TorchDriver, TorchSingleDriver, TorchDDPDriver, DeepSpeedDriver, torch_seed_everything, optimizer_state_to_device from .jittor_driver import JittorDriver, JittorMPIDriver, JittorSingleDriver from .paddle_driver import PaddleDriver, PaddleFleetDriver, PaddleSingleDriver, paddle_seed_everything from .driver import Driver diff --git a/fastNLP/core/drivers/torch_driver/__init__.py b/fastNLP/core/drivers/torch_driver/__init__.py index 8c24fa53..08026d9e 100644 --- a/fastNLP/core/drivers/torch_driver/__init__.py +++ 
b/fastNLP/core/drivers/torch_driver/__init__.py @@ -1,6 +1,7 @@ __all__ = [ 'TorchDDPDriver', 'TorchSingleDriver', + 'DeepSpeedDriver', 'TorchDriver', 'torch_seed_everything', 'optimizer_state_to_device' @@ -10,6 +11,7 @@ from .ddp import TorchDDPDriver # todo 实现 fairscale 后再将 fairscale 导入到这里; from .single_device import TorchSingleDriver from .torch_driver import TorchDriver +from .deepspeed import DeepSpeedDriver from .utils import torch_seed_everything, optimizer_state_to_device From 2735d2d10cb3e38bbeda2f3a0318684fce16dbf0 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Mon, 20 Jun 2022 06:34:25 +0800 Subject: [PATCH 27/52] =?UTF-8?q?DeepSpeedDriver=E7=8E=B0=E5=9C=A8?= =?UTF-8?q?=E5=8F=AF=E4=BB=A5=E9=80=9A=E8=BF=87=20deepspeed=20=E5=91=BD?= =?UTF-8?q?=E4=BB=A4=E6=8B=89=E8=B5=B7=EF=BC=9B=E6=B7=BB=E5=8A=A0=E4=BA=86?= =?UTF-8?q?=E7=9B=B8=E5=85=B3=20trainer=20=E7=9A=84=E7=AE=80=E5=8D=95?= =?UTF-8?q?=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../core/drivers/torch_driver/deepspeed.py | 61 ++++++---- .../torch_driver/initialize_torch_driver.py | 20 ++-- .../controllers/_test_trainer_deepspeed.py | 95 ++++++++++++++++ .../_test_trainer_deepspeed_outside.py | 105 ++++++++++++++++++ .../controllers/test_trainer_deepspeed.py | 99 +++++++++++++++++ 5 files changed, 351 insertions(+), 29 deletions(-) create mode 100644 tests/core/controllers/_test_trainer_deepspeed.py create mode 100644 tests/core/controllers/_test_trainer_deepspeed_outside.py diff --git a/fastNLP/core/drivers/torch_driver/deepspeed.py b/fastNLP/core/drivers/torch_driver/deepspeed.py index 79451b13..579a50f4 100644 --- a/fastNLP/core/drivers/torch_driver/deepspeed.py +++ b/fastNLP/core/drivers/torch_driver/deepspeed.py @@ -14,12 +14,13 @@ from fastNLP.envs import( from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_DEEPSPEED if _NEED_IMPORT_TORCH: - import pytorch_lightning import torch import 
torch.distributed as dist + from torch.optim import Optimizer if _NEED_IMPORT_DEEPSPEED: import deepspeed + from deepspeed import DeepSpeedEngine, DeepSpeedOptimizer __all__ = [ "DeepSpeedDriver", @@ -33,7 +34,6 @@ class DeepSpeedDriver(TorchDDPDriver): parallel_device: Union[List["torch.device"], "torch.device"], is_pull_by_torch_run = False, fp16: bool = False, - strategy= "deepspeed", **kwargs ): assert _NEED_IMPORT_DEEPSPEED, "Deepspeed is not imported." @@ -56,8 +56,22 @@ class DeepSpeedDriver(TorchDDPDriver): # 我们的 model_device 一定是 torch.device,而不是一个 list; self.model_device = parallel_device[self.local_rank] - # 暂时不允许在外面初始化 + # 如果用户自己在外面初始化了 deepspeed; self.outside_ddp = False + if dist.is_initialized() and FASTNLP_DISTRIBUTED_CHECK not in os.environ and \ + "fastnlp_torch_launch_not_ddp" not in os.environ: + # 如果用户自己在外面初始化了 deepspeed,那么我们要求用户传入的模型一定是已经由 DeepSpeedEngine 包裹后的模型; + if not isinstance(model, DeepSpeedEngine): + raise RuntimeError( + "It is not allowed to input a normal model instead of `DeepSpeedEngine` when" + "you initialize the ddp process out of our control.") + + self.outside_ddp = True + self.config = model.config + # 用户只有将模型上传到对应机器上后才能用 DistributedDataParallel 包裹,因此如果用户在外面初始化了 DDP,那么在 TorchDDPDriver 中 + # 我们就直接将 model_device 置为 None; + self.model_device = None + self._data_device = kwargs.get("data_device", None) if isinstance(self._data_device, int): if self._data_device < 0: @@ -84,7 +98,6 @@ class DeepSpeedDriver(TorchDDPDriver): self._has_setup = False # 设置这一参数是因为 evaluator 中也会进行 setup 操作,但是显然是不需要的也不应该的; self._has_ddpwrapped = False # 判断传入的模型是否经过 _has_ddpwrapped 包裹; - self.strategy = strategy self.accumulation_steps = kwargs.get("accumulation_steps", 1) # 获取 batch_size 以设置 train_micro_batch_size_per_gpu 参数 train_dl = kwargs.get("train_dataloader", None) @@ -96,6 +109,14 @@ class DeepSpeedDriver(TorchDDPDriver): self.train_micro_batch_size = 1 self._ds_kwargs = kwargs.get("deepspeed_kwargs", {}) + self.strategy = 
self._ds_kwargs.get("strategy", "deepspeed") + + @staticmethod + def _check_optimizer_legality(optimizers): + for each_optimizer in optimizers: + if not isinstance(each_optimizer, (Optimizer, DeepSpeedOptimizer)): + raise TypeError(f"Each optimizer of parameter `optimizers` should be 'Optimizer' or " + f"'DeepSpeedOptimizer'type, not {type(each_optimizer)}.") def setup(self): r""" @@ -112,15 +133,19 @@ class DeepSpeedDriver(TorchDDPDriver): self.setup_config() # 如果用户需要使用多机模式,那么一定进入到这里; if self.is_pull_by_torch_run: - # dist.get_world_size() 只能在 dist.init_process_group 初始化之后进行调用; - self.world_size = int(os.environ.get("WORLD_SIZE")) - self.global_rank = int(os.environ.get("RANK")) - logger.info(f"World size: {self.world_size}, Global rank: {self.global_rank}") + if self.outside_ddp: + self.world_size = dist.get_world_size() + self.global_rank = dist.get_rank() + else: + # dist.get_world_size() 只能在 dist.init_process_group 初始化之后进行调用; + self.world_size = int(os.environ.get("WORLD_SIZE")) + self.global_rank = int(os.environ.get("RANK")) + logger.info(f"World size: {self.world_size}, Global rank: {self.global_rank}") - if not dist.is_initialized(): - deepspeed.init_distributed("nccl", distributed_port=self.master_port) + if not dist.is_initialized(): + deepspeed.init_distributed("nccl", distributed_port=self.master_port) - os.environ["fastnlp_torch_launch_not_ddp"] = "yes" + os.environ["fastnlp_torch_launch_not_ddp"] = "yes" # 进入到这里的情况时: # dist.is_initialized 一定为 False; @@ -146,8 +171,9 @@ class DeepSpeedDriver(TorchDDPDriver): self.world_size = dist.get_world_size() self.global_rank = dist.get_rank() - torch.cuda.set_device(self.model_device) - self.configure_ddp() + if not self.outside_ddp: + torch.cuda.set_device(self.model_device) + self.configure_ddp() self.barrier() # 初始化 self._pids,从而使得每一个进程都能接受到 rank0 的 send 操作; @@ -166,7 +192,7 @@ class DeepSpeedDriver(TorchDDPDriver): def configure_ddp(self): # 设置 deepspeed - if not isinstance(self.model, 
deepspeed.DeepSpeedEngine): + if not isinstance(self.model, DeepSpeedEngine): model=_DeepSpeedWrappingModel(self.model, self.fp16) model_parameters = filter(lambda p: p.requires_grad, model.parameters()) self.model, ds_optimizer, _, _ = deepspeed.initialize( @@ -193,7 +219,6 @@ class DeepSpeedDriver(TorchDDPDriver): self.config = self._ds_kwargs.get("config") if self.config is not None: - # TODO 究竟哪些参数按照config,哪些按照trainer参数 logger.warn("Notice that you have defined a configuration for deepspeed and parameters like" "`optimizers`, `strategy` and `fp16` may not take effects.") return @@ -258,12 +283,6 @@ class DeepSpeedDriver(TorchDDPDriver): def step(self): self.model.step() - def unwrap_model(self): - r""" - :return: 返回原本的模型; - """ - return self.model.module.model - def get_model_no_sync_context(self): r""" :return: 返回一个 ``context`` 上下文环境,用于关闭各个进程之间的同步;在 ``deepspeed`` 中,返回一个空的上下文 diff --git a/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py b/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py index 5d4d2ab5..b0a16112 100644 --- a/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py +++ b/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py @@ -38,6 +38,9 @@ def initialize_torch_driver(driver: str, device: Optional[Union[str, "torch.devi if driver == 'fairscale': return FairScaleDriver(model, torch.device(f"cuda:{os.environ['LOCAL_RANK']}"), is_pull_by_torch_run=True, **kwargs) + elif kwargs.get("deepspeed_kwargs") is not None: + return DeepSpeedDriver(model, torch.device(f"cuda:{os.environ['LOCAL_RANK']}"), + is_pull_by_torch_run=True, **kwargs) else: return TorchDDPDriver(model, torch.device(f"cuda:{os.environ['LOCAL_RANK']}"), is_pull_by_torch_run=True, **kwargs) @@ -73,6 +76,14 @@ def initialize_torch_driver(driver: str, device: Optional[Union[str, "torch.devi raise ValueError("Parameter `device` is wrong type, please check our documentation for the right use.") if driver == "torch": # single, ddp, 直接启动。 + if 
kwargs.get("deepspeed_kwargs") is not None: + # 选择的是 deepspeed + if not isinstance(device, List): + if device.type == 'cpu': + raise ValueError("You are using `deepspeed` driver, but your chosen `device` is 'cpu'.") + logger.warning_once("Notice you are using `deepspeed`, but the `device` is only one gpu.") + return DeepSpeedDriver(model, [device], **kwargs) + return DeepSpeedDriver(model, device, **kwargs) if not isinstance(device, List): return TorchSingleDriver(model, device, **kwargs) else: @@ -84,11 +95,4 @@ def initialize_torch_driver(driver: str, device: Optional[Union[str, "torch.devi logger.warning_once("Notice you are using `fairscale`, but the `device` is only one gpu.") return FairScaleDriver(model, [device], **kwargs) else: - return FairScaleDriver(model, device, **kwargs) - elif driver == "deepspeed": - if not isinstance(device, List): - if device.type == 'cpu': - raise ValueError("You are using `deepspeed` driver, but your chosen `device` is 'cpu'.") - logger.warning_once("Notice you are using `deepspeed`, but the `device` is only one gpu.") - return DeepSpeedDriver(model, [device], **kwargs) - return DeepSpeedDriver(model, device, **kwargs) \ No newline at end of file + return FairScaleDriver(model, device, **kwargs) \ No newline at end of file diff --git a/tests/core/controllers/_test_trainer_deepspeed.py b/tests/core/controllers/_test_trainer_deepspeed.py new file mode 100644 index 00000000..2dc6326c --- /dev/null +++ b/tests/core/controllers/_test_trainer_deepspeed.py @@ -0,0 +1,95 @@ +""" +这个文件测试多卡情况下使用 deepspeed 的情况:: + + >>> # 测试直接使用多卡 + >>> python _test_trainer_deepspeed.py + >>> # 测试通过 deepspeed 拉起 + >>> deepspeed _test_trainer_deepspeed.py + +""" +import sys +sys.path.append("../../../") +from dataclasses import dataclass + +from fastNLP.core.controllers.trainer import Trainer +from fastNLP.core.metrics.accuracy import Accuracy +from fastNLP.core.callbacks.progress_callback import RichCallback + +from torch.optim import Adam +from 
torch.utils.data import DataLoader + +from tests.helpers.models.torch_model import TorchNormalModel_Classification_1 +from tests.helpers.datasets.torch_data import TorchArgMaxDataset + +@dataclass +class TrainDeepSpeedConfig: + num_labels: int = 3 + feature_dimension: int = 3 + + batch_size: int = 2 + shuffle: bool = True + evaluate_every = 2 + +def test_trainer_deepspeed( + device, + callbacks, + strategy, + config, + n_epochs=2, +): + model = TorchNormalModel_Classification_1( + num_labels=TrainDeepSpeedConfig.num_labels, + feature_dimension=TrainDeepSpeedConfig.feature_dimension + ) + optimizers = Adam(params=model.parameters(), lr=0.0001) + train_dataloader = DataLoader( + dataset=TorchArgMaxDataset(TrainDeepSpeedConfig.feature_dimension, 20), + batch_size=TrainDeepSpeedConfig.batch_size, + shuffle=True + ) + val_dataloader = DataLoader( + dataset=TorchArgMaxDataset(TrainDeepSpeedConfig.feature_dimension, 12), + batch_size=TrainDeepSpeedConfig.batch_size, + shuffle=True + ) + train_dataloader = train_dataloader + evaluate_dataloaders = val_dataloader + evaluate_every = TrainDeepSpeedConfig.evaluate_every + metrics = {"acc": Accuracy()} + if config is not None: + config["train_micro_batch_size_per_gpu"] = TrainDeepSpeedConfig.batch_size + trainer = Trainer( + model=model, + driver="torch", + device=device, + optimizers=optimizers, + train_dataloader=train_dataloader, + evaluate_dataloaders=evaluate_dataloaders, + evaluate_every=evaluate_every, + metrics=metrics, + output_mapping={"preds": "pred"}, + + n_epochs=n_epochs, + callbacks=callbacks, + deepspeed_kwargs={ + "strategy": strategy, + "config": config + } + ) + trainer.run() + +if __name__ == "__main__": + device = [0,1] + # device = [0,1,3] + callbacks = [ + # RecordMetricCallback(monitor="acc#acc", metric_threshold=0.0, larger_better=True), + RichCallback(5), + ] + config = None + test_trainer_deepspeed( + device=device, + callbacks=callbacks, + strategy="deepspeed", + config=config, + n_epochs=5, + ) \ No 
newline at end of file diff --git a/tests/core/controllers/_test_trainer_deepspeed_outside.py b/tests/core/controllers/_test_trainer_deepspeed_outside.py new file mode 100644 index 00000000..a8dbd823 --- /dev/null +++ b/tests/core/controllers/_test_trainer_deepspeed_outside.py @@ -0,0 +1,105 @@ +""" +这个文件测试多卡情况下使用 deepspeed ,且用户自己调用了 deepspeed.initialize 的情况:: + + >>> deepspeed _test_trainer_deepspeed_outside.py + +""" +import os +import sys +sys.path.append("../../../") +from dataclasses import dataclass + +from fastNLP.core.controllers.trainer import Trainer +from fastNLP.core.metrics.accuracy import Accuracy +from fastNLP.core.callbacks.progress_callback import RichCallback +from fastNLP.core.drivers.torch_driver.utils import _create_default_config + +import deepspeed +import torch +from torch.optim import Adam +from torch.utils.data import DataLoader + + +from tests.helpers.models.torch_model import TorchNormalModel_Classification_2 +from tests.helpers.datasets.torch_data import TorchArgMaxDataset + +local_rank = int(os.environ["LOCAL_RANK"]) + +@dataclass +class TrainDeepSpeedConfig: + num_labels: int = 3 + feature_dimension: int = 3 + + batch_size: int = 2 + shuffle: bool = True + evaluate_every = 2 + +def test_trainer_deepspeed( + device, + callbacks, + strategy, + config, + n_epochs=2, +): + model = TorchNormalModel_Classification_2( + num_labels=TrainDeepSpeedConfig.num_labels, + feature_dimension=TrainDeepSpeedConfig.feature_dimension + ) + optimizers = Adam(params=model.parameters(), lr=0.0001) + train_dataloader = DataLoader( + dataset=TorchArgMaxDataset(TrainDeepSpeedConfig.feature_dimension, 20), + batch_size=TrainDeepSpeedConfig.batch_size, + shuffle=True + ) + val_dataloader = DataLoader( + dataset=TorchArgMaxDataset(TrainDeepSpeedConfig.feature_dimension, 12), + batch_size=TrainDeepSpeedConfig.batch_size, + shuffle=True + ) + train_dataloader = train_dataloader + evaluate_dataloaders = val_dataloader + evaluate_every = 
TrainDeepSpeedConfig.evaluate_every + metrics = {"acc": Accuracy()} + if config is not None: + config["train_micro_batch_size_per_gpu"] = TrainDeepSpeedConfig.batch_size + model, optimizers, _, _ = deepspeed.initialize( + model=model, + optimizer=optimizers, + config=config, + ) + trainer = Trainer( + model=model, + driver="torch", + device=device, + data_device=torch.device(f"cuda:{local_rank}"), + optimizers=optimizers, + train_dataloader=train_dataloader, + evaluate_dataloaders=evaluate_dataloaders, + evaluate_every=evaluate_every, + metrics=metrics, + output_mapping={"preds": "pred"}, + + n_epochs=n_epochs, + callbacks=callbacks, + deepspeed_kwargs={ + "strategy": strategy, + "config": config + } + ) + trainer.run() + +if __name__ == "__main__": + device = [0,1] + # device = [0,1,3] + callbacks = [ + # RecordMetricCallback(monitor="acc#acc", metric_threshold=0.0, larger_better=True), + RichCallback(5), + ] + config = _create_default_config(stage=2) + test_trainer_deepspeed( + device=device, + callbacks=callbacks, + strategy="deepspeed", + config=config, + n_epochs=5, + ) \ No newline at end of file diff --git a/tests/core/controllers/test_trainer_deepspeed.py b/tests/core/controllers/test_trainer_deepspeed.py index e69de29b..c718e01d 100644 --- a/tests/core/controllers/test_trainer_deepspeed.py +++ b/tests/core/controllers/test_trainer_deepspeed.py @@ -0,0 +1,99 @@ +import pytest +from dataclasses import dataclass + +from fastNLP.core.controllers.trainer import Trainer +from fastNLP.core.metrics.accuracy import Accuracy +from fastNLP.core.callbacks.progress_callback import RichCallback +from fastNLP.core.drivers.torch_driver import DeepSpeedDriver +from fastNLP.core.drivers.torch_driver.utils import _create_default_config +from fastNLP.envs.imports import _NEED_IMPORT_TORCH + +if _NEED_IMPORT_TORCH: + import torch + from torch.optim import Adam + from torch.utils.data import DataLoader + + +from tests.helpers.models.torch_model import 
TorchNormalModel_Classification_1 +from tests.helpers.datasets.torch_data import TorchArgMaxDataset +from tests.helpers.utils import magic_argv_env_context + +@dataclass +class TrainDeepSpeedConfig: + num_labels: int = 3 + feature_dimension: int = 3 + + batch_size: int = 2 + shuffle: bool = True + evaluate_every = 2 + +@pytest.mark.deepspeed +class TestTrainer: + @classmethod + def setup_class(cls): + # 不初始化的话从第二个测试例开始会因为环境变量报错。 + torch_model = TorchNormalModel_Classification_1(1, 1) + torch_opt = torch.optim.Adam(params=torch_model.parameters(), lr=0.01) + device = [torch.device(i) for i in [0,1]] + driver = DeepSpeedDriver( + model=torch_model, + parallel_device=device, + ) + driver.set_optimizers(torch_opt) + driver.setup() + + return driver + + @pytest.mark.parametrize("device", [[0, 1]]) + @pytest.mark.parametrize("callbacks", [[RichCallback(5)]]) + @pytest.mark.parametrize("strategy", ["deepspeed", "deepspeed_stage_1"]) + @pytest.mark.parametrize("config", [None, _create_default_config(stage=1)]) + @magic_argv_env_context + def test_trainer_deepspeed( + self, + device, + callbacks, + strategy, + config, + n_epochs=2, + ): + model = TorchNormalModel_Classification_1( + num_labels=TrainDeepSpeedConfig.num_labels, + feature_dimension=TrainDeepSpeedConfig.feature_dimension + ) + optimizers = Adam(params=model.parameters(), lr=0.0001) + train_dataloader = DataLoader( + dataset=TorchArgMaxDataset(TrainDeepSpeedConfig.feature_dimension, 20), + batch_size=TrainDeepSpeedConfig.batch_size, + shuffle=True + ) + val_dataloader = DataLoader( + dataset=TorchArgMaxDataset(TrainDeepSpeedConfig.feature_dimension, 12), + batch_size=TrainDeepSpeedConfig.batch_size, + shuffle=True + ) + train_dataloader = train_dataloader + evaluate_dataloaders = val_dataloader + evaluate_every = TrainDeepSpeedConfig.evaluate_every + metrics = {"acc": Accuracy()} + if config is not None: + config["train_micro_batch_size_per_gpu"] = TrainDeepSpeedConfig.batch_size + trainer = Trainer( + 
model=model, + driver="torch", + device=device, + optimizers=optimizers, + train_dataloader=train_dataloader, + evaluate_dataloaders=evaluate_dataloaders, + evaluate_every=evaluate_every, + metrics=metrics, + output_mapping={"preds": "pred"}, + + n_epochs=n_epochs, + callbacks=callbacks, + deepspeed_kwargs={ + "strategy": strategy, + "config": config + } + ) + trainer.run() From de5d5597e73d08da90e8d2ceebc4d00e64ccf3a4 Mon Sep 17 00:00:00 2001 From: yhcc Date: Mon, 20 Jun 2022 12:58:42 +0800 Subject: [PATCH 28/52] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dfitlogcallback=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0launch=5Ftime=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/callbacks/fitlog_callback.py | 2 +- fastNLP/core/callbacks/progress_callback.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fastNLP/core/callbacks/fitlog_callback.py b/fastNLP/core/callbacks/fitlog_callback.py index 35662539..19a8b476 100644 --- a/fastNLP/core/callbacks/fitlog_callback.py +++ b/fastNLP/core/callbacks/fitlog_callback.py @@ -44,7 +44,7 @@ class FitlogCallback(HasMonitorCallback): if get_global_rank() != 0: # 如果不是 global rank 为 0 ,需要关闭 fitlog fitlog.debug() super().on_after_trainer_initialized(trainer, driver) - fitlog.add_other('launch_time', os.environ['FASTNLP_LAUNCH_TIME']) + fitlog.add_other(name='launch_time', value=os.environ['FASTNLP_LAUNCH_TIME']) def on_sanity_check_end(self, trainer, sanity_check_res): super(FitlogCallback, self).on_sanity_check_end(trainer, sanity_check_res) diff --git a/fastNLP/core/callbacks/progress_callback.py b/fastNLP/core/callbacks/progress_callback.py index d1295682..c172a9a7 100644 --- a/fastNLP/core/callbacks/progress_callback.py +++ b/fastNLP/core/callbacks/progress_callback.py @@ -25,7 +25,7 @@ class ProgressCallback(HasMonitorCallback): def record_better_monitor(self, trainer, results): self.best_monitor_step = trainer.global_forward_batches self.best_monitor_epoch = 
trainer.cur_epoch_idx - self.best_results = results + self.best_results = self.itemize_results(results) def on_train_end(self, trainer): if self.best_monitor_epoch != -1: From a495bb938a57011c2bf4043bf26a630ee834cc94 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Mon, 20 Jun 2022 20:49:17 +0800 Subject: [PATCH 29/52] =?UTF-8?q?1.=E4=BF=AE=E5=A4=8D=E6=A8=A1=E5=9E=8B?= =?UTF-8?q?=E4=BC=9A=E8=A2=AB=E7=A7=BB=E5=8A=A8=E5=88=B0rank=E5=AF=B9?= =?UTF-8?q?=E5=BA=94=E8=AE=BE=E5=A4=87=E7=9A=84=E9=97=AE=E9=A2=98=202.?= =?UTF-8?q?=E6=9B=B4=E6=94=B9=20deepspeed=20driver=20=C3=A6=C2=98=E5=91=BD?= =?UTF-8?q?=E5=90=8D=203.=E4=B8=BA=20deepspeed=20=E6=B7=BB=E5=8A=A0=20logg?= =?UTF-8?q?ing=5Flevel?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../core/drivers/torch_driver/deepspeed.py | 15 ++++++++++---- .../torch_driver/initialize_torch_driver.py | 20 +++++++++---------- .../controllers/_test_trainer_deepspeed.py | 4 ++-- .../_test_trainer_deepspeed_outside.py | 2 +- 4 files changed, 24 insertions(+), 17 deletions(-) diff --git a/fastNLP/core/drivers/torch_driver/deepspeed.py b/fastNLP/core/drivers/torch_driver/deepspeed.py index 579a50f4..3d519099 100644 --- a/fastNLP/core/drivers/torch_driver/deepspeed.py +++ b/fastNLP/core/drivers/torch_driver/deepspeed.py @@ -1,4 +1,6 @@ import os +import argparse +import logging from pathlib import Path from typing import Union, Dict, List @@ -46,7 +48,7 @@ class DeepSpeedDriver(TorchDDPDriver): self.parallel_device = parallel_device if not is_pull_by_torch_run and parallel_device is None: raise ValueError( - "Parameter `parallel_device` can not be None when using `TorchDDPDriver`. This error is caused " + "Parameter `parallel_device` can not be None when using `TorchDeepSpeedDriver`. 
This error is caused " "when your value of parameter `device` is `None` in your `Trainer` instance.") # 注意我们在 initialize_torch_driver 中的逻辑就是如果是 is_pull_by_torch_run,那么我们就直接把 parallel_device 置为当前进程的gpu; @@ -68,8 +70,6 @@ class DeepSpeedDriver(TorchDDPDriver): self.outside_ddp = True self.config = model.config - # 用户只有将模型上传到对应机器上后才能用 DistributedDataParallel 包裹,因此如果用户在外面初始化了 DDP,那么在 TorchDDPDriver 中 - # 我们就直接将 model_device 置为 None; self.model_device = None self._data_device = kwargs.get("data_device", None) @@ -110,6 +110,8 @@ class DeepSpeedDriver(TorchDDPDriver): self._ds_kwargs = kwargs.get("deepspeed_kwargs", {}) self.strategy = self._ds_kwargs.get("strategy", "deepspeed") + deepspeed_logging_level = self._ds_kwargs.get("logging_level", logging.ERROR) + deepspeed.utils.logging.logger.setLevel(deepspeed_logging_level) @staticmethod def _check_optimizer_legality(optimizers): @@ -126,7 +128,7 @@ class DeepSpeedDriver(TorchDDPDriver): 2. 每个进程将模型迁移到自己对应的 ``gpu`` 设备上;然后使用 ``DistributedDataParallel`` 包裹模型; """ if len(self.optimizers) != 1: - raise ValueError("Multi optimizers is not supported for DeepSpeedDriver right now.") + raise ValueError("Multi optimizers is not supported for `DeepSpeedDriver` right now.") if self._has_setup: return self._has_setup = True @@ -173,6 +175,9 @@ class DeepSpeedDriver(TorchDDPDriver): if not self.outside_ddp: torch.cuda.set_device(self.model_device) + # TODO 模型过大的话应该会导致显存溢出,但是不加的话显存会占用rank对应的设备 + # lightning里在之前通过broadcast_list广播了log_dir所以没有这种情况 + self.model.to(self.model_device) self.configure_ddp() self.barrier() @@ -196,10 +201,12 @@ class DeepSpeedDriver(TorchDDPDriver): model=_DeepSpeedWrappingModel(self.model, self.fp16) model_parameters = filter(lambda p: p.requires_grad, model.parameters()) self.model, ds_optimizer, _, _ = deepspeed.initialize( + args=argparse.Namespace(device_rank=self.model_device.index), model=model, optimizer=self.optimizers[0], model_parameters=model_parameters, config=self.config, + 
dist_init_required=False ) self._optimizers = [ds_optimizer] diff --git a/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py b/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py index b0a16112..f242b813 100644 --- a/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py +++ b/fastNLP/core/drivers/torch_driver/initialize_torch_driver.py @@ -38,7 +38,7 @@ def initialize_torch_driver(driver: str, device: Optional[Union[str, "torch.devi if driver == 'fairscale': return FairScaleDriver(model, torch.device(f"cuda:{os.environ['LOCAL_RANK']}"), is_pull_by_torch_run=True, **kwargs) - elif kwargs.get("deepspeed_kwargs") is not None: + elif driver == 'deepspeed': return DeepSpeedDriver(model, torch.device(f"cuda:{os.environ['LOCAL_RANK']}"), is_pull_by_torch_run=True, **kwargs) else: @@ -76,14 +76,6 @@ def initialize_torch_driver(driver: str, device: Optional[Union[str, "torch.devi raise ValueError("Parameter `device` is wrong type, please check our documentation for the right use.") if driver == "torch": # single, ddp, 直接启动。 - if kwargs.get("deepspeed_kwargs") is not None: - # 选择的是 deepspeed - if not isinstance(device, List): - if device.type == 'cpu': - raise ValueError("You are using `deepspeed` driver, but your chosen `device` is 'cpu'.") - logger.warning_once("Notice you are using `deepspeed`, but the `device` is only one gpu.") - return DeepSpeedDriver(model, [device], **kwargs) - return DeepSpeedDriver(model, device, **kwargs) if not isinstance(device, List): return TorchSingleDriver(model, device, **kwargs) else: @@ -95,4 +87,12 @@ def initialize_torch_driver(driver: str, device: Optional[Union[str, "torch.devi logger.warning_once("Notice you are using `fairscale`, but the `device` is only one gpu.") return FairScaleDriver(model, [device], **kwargs) else: - return FairScaleDriver(model, device, **kwargs) \ No newline at end of file + return FairScaleDriver(model, device, **kwargs) + elif driver == "deepspeed": + if not isinstance(device, 
List): + if device.type == 'cpu': + raise ValueError("You are using `deepspeed` driver, but your chosen `device` is 'cpu'.") + logger.warning_once("Notice you are using `deepspeed`, but the `device` is only one gpu.") + return DeepSpeedDriver(model, [device], **kwargs) + else: + return DeepSpeedDriver(model, device, **kwargs) \ No newline at end of file diff --git a/tests/core/controllers/_test_trainer_deepspeed.py b/tests/core/controllers/_test_trainer_deepspeed.py index 2dc6326c..0c51e47c 100644 --- a/tests/core/controllers/_test_trainer_deepspeed.py +++ b/tests/core/controllers/_test_trainer_deepspeed.py @@ -60,7 +60,7 @@ def test_trainer_deepspeed( config["train_micro_batch_size_per_gpu"] = TrainDeepSpeedConfig.batch_size trainer = Trainer( model=model, - driver="torch", + driver="deepspeed", device=device, optimizers=optimizers, train_dataloader=train_dataloader, @@ -79,7 +79,7 @@ def test_trainer_deepspeed( trainer.run() if __name__ == "__main__": - device = [0,1] + device = [4, 5] # device = [0,1,3] callbacks = [ # RecordMetricCallback(monitor="acc#acc", metric_threshold=0.0, larger_better=True), diff --git a/tests/core/controllers/_test_trainer_deepspeed_outside.py b/tests/core/controllers/_test_trainer_deepspeed_outside.py index a8dbd823..6821787e 100644 --- a/tests/core/controllers/_test_trainer_deepspeed_outside.py +++ b/tests/core/controllers/_test_trainer_deepspeed_outside.py @@ -69,7 +69,7 @@ def test_trainer_deepspeed( ) trainer = Trainer( model=model, - driver="torch", + driver="deepspeed", device=device, data_device=torch.device(f"cuda:{local_rank}"), optimizers=optimizers, From 6f9d703f132fde6ec5e18df42eef0d1b0da0b69a Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Mon, 20 Jun 2022 20:52:04 +0800 Subject: [PATCH 30/52] logger.warn->logger.warning --- fastNLP/core/drivers/torch_driver/deepspeed.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fastNLP/core/drivers/torch_driver/deepspeed.py 
b/fastNLP/core/drivers/torch_driver/deepspeed.py index 3d519099..a99a42f8 100644 --- a/fastNLP/core/drivers/torch_driver/deepspeed.py +++ b/fastNLP/core/drivers/torch_driver/deepspeed.py @@ -104,7 +104,7 @@ class DeepSpeedDriver(TorchDDPDriver): if train_dl is not None: self.train_micro_batch_size = self.get_dataloader_args(train_dl).batch_size else: - logger.warn("No `train_dataloader` found, and we will set `train_micro_batch_size_per_gpu`" + logger.warning("No `train_dataloader` found, and we will set `train_micro_batch_size_per_gpu`" "to 1 for deepspeed configuration.") self.train_micro_batch_size = 1 @@ -226,7 +226,7 @@ class DeepSpeedDriver(TorchDDPDriver): self.config = self._ds_kwargs.get("config") if self.config is not None: - logger.warn("Notice that you have defined a configuration for deepspeed and parameters like" + logger.warning("Notice that you have defined a configuration for deepspeed and parameters like" "`optimizers`, `strategy` and `fp16` may not take effects.") return @@ -330,7 +330,7 @@ class DeepSpeedDriver(TorchDDPDriver): :return: """ if not only_state_dict: - logger.warn("Only loading state dict is not allowed for `DeepSpeedDriver`. We will load its " + logger.warning("Only loading state dict is not allowed for `DeepSpeedDriver`. 
We will load its " "checkpoint for you instead.") self.model.load_checkpoint(filepath, **kwargs) From 44d2a574ae3adb5ae051aead27aeda69b08cbb9e Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Mon, 20 Jun 2022 20:52:53 +0800 Subject: [PATCH 31/52] logger.warn->logger.warning --- fastNLP/core/metrics/accuracy.py | 2 +- fastNLP/core/metrics/classify_f1_pre_rec_metric.py | 2 +- fastNLP/core/metrics/span_f1_pre_rec_metric.py | 2 +- fastNLP/core/utils/utils.py | 2 +- fastNLP/embeddings/torch/static_embedding.py | 4 ++-- fastNLP/io/embed_loader.py | 4 ++-- fastNLP/io/loader/classification.py | 2 +- fastNLP/io/loader/matching.py | 6 +++--- fastNLP/io/pipe/matching.py | 4 ++-- fastNLP/io/pipe/utils.py | 2 +- fastNLP/modules/mix_modules/utils.py | 2 +- fastNLP/transformers/torch/configuration_utils.py | 2 +- .../transformers/torch/generation_beam_search.py | 2 +- fastNLP/transformers/torch/generation_utils.py | 14 +++++++------- .../transformers/torch/models/auto/auto_factory.py | 2 +- .../torch/models/auto/configuration_auto.py | 2 +- .../torch/models/auto/modeling_auto.py | 4 ++-- .../torch/models/bart/modeling_bart.py | 2 +- .../torch/models/bert/modeling_bert.py | 2 +- .../transformers/torch/models/cpt/modeling_cpt.py | 2 +- .../transformers/torch/tokenization_utils_base.py | 12 ++++++------ 21 files changed, 38 insertions(+), 38 deletions(-) diff --git a/fastNLP/core/metrics/accuracy.py b/fastNLP/core/metrics/accuracy.py index 47d5e114..fbd826bd 100644 --- a/fastNLP/core/metrics/accuracy.py +++ b/fastNLP/core/metrics/accuracy.py @@ -69,7 +69,7 @@ class Accuracy(Metric): elif pred.ndim == target.ndim + 1: pred = pred.argmax(axis=-1) if seq_len is None and target.ndim > 1: - logger.warn("You are not passing `seq_len` to exclude pad when calculate accuracy.") + logger.warning("You are not passing `seq_len` to exclude pad when calculate accuracy.") else: raise RuntimeError(f"when pred have size:{pred.shape}, target should have size: {pred.shape} or " 
diff --git a/fastNLP/core/metrics/classify_f1_pre_rec_metric.py b/fastNLP/core/metrics/classify_f1_pre_rec_metric.py index daf325c0..39565f40 100644 --- a/fastNLP/core/metrics/classify_f1_pre_rec_metric.py +++ b/fastNLP/core/metrics/classify_f1_pre_rec_metric.py @@ -156,7 +156,7 @@ class ClassifyFPreRecMetric(Metric): elif pred.ndim == target.ndim + 1: pred = pred.argmax(axis=-1) if seq_len is None and target.ndim > 1: - logger.warn("You are not passing `seq_len` to exclude pad when calculate accuracy.") + logger.warning("You are not passing `seq_len` to exclude pad when calculate accuracy.") else: raise RuntimeError(f"when pred have " f"size:{pred.shape}, target should have size: {pred.shape} or " diff --git a/fastNLP/core/metrics/span_f1_pre_rec_metric.py b/fastNLP/core/metrics/span_f1_pre_rec_metric.py index 9a0b1d9d..07a6cd56 100644 --- a/fastNLP/core/metrics/span_f1_pre_rec_metric.py +++ b/fastNLP/core/metrics/span_f1_pre_rec_metric.py @@ -39,7 +39,7 @@ def _check_tag_vocab_and_encoding_type(tag_vocab: Union[Vocabulary, dict], encod f"encoding_type." tags = tags.replace(tag, '') # 删除该值 if tags: # 如果不为空,说明出现了未使用的tag - logger.warn(f"Tag:{tags} in encoding type:{encoding_type} is not presented in your Vocabulary. Check your " + logger.warning(f"Tag:{tags} in encoding type:{encoding_type} is not presented in your Vocabulary. 
Check your " "encoding_type.") diff --git a/fastNLP/core/utils/utils.py b/fastNLP/core/utils/utils.py index 33a7ee7e..ec0c87b0 100644 --- a/fastNLP/core/utils/utils.py +++ b/fastNLP/core/utils/utils.py @@ -554,7 +554,7 @@ def deprecated(help_message: Optional[str] = None): def wrapper(*args, **kwargs): func_hash = hash(deprecated_function) if func_hash not in _emitted_deprecation_warnings: - logger.warn(warning_msg, category=FutureWarning, stacklevel=2) + logger.warning(warning_msg, category=FutureWarning, stacklevel=2) _emitted_deprecation_warnings.add(func_hash) return deprecated_function(*args, **kwargs) diff --git a/fastNLP/embeddings/torch/static_embedding.py b/fastNLP/embeddings/torch/static_embedding.py index de2b231a..cc15c214 100644 --- a/fastNLP/embeddings/torch/static_embedding.py +++ b/fastNLP/embeddings/torch/static_embedding.py @@ -286,7 +286,7 @@ class StaticEmbedding(TokenEmbedding): if word in vocab: index = vocab.to_index(word) if index in matrix: - logger.warn(f"Word has more than one vector in embedding file. Set logger level to " + logger.warning(f"Word has more than one vector in embedding file. 
Set logger level to " f"DEBUG for detail.") logger.debug(f"Word:{word} occurs again in line:{idx}(starts from 0)") matrix[index] = torch.from_numpy(np.fromstring(' '.join(nums), sep=' ', dtype=dtype, count=dim)) @@ -295,7 +295,7 @@ class StaticEmbedding(TokenEmbedding): found_count += 1 except Exception as e: if error == 'ignore': - logger.warn("Error occurred at the {} line.".format(idx)) + logger.warning("Error occurred at the {} line.".format(idx)) else: logger.error("Error occurred at the {} line.".format(idx)) raise e diff --git a/fastNLP/io/embed_loader.py b/fastNLP/io/embed_loader.py index 9080ff28..df82643b 100644 --- a/fastNLP/io/embed_loader.py +++ b/fastNLP/io/embed_loader.py @@ -91,7 +91,7 @@ class EmbedLoader: hit_flags[index] = True except Exception as e: if error == 'ignore': - logger.warn("Error occurred at the {} line.".format(idx)) + logger.warning("Error occurred at the {} line.".format(idx)) else: logging.error("Error occurred at the {} line.".format(idx)) raise e @@ -156,7 +156,7 @@ class EmbedLoader: found_pad = True except Exception as e: if error == 'ignore': - logger.warn("Error occurred at the {} line.".format(idx)) + logger.warning("Error occurred at the {} line.".format(idx)) pass else: logging.error("Error occurred at the {} line.".format(idx)) diff --git a/fastNLP/io/loader/classification.py b/fastNLP/io/loader/classification.py index 4416376f..2ae0b163 100644 --- a/fastNLP/io/loader/classification.py +++ b/fastNLP/io/loader/classification.py @@ -345,7 +345,7 @@ class SST2Loader(Loader): with open(path, 'r', encoding='utf-8') as f: f.readline() # 跳过header if 'test' in os.path.split(path)[1]: - logger.warn("SST2's test file has no target.") + logger.warning("SST2's test file has no target.") for line in f: line = line.strip() if line: diff --git a/fastNLP/io/loader/matching.py b/fastNLP/io/loader/matching.py index 5595b798..08387df9 100644 --- a/fastNLP/io/loader/matching.py +++ b/fastNLP/io/loader/matching.py @@ -55,7 +55,7 @@ class 
MNLILoader(Loader): with open(path, 'r', encoding='utf-8') as f: f.readline() # 跳过header if path.endswith("test_matched.tsv") or path.endswith('test_mismatched.tsv'): - logger.warn("MNLI's test file has no target.") + logger.warning("MNLI's test file has no target.") for line in f: line = line.strip() if line: @@ -227,7 +227,7 @@ class QNLILoader(JsonLoader): with open(path, 'r', encoding='utf-8') as f: f.readline() # 跳过header if path.endswith("test.tsv"): - logger.warn("QNLI's test file has no target.") + logger.warning("QNLI's test file has no target.") for line in f: line = line.strip() if line: @@ -289,7 +289,7 @@ class RTELoader(Loader): with open(path, 'r', encoding='utf-8') as f: f.readline() # 跳过header if path.endswith("test.tsv"): - logger.warn("RTE's test file has no target.") + logger.warning("RTE's test file has no target.") for line in f: line = line.strip() if line: diff --git a/fastNLP/io/pipe/matching.py b/fastNLP/io/pipe/matching.py index a89f2f2b..baebdbaa 100644 --- a/fastNLP/io/pipe/matching.py +++ b/fastNLP/io/pipe/matching.py @@ -146,7 +146,7 @@ class MatchingBertPipe(Pipe): warn_msg = f"There are {len(target_vocab._no_create_word)} target labels" \ f" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} " \ f"data set but not in train data set!." - logger.warn(warn_msg) + logger.warning(warn_msg) print(warn_msg) has_target_datasets = [dataset for name, dataset in data_bundle.datasets.items() if @@ -291,7 +291,7 @@ class MatchingPipe(Pipe): warn_msg = f"There are {len(target_vocab._no_create_word)} target labels" \ f" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} " \ f"data set but not in train data set!." 
- logger.warn(warn_msg) + logger.warning(warn_msg) print(warn_msg) has_target_datasets = [dataset for name, dataset in data_bundle.datasets.items() if diff --git a/fastNLP/io/pipe/utils.py b/fastNLP/io/pipe/utils.py index aa28af08..05dd3cf4 100644 --- a/fastNLP/io/pipe/utils.py +++ b/fastNLP/io/pipe/utils.py @@ -138,7 +138,7 @@ def _indexize(data_bundle, input_field_names='words', target_field_names='target f" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} " \ f"data set but not in train data set!.\n" \ f"These label(s) are {tgt_vocab._no_create_word}" - logger.warn(warn_msg) + logger.warning(warn_msg) # log.warning(warn_msg) tgt_vocab.index_dataset(*[ds for ds in data_bundle.datasets.values() if ds.has_field(target_field_name)], field_name=target_field_name) data_bundle.set_vocab(tgt_vocab, target_field_name) diff --git a/fastNLP/modules/mix_modules/utils.py b/fastNLP/modules/mix_modules/utils.py index 21d0f05c..04dab056 100644 --- a/fastNLP/modules/mix_modules/utils.py +++ b/fastNLP/modules/mix_modules/utils.py @@ -112,7 +112,7 @@ def _jittor2torch(jittor_var: 'jittor.Var', device: Optional[Union[str, int]] = # 如果outputs有_grad键,可以实现求导 no_gradient = not jittor_var.requires_grad if no_gradient is None else no_gradient if no_gradient == False: - logger.warn("The result tensor will not keep gradients due to differences between jittor and pytorch.") + logger.warning("The result tensor will not keep gradients due to differences between jittor and pytorch.") jittor_numpy = jittor_var.numpy() if not np.issubdtype(jittor_numpy.dtype, np.inexact): no_gradient = True diff --git a/fastNLP/transformers/torch/configuration_utils.py b/fastNLP/transformers/torch/configuration_utils.py index 948d9873..26a80377 100644 --- a/fastNLP/transformers/torch/configuration_utils.py +++ b/fastNLP/transformers/torch/configuration_utils.py @@ -327,7 +327,7 @@ class PretrainedConfig: # Deal with gradient checkpointing if kwargs.get("gradient_checkpointing", False): 
- logger.warn( + logger.warning( "Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 " "Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the " "`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`." diff --git a/fastNLP/transformers/torch/generation_beam_search.py b/fastNLP/transformers/torch/generation_beam_search.py index 117d9a38..1c3fc592 100644 --- a/fastNLP/transformers/torch/generation_beam_search.py +++ b/fastNLP/transformers/torch/generation_beam_search.py @@ -195,7 +195,7 @@ class BeamSearchScorer(BeamScorer): ) if "max_length" in kwargs: - logger.warn( + logger.warning( "Passing `max_length` to BeamSearchScorer is deprecated and has no effect." "`max_length` should be passed directly to `beam_search(...)`, `beam_sample(...)`" ",or `group_beam_search(...)`." diff --git a/fastNLP/transformers/torch/generation_utils.py b/fastNLP/transformers/torch/generation_utils.py index 0e6fe5c7..29828c15 100644 --- a/fastNLP/transformers/torch/generation_utils.py +++ b/fastNLP/transformers/torch/generation_utils.py @@ -872,7 +872,7 @@ class GenerationMixin: max_length = self.config.max_length elif max_length is not None and max_new_tokens is not None: # Both are set, this is odd, raise a warning - logger.warn( + logger.warning( "Both `max_length` and `max_new_tokens` have been set but they serve the same purpose.", UserWarning ) @@ -1239,7 +1239,7 @@ class GenerationMixin: logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: - logger.warn( + logger.warning( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) @@ -1475,7 +1475,7 @@ class GenerationMixin: logits_processor = logits_processor 
if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: - logger.warn( + logger.warning( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) @@ -1726,13 +1726,13 @@ class GenerationMixin: logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: - logger.warn( + logger.warning( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) if len(stopping_criteria) == 0: - logger.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning) + logger.warning("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores @@ -2030,7 +2030,7 @@ class GenerationMixin: logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: - logger.warn( + logger.warning( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) @@ -2325,7 +2325,7 @@ class GenerationMixin: logits_processor = logits_processor if logits_processor is not None else 
LogitsProcessorList() stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() if max_length is not None: - logger.warn( + logger.warning( "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.", UserWarning, ) diff --git a/fastNLP/transformers/torch/models/auto/auto_factory.py b/fastNLP/transformers/torch/models/auto/auto_factory.py index 9eb8ec69..d0969a5b 100644 --- a/fastNLP/transformers/torch/models/auto/auto_factory.py +++ b/fastNLP/transformers/torch/models/auto/auto_factory.py @@ -401,7 +401,7 @@ class _BaseAutoModelClass: "the option `trust_remote_code=True` to remove this error." ) if kwargs.get("revision", None) is None: - logger.warn( + logger.warning( "Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure " "no malicious code has been contributed in a newer revision." ) diff --git a/fastNLP/transformers/torch/models/auto/configuration_auto.py b/fastNLP/transformers/torch/models/auto/configuration_auto.py index 45d3c071..1289071d 100644 --- a/fastNLP/transformers/torch/models/auto/configuration_auto.py +++ b/fastNLP/transformers/torch/models/auto/configuration_auto.py @@ -130,7 +130,7 @@ class _LazyLoadAllMappings(OrderedDict): def _initialize(self): if self._initialized: return - # logger.warn( + # logger.warning( # "ALL_PRETRAINED_CONFIG_ARCHIVE_MAP is deprecated and will be removed in v5 of Transformers. " # "It does not contain all available model checkpoints, far from it. 
Checkout hf.co/models for that.", # FutureWarning, diff --git a/fastNLP/transformers/torch/models/auto/modeling_auto.py b/fastNLP/transformers/torch/models/auto/modeling_auto.py index aace27a2..dbf4b610 100644 --- a/fastNLP/transformers/torch/models/auto/modeling_auto.py +++ b/fastNLP/transformers/torch/models/auto/modeling_auto.py @@ -306,7 +306,7 @@ AutoModelForSpeechSeq2Seq = auto_class_update( class AutoModelWithLMHead(_AutoModelWithLMHead): @classmethod def from_config(cls, config): - logger.warn( + logger.warning( "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use " "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and " "`AutoModelForSeq2SeqLM` for encoder-decoder models.", @@ -316,7 +316,7 @@ class AutoModelWithLMHead(_AutoModelWithLMHead): @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): - logger.warn( + logger.warning( "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. 
Please use " "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and " "`AutoModelForSeq2SeqLM` for encoder-decoder models.", diff --git a/fastNLP/transformers/torch/models/bart/modeling_bart.py b/fastNLP/transformers/torch/models/bart/modeling_bart.py index 7219f49a..377afa41 100644 --- a/fastNLP/transformers/torch/models/bart/modeling_bart.py +++ b/fastNLP/transformers/torch/models/bart/modeling_bart.py @@ -513,7 +513,7 @@ class BartPretrainedModel(PreTrainedModel): class PretrainedBartModel(BartPretrainedModel): def __init_subclass__(self): - logger.warn( + logger.warning( "The class `PretrainedBartModel` has been depreciated, please use `BartPretrainedModel` instead.", FutureWarning, ) diff --git a/fastNLP/transformers/torch/models/bert/modeling_bert.py b/fastNLP/transformers/torch/models/bert/modeling_bert.py index b95da0df..79f1c459 100644 --- a/fastNLP/transformers/torch/models/bert/modeling_bert.py +++ b/fastNLP/transformers/torch/models/bert/modeling_bert.py @@ -1374,7 +1374,7 @@ class BertForNextSentencePrediction(BertPreTrainedModel): """ if "next_sentence_label" in kwargs: - logger.warn( + logger.warning( "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.", FutureWarning, ) diff --git a/fastNLP/transformers/torch/models/cpt/modeling_cpt.py b/fastNLP/transformers/torch/models/cpt/modeling_cpt.py index 2910cc26..df7d477b 100644 --- a/fastNLP/transformers/torch/models/cpt/modeling_cpt.py +++ b/fastNLP/transformers/torch/models/cpt/modeling_cpt.py @@ -724,7 +724,7 @@ class CPTDecoder(CPTPretrainedModel): if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: - logger.warn( + logger.warning( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." 
) diff --git a/fastNLP/transformers/torch/tokenization_utils_base.py b/fastNLP/transformers/torch/tokenization_utils_base.py index 3a033c96..a04dbaf1 100644 --- a/fastNLP/transformers/torch/tokenization_utils_base.py +++ b/fastNLP/transformers/torch/tokenization_utils_base.py @@ -312,7 +312,7 @@ class BatchEncoding(UserDict): """ if not self._encodings: raise ValueError("words() is not available when using Python-based tokenizers") - logger.warn( + logger.warning( "`BatchEncoding.words()` property is deprecated and should be replaced with the identical, " "but more self-explanatory `BatchEncoding.word_ids()` property.", FutureWarning, @@ -1601,7 +1601,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin): f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not " "supported for this tokenizer. Use a model identifier or the path to a directory instead." ) - logger.warn( + logger.warning( f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and " "won't be possible anymore in v5. Use a model identifier or the path to a directory instead.", FutureWarning, @@ -2163,7 +2163,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin): # Get padding strategy if padding is False and old_pad_to_max_length: if verbose: - logger.warn( + logger.warning( "The `pad_to_max_length` argument is deprecated and will be removed in a future version, " "use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or " "use `padding='max_length'` to pad to a max length. In this case, you can give a specific " @@ -2184,7 +2184,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin): "To pad to max length, use `padding='max_length'`." 
) if old_pad_to_max_length is not False: - logger.warn("Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.") + logger.warning("Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.") padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(padding, PaddingStrategy): padding_strategy = PaddingStrategy(padding) @@ -2196,7 +2196,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin): # Get truncation strategy if truncation is False and old_truncation_strategy != "do_not_truncate": if verbose: - logger.warn( + logger.warning( "The `truncation_strategy` argument is deprecated and will be removed in a future version, " "use `truncation=True` to truncate examples to a max length. You can give a specific " "length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the " @@ -3352,7 +3352,7 @@ model_inputs["labels"] = labels["input_ids"] See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice. For a more complete example, see the implementation of `prepare_seq2seq_batch`. """ - logger.warn(formatted_warning, FutureWarning) + logger.warning(formatted_warning, FutureWarning) # mBART-specific kwargs that should be ignored by other models. 
kwargs.pop("src_lang", None) kwargs.pop("tgt_lang", None) From eb0e563fecf8c29949d58a4c4374269ea2e3be2a Mon Sep 17 00:00:00 2001 From: yhcc Date: Wed, 22 Jun 2022 16:34:41 +0800 Subject: [PATCH 32/52] =?UTF-8?q?=E4=BF=AE=E5=A4=8DOverfitBatches=E8=A2=AB?= =?UTF-8?q?=E6=9B=BF=E6=8D=A2=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/dataloaders/utils.py | 25 +++++++++++++++---------- fastNLP/core/dataset/dataset.py | 2 +- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/fastNLP/core/dataloaders/utils.py b/fastNLP/core/dataloaders/utils.py index 06f09da3..4f8fa743 100644 --- a/fastNLP/core/dataloaders/utils.py +++ b/fastNLP/core/dataloaders/utils.py @@ -118,17 +118,22 @@ class OverfitDataLoader: 实现一个简单的迭代器来模拟实际的 dataloader,从给定的 dataloader 中取出部分数据,来让 Trainer 实现 overfit 的功能; """ - def __init__(self, dataloader, overfit_batches: int): + def __init__(self, dataloader, overfit_batches: int, batches=None): + # batches 参数是给重新初始化dataloader使用的 self.dataloader = dataloader # 需要将实际的 dataloader 挂载到该对象上,从而应付一些对于实际的 dataloader 的操作; - self.batches = [] - self.overfit_batches = int(overfit_batches) - - if self.overfit_batches > len(dataloader): - logger.warning("Parameter 'overfit_batches' is bigger than the length of 'train_dataloader'.") - - for idx, batch in enumerate(dataloader): - if idx < self.overfit_batches or self.overfit_batches <= -1: - self.batches.append(batch) + if batches is None: + self.batches = [] + self.overfit_batches = int(overfit_batches) + + if self.overfit_batches > len(dataloader): + logger.warning("Parameter 'overfit_batches' is bigger than the length of 'train_dataloader'.") + + for idx, batch in enumerate(dataloader): + if idx < self.overfit_batches or self.overfit_batches <= -1: + self.batches.append(batch) + else: + assert isinstance(batches, list) + self.batches = batches def __len__(self): return len(self.batches) diff --git 
a/fastNLP/core/dataset/dataset.py b/fastNLP/core/dataset/dataset.py index fff8b5c2..0238a65d 100644 --- a/fastNLP/core/dataset/dataset.py +++ b/fastNLP/core/dataset/dataset.py @@ -445,7 +445,7 @@ class DataSet: "DataSet object has {} fields, but attempt to append an Instance object with {} fields." .format(len(self.field_arrays), len(instance.fields))) for name, field in instance.items(): - assert name in self.field_arrays + assert name in self.field_arrays, f'Field:`{name}` is not found in {self.field_arrays.keys()}' try: self.field_arrays[name].append(field) except Exception as e: From 78596ea11ce886e28fe97fef2275da9e7fa2f7d6 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Wed, 22 Jun 2022 16:49:03 +0800 Subject: [PATCH 33/52] =?UTF-8?q?=E4=B8=BA=20Trainer=20=E7=9A=84driver=20?= =?UTF-8?q?=E5=8F=82=E6=95=B0=E5=A2=9E=E5=8A=A0=20'auto'=20=E9=80=89?= =?UTF-8?q?=E9=A1=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/controllers/trainer.py | 11 ++++++----- fastNLP/core/drivers/choose_driver.py | 11 +++++++++++ fastNLP/core/utils/__init__.py | 9 ++++++--- fastNLP/core/utils/jittor_utils.py | 14 +++++++++++++- fastNLP/core/utils/paddle_utils.py | 15 ++++++++++++++- fastNLP/core/utils/torch_utils.py | 15 ++++++++++++++- 6 files changed, 64 insertions(+), 11 deletions(-) diff --git a/fastNLP/core/controllers/trainer.py b/fastNLP/core/controllers/trainer.py index 0f22e63c..7a84598e 100644 --- a/fastNLP/core/controllers/trainer.py +++ b/fastNLP/core/controllers/trainer.py @@ -55,9 +55,10 @@ class Trainer(TrainerEventTrigger): 您应当使用 ``TorchDDPDriver``,意味着您需要通过 ``python -m torch.distributed.launch`` 的方式来启动训练,此时参数 ``device`` 应当设置为 None(此时我们会忽略该参数),具体见下面对于参数 ``device`` 的更详细的解释。 - :param driver: 训练模型所使用的具体的驱动模式,应当为以下选择中的一个:["torch"],之后我们会加入 jittor、paddle 等 - 国产框架的训练模式;其中 "torch" 表示使用 ``TorchSingleDriver`` 或者 ``TorchDDPDriver``,具体使用哪一种取决于参数 ``device`` - 的设置; + :param driver: 
训练模型所使用的具体的驱动模式,应当为以下选择中的一个:["auto", "torch", "paddle", "jittor", "fairscale"]。其值为 ``"auto"`` 时, + **FastNLP** 会根据传入模型的类型自行判断使用哪一种模式;其值为 "torch" 时,表示使用 ``TorchSingleDriver`` 或者 ``TorchDDPDriver``; + 其值为 "paddle" 时,表示使用 ``PaddleSingleDriver`` 或者 ``PaddleFleetDriver``;其值为 "jittor" 时,表示使用 ``JittorSingleDriver`` + 或者 ``JittorMPIDriver``;其值为 "fairscale" 时,表示使用 ``FairScaleDriver``。在指定了框架的情况下,具体使用哪一种取决于参数 ``device`` 的设置; .. warning:: @@ -81,7 +82,7 @@ class Trainer(TrainerEventTrigger): device 的可选输入如下所示: - * *str*: 例如 'cpu', 'cuda', 'cuda:0', 'cuda:1' 等; + * *str*: 例如 'cpu', 'cuda', 'cuda:0', 'cuda:1', 'gpu:0' 等; * *torch.device*: 例如 'torch.device("cuda:0")'; * *int*: 将使用 ``device_id`` 为该值的 ``gpu`` 进行训练;如果值为 -1,那么默认使用全部的显卡,此时使用的 driver 实例是 `TorchDDPDriver`; * *list(int)*: 如果多于 1 个device,应当通过该种方式进行设定;注意此时我们一定会使用 ``TorchDDPDriver``,不管您传入的列表的长度是 1 还是其它值; @@ -365,9 +366,9 @@ class Trainer(TrainerEventTrigger): def __init__( self, model, - driver, train_dataloader, optimizers, + driver: str = "auto", device: Optional[Union[int, List[int], str]] = "cpu", n_epochs: int = 20, evaluate_dataloaders=None, diff --git a/fastNLP/core/drivers/choose_driver.py b/fastNLP/core/drivers/choose_driver.py index 4be1e502..75df97c4 100644 --- a/fastNLP/core/drivers/choose_driver.py +++ b/fastNLP/core/drivers/choose_driver.py @@ -1,6 +1,7 @@ from typing import Union, Optional, List from .driver import Driver +from ..utils import is_torch_module, is_paddle_module, is_jittor_module def choose_driver(model, driver: Union[str, Driver], device: Optional[Union[int, List[int], str]], **kwargs) -> Driver: @@ -17,6 +18,16 @@ def choose_driver(model, driver: Union[str, Driver], device: Optional[Union[int, if isinstance(driver, Driver): return driver + if driver == "auto": + if is_torch_module(model): + driver = "torch" + elif is_paddle_module(model): + driver = "paddle" + elif is_jittor_module(model): + driver = "jittor" + else: + raise ValueError(f"Cannot choose driver automatically based on model, please 
set `driver` specifically.") + if driver in {"torch", "fairscale"}: from fastNLP.core.drivers.torch_driver.initialize_torch_driver import initialize_torch_driver return initialize_torch_driver(driver, device, model, **kwargs) diff --git a/fastNLP/core/utils/__init__.py b/fastNLP/core/utils/__init__.py index 0857f450..2825b5ac 100644 --- a/fastNLP/core/utils/__init__.py +++ b/fastNLP/core/utils/__init__.py @@ -1,5 +1,6 @@ __all__ = [ 'cache_results', + 'is_jittor_module', 'is_jittor_dataset', 'jittor_collate_wraps', 'paddle_to', @@ -9,8 +10,10 @@ __all__ = [ 'is_in_paddle_dist', 'is_in_fnlp_paddle_dist', 'is_in_paddle_launch_dist', + 'is_paddle_module', 'f_rich_progress', 'torch_move_data_to_device', + 'is_torch_module', 'get_fn_arg_names', 'auto_param_call', 'check_user_specific_params', @@ -28,11 +31,11 @@ __all__ = [ ] from .cache_results import cache_results -from .jittor_utils import is_jittor_dataset, jittor_collate_wraps +from .jittor_utils import is_jittor_dataset, jittor_collate_wraps, is_jittor_module from .paddle_utils import paddle_to, paddle_move_data_to_device, get_paddle_device_id, get_paddle_gpu_str, is_in_paddle_dist, \ - is_in_fnlp_paddle_dist, is_in_paddle_launch_dist + is_in_fnlp_paddle_dist, is_in_paddle_launch_dist, is_paddle_module from .rich_progress import f_rich_progress -from .torch_utils import torch_move_data_to_device +from .torch_utils import torch_move_data_to_device, is_torch_module from .utils import * from .tqdm_progress import f_tqdm_progress from .seq_len_to_mask import seq_len_to_mask diff --git a/fastNLP/core/utils/jittor_utils.py b/fastNLP/core/utils/jittor_utils.py index f29b1f46..ac00cd22 100644 --- a/fastNLP/core/utils/jittor_utils.py +++ b/fastNLP/core/utils/jittor_utils.py @@ -1,6 +1,7 @@ __all__ = [ + 'is_jittor_module', 'is_jittor_dataset', - 'jittor_collate_wraps' + 'jittor_collate_wraps', ] from collections.abc import Mapping, Callable @@ -13,6 +14,17 @@ if _NEED_IMPORT_JITTOR: from fastNLP.core.dataset import 
Instance +def is_jittor_module(model) -> bool: + """ + 判断传入的 ``model`` 是否是 :class:`jittor.Module` 类型 + + :param model: 模型; + :return: 当前模型是否为 ``jittor`` 的模型; + """ + try: + return isinstance(model, jt.Module) + except BaseException: + return False def is_jittor_dataset(dataset) -> bool: """ diff --git a/fastNLP/core/utils/paddle_utils.py b/fastNLP/core/utils/paddle_utils.py index 9e7e73a4..adcbcabd 100644 --- a/fastNLP/core/utils/paddle_utils.py +++ b/fastNLP/core/utils/paddle_utils.py @@ -6,6 +6,7 @@ __all__ = [ "is_in_paddle_dist", "is_in_fnlp_paddle_dist", "is_in_paddle_launch_dist", + "is_paddle_module", ] import os @@ -174,4 +175,16 @@ def is_in_paddle_launch_dist() -> bool: """ 判断是否处于 ``python -m paddle.distributed.launch`` 方法启动的 **paddle** 分布式进程中 """ - return FASTNLP_BACKEND_LAUNCH in os.environ \ No newline at end of file + return FASTNLP_BACKEND_LAUNCH in os.environ + +def is_paddle_module(model) -> bool: + """ + 判断传入的 ``model`` 是否是 :class:`paddle.nn.Layer` 类型 + + :param model: 模型; + :return: 当前模型是否为 ``paddle`` 的模型; + """ + try: + return isinstance(model, paddle.nn.Layer) + except BaseException: + return False \ No newline at end of file diff --git a/fastNLP/core/utils/torch_utils.py b/fastNLP/core/utils/torch_utils.py index 0cef2205..c58715b8 100644 --- a/fastNLP/core/utils/torch_utils.py +++ b/fastNLP/core/utils/torch_utils.py @@ -8,7 +8,8 @@ if _NEED_IMPORT_TORCH: DEFAULT_TORCH_GROUP = torch.distributed.distributed_c10d.group.WORLD __all__ = [ - 'torch_move_data_to_device' + 'torch_move_data_to_device', + 'is_torch_module', ] from .utils import apply_to_collection @@ -64,3 +65,15 @@ def torch_move_data_to_device(batch: Any, device: Optional[Union[str, "torch.dev dtype = TorchTransferableDataType return apply_to_collection(batch, dtype=dtype, function=batch_to) + +def is_torch_module(model) -> bool: + """ + 判断传入的 ``model`` 是否是 :class:`torch.nn.Module` 类型 + + :param model: 模型; + :return: 当前模型是否为 ``torch`` 的模型; + """ + try: + return isinstance(model, 
torch.nn.Module) + except BaseException: + return False \ No newline at end of file From 9e3043251aec3debc216b3a3db3b081165a02c62 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Wed, 22 Jun 2022 23:39:33 +0800 Subject: [PATCH 34/52] =?UTF-8?q?fastnlp=20paddle=20ernie=20=E7=9A=84=20tu?= =?UTF-8?q?torial?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tutorials/fastnlp_tutorial_paddle.ipynb | 1086 +++++++++++++++++ .../paddle-ernie-1.0-masking-levels.png | Bin 0 -> 59022 bytes .../figures/paddle-ernie-1.0-masking.png | Bin 0 -> 46898 bytes .../paddle-ernie-2.0-continual-pretrain.png | Bin 0 -> 128680 bytes .../figures/paddle-ernie-3.0-framework.png | Bin 0 -> 202018 bytes 5 files changed, 1086 insertions(+) create mode 100644 tutorials/fastnlp_tutorial_paddle.ipynb create mode 100644 tutorials/figures/paddle-ernie-1.0-masking-levels.png create mode 100644 tutorials/figures/paddle-ernie-1.0-masking.png create mode 100644 tutorials/figures/paddle-ernie-2.0-continual-pretrain.png create mode 100644 tutorials/figures/paddle-ernie-3.0-framework.png diff --git a/tutorials/fastnlp_tutorial_paddle.ipynb b/tutorials/fastnlp_tutorial_paddle.ipynb new file mode 100644 index 00000000..e07b1509 --- /dev/null +++ b/tutorials/fastnlp_tutorial_paddle.ipynb @@ -0,0 +1,1086 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 使用 paddlenlp 和 FastNLP 实现中文文本情感分析\n", + "\n", + "1. 基础介绍:飞桨自然语言处理库 ``paddlenlp`` 和语义理解框架 ``ERNIE``\n", + "\n", + "2. 准备工作:使用 ``tokenizer`` 处理数据并构造 ``dataloader``\n", + "\n", + "3. 模型训练:加载 ``ERNIE`` 预训练模型,使用 ``FastNLP`` 进行训练" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. 
基础介绍:飞桨自然语言处理库 paddlenlp 和语义理解框架 ERNIE\n", + "\n", + "#### 1.1 飞桨自然语言处理库 paddlenlp\n", + "\n", + "``paddlenlp`` 是由百度以飞桨 ``PaddlePaddle`` 为核心开发的自然语言处理库,集成了多个数据集和 NLP 模型,包括百度自研的语义理解框架 ``ERNIE`` 。在本篇教程中,我们会以 ``paddlenlp`` 为基础,使用模型 ``ERNIE`` 完成中文情感分析任务。" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.3.3\n" + ] + } + ], + "source": [ + "import sys\n", + "sys.path.append(\"../\")\n", + "\n", + "import paddle\n", + "import paddlenlp\n", + "from paddlenlp.transformers import AutoTokenizer\n", + "from paddlenlp.transformers import AutoModelForSequenceClassification\n", + "\n", + "print(paddlenlp.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 1.2 语义理解框架 ERNIE\n", + "\n", + "``ERNIE(Enhanced Representation from kNowledge IntEgration)`` 是百度提出的基于知识增强的持续学习语义理解框架,至今已有 ``ERNIE 2.0``、``ERNIE 3.0``、``ERNIE-M``、``ERNIE-tiny`` 等多种预训练模型。``ERNIE 1.0`` 采用``Transformer Encoder`` 作为其语义表示的骨架,并改进了两种``mask`` 策略,分别为基于**短语**和**实体**(人名、组织等)的策略。在 ``ERNIE`` 中,由多个字组成的短语或者实体将作为一个统一单元,在训练的时候被统一地 ``mask`` 掉,这样可以潜在地学习到知识的依赖以及更长的语义依赖来让模型更具泛化性。\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "``ERNIE 2.0`` 则提出了连续学习(``Continual Learning``)的概念,即首先用一个简单的任务来初始化模型,在更新时用前一个任务训练好的参数作为下一个任务模型初始化的参数。这样在训练新的任务时,模型便可以记住之前学习到的知识,使得模型在新任务上获得更好的表现。``ERNIE 2.0`` 分别构建了词法、语法、语义不同级别的预训练任务,并使用不同的 task id 来标示不同的任务,在共计16个中英文任务上都取得了SOTA效果。\n", + "\n", + "\n", + "\n", + "``ERNIE 3.0`` 将自回归和自编码网络融合在一起进行预训练,其中自编码网络采用 ``ERNIE 2.0`` 的多任务学习增量式构建预训练任务,持续进行语义理解学习。其中自编码网络增加了知识增强的预训练任务。自回归网络则基于 ``Tranformer-XL`` 结构,支持长文本语言模型建模,并在多个自然语言处理任务中取得了SOTA的效果。\n", + "\n", + "\n", + "\n", + "接下来,我们将展示如何在 ``FastNLP`` 中使用基于 ``paddle`` 的 ``ERNIE 1.0`` 框架进行中文情感分析。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. 
使用 tokenizer 处理数据并构造 dataloader\n", + "\n", + "#### 2.1 加载中文数据集 ChnSentiCorp\n", + "\n", + "``ChnSentiCorp`` 数据集是由中国科学院发布的中文句子级情感分析数据集,包含了从网络上获取的酒店、电影、书籍等多个领域的评论,每条评论都被划分为两个标签:消极(``0``)和积极(``1``),可以用于二分类的中文情感分析任务。通过 ``paddlenlp.datasets.load_dataset`` 函数,我们可以加载并查看 ``ChnSentiCorp`` 数据集的内容。" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "训练集大小: 9600\n", + "{'text': '选择珠江花园的原因就是方便,有电动扶梯直接到达海边,周围餐馆、食廊、商场、超市、摊位一应俱全。酒店装修一般,但还算整洁。 泳池在大堂的屋顶,因此很小,不过女儿倒是喜欢。 包的早餐是西式的,还算丰富。 服务吗,一般', 'label': 1, 'qid': ''}\n", + "{'text': '15.4寸笔记本的键盘确实爽,基本跟台式机差不多了,蛮喜欢数字小键盘,输数字特方便,样子也很美观,做工也相当不错', 'label': 1, 'qid': ''}\n", + "{'text': '房间太小。其他的都一般。。。。。。。。。', 'label': 0, 'qid': ''}\n" + ] + } + ], + "source": [ + "from paddlenlp.datasets import load_dataset\n", + "\n", + "train_dataset, val_dataset, test_dataset = load_dataset(\"chnsenticorp\", splits=[\"train\", \"dev\", \"test\"])\n", + "print(\"训练集大小:\", len(train_dataset))\n", + "for i in range(3):\n", + " print(train_dataset[i])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 2.2 处理数据\n", + "\n", + "可以看到,原本的数据集仅包含中文的文本和标签,这样的数据是无法被模型识别的。同英文文本分类任务一样,我们需要使用 ``tokenizer`` 对文本进行分词并转换为数字形式的结果。我们可以加载已经预训练好的中文分词模型 ``ernie-1.0-base-zh``,将分词的过程写在函数 ``_process`` 中,然后调用数据集的 ``map`` 函数对每一条数据进行分词。其中:\n", + "- 参数 ``max_length`` 代表句子的最大长度;\n", + "- ``padding=\"max_length\"`` 表示将长度不足的结果 padding 至和最大长度相同;\n", + "- ``truncation=True`` 表示将长度过长的句子进行截断。\n", + "\n", + "至此,我们得到了每条数据长度均相同的数据集。" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[2022-06-22 21:31:04,168] [ INFO]\u001b[0m - We are using to load 'ernie-1.0-base-zh'.\u001b[0m\n", + "\u001b[32m[2022-06-22 21:31:04,171] [ INFO]\u001b[0m - Already cached /remote-home/shxing/.paddlenlp/models/ernie-1.0-base-zh/vocab.txt\u001b[0m\n" + ] + }, + { 
+ "name": "stdout", + "output_type": "stream", + "text": [ + "{'text': '选择珠江花园的原因就是方便,有电动扶梯直接到达海边,周围餐馆、食廊、商场、超市、摊位一应俱全。酒店装修一般,但还算整洁。 泳池在大堂的屋顶,因此很小,不过女儿倒是喜欢。 包的早餐是西式的,还算丰富。 服务吗,一般', 'label': 1, 'qid': '', 'input_ids': [1, 352, 790, 1252, 409, 283, 509, 5, 250, 196, 113, 10, 58, 518, 4, 9, 128, 70, 1495, 1855, 339, 293, 45, 302, 233, 554, 4, 544, 637, 1134, 774, 6, 494, 2068, 6, 278, 191, 6, 634, 99, 6, 2678, 144, 7, 149, 1573, 62, 12043, 661, 737, 371, 435, 7, 689, 4, 255, 201, 559, 407, 1308, 12043, 2275, 1110, 11, 19, 842, 5, 1207, 878, 4, 196, 198, 321, 96, 4, 16, 93, 291, 464, 1099, 10, 692, 811, 12043, 392, 5, 748, 1134, 10, 213, 220, 5, 4, 201, 559, 723, 595, 12043, 231, 112, 1114, 4, 7, 689, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}\n" + ] + } + ], + "source": [ + "max_len = 128\n", + "model_checkpoint = \"ernie-1.0-base-zh\"\n", + "tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)\n", + "def _process(data):\n", + " data.update(tokenizer(\n", + " data[\"text\"],\n", + " max_length=max_len,\n", + " padding=\"max_length\",\n", + " truncation=True,\n", + " return_attention_mask=True,\n", + " ))\n", + " return data\n", + "\n", + 
"train_dataset.map(_process, num_workers=5)\n", + "val_dataset.map(_process, num_workers=5)\n", + "test_dataset.map(_process, num_workers=5)\n", + "\n", + "print(train_dataset[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "得到数据集之后,我们便可以将数据集包裹在 ``PaddleDataLoader`` 中,用于之后的训练。``FastNLP`` 提供的 ``PaddleDataLoader`` 拓展了 ``paddle.io.DataLoader`` 的功能,详情可以查看相关的文档。" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "from fastNLP.core import PaddleDataLoader\n", + "import paddle.nn as nn\n", + "\n", + "train_dataloader = PaddleDataLoader(train_dataset, batch_size=32, shuffle=True)\n", + "val_dataloader = PaddleDataLoader(val_dataset, batch_size=32, shuffle=False)\n", + "test_dataloader = PaddleDataLoader(test_dataset, batch_size=1, shuffle=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. 模型训练:加载 ERNIE 预训练模型,使用 FastNLP 进行训练\n", + "\n", + "#### 3.1 使用 ERNIE 预训练模型\n", + "\n", + "为了实现文本分类,我们首先需要定义文本分类的模型。``paddlenlp.transformers`` 提供了模型 ``AutoModelForSequenceClassification``,我们可以利用它来加载不同权重的文本分类模型。在 ``FastNLP`` 中,我们可以定义 ``train_step`` 和 ``evaluate_step`` 函数来实现训练和验证过程中的不同行为。\n", + "\n", + "- ``train_step`` 函数在获得返回值 ``logits`` (大小为 ``(batch_size, num_labels)``)后计算交叉熵损失 ``CrossEntropyLoss``,然后将 ``loss`` 放在字典中返回。``FastNLP`` 也支持返回 ``dataclass`` 类型的训练结果,但二者都需要包含名为 **``loss``** 的键或成员。\n", + "- ``evaluate_step`` 函数在获得返回值 ``logits`` 后,将 ``logits`` 和标签 ``label`` 放在字典中返回。\n", + "\n", + "这两个函数的参数均为数据集中字典**键**的子集,``FastNLP`` 会自动进行参数匹配然后输入到模型中。" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[2022-06-22 21:31:15,577] [ INFO]\u001b[0m - We are using to load 'ernie-1.0-base-zh'.\u001b[0m\n", + "\u001b[32m[2022-06-22 21:31:15,580] [ INFO]\u001b[0m - Already cached 
/remote-home/shxing/.paddlenlp/models/ernie-1.0-base-zh/ernie_v1_chn_base.pdparams\u001b[0m\n" + ] + } + ], + "source": [ + "import paddle.nn as nn\n", + "\n", + "class SeqClsModel(nn.Layer):\n", + " def __init__(self, model_checkpoint, num_labels):\n", + " super(SeqClsModel, self).__init__()\n", + " self.model = AutoModelForSequenceClassification.from_pretrained(\n", + " model_checkpoint,\n", + " num_classes=num_labels,\n", + " )\n", + "\n", + " def forward(self, input_ids, attention_mask, token_type_ids):\n", + " logits = self.model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)\n", + " return logits\n", + "\n", + " def train_step(self, input_ids, attention_mask, token_type_ids, label):\n", + " logits = self(input_ids, attention_mask, token_type_ids)\n", + " loss = nn.CrossEntropyLoss()(logits, label)\n", + " return {\"loss\": loss}\n", + "\n", + " def evaluate_step(self, input_ids, attention_mask, token_type_ids, label):\n", + " logits = self(input_ids, attention_mask, token_type_ids)\n", + " return {'pred': logits, 'target': label}\n", + "\n", + "model = SeqClsModel(model_checkpoint, num_labels=2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 3.2 设置参数并使用 Trainer 开始训练\n", + "\n", + "现在我们可以着手使用 ``FastNLP.Trainer`` 进行训练了。\n", + "\n", + "首先,为了高效地训练 ``ERNIE`` 模型,我们最好为学习率指定一定的策略。``paddlenlp`` 提供的 ``LinearDecayWithWarmup`` 可以令学习率在一段时间内从 0 开始线性地增长(预热),然后再线性地衰减至 0 。在本篇教程中,我们将学习率设置为 ``5e-5``,预热时间为 ``0.1``,然后将得到的的 ``lr_scheduler`` 赋值给 ``AdamW`` 优化器。\n", + "\n", + "其次,我们还可以为 ``Trainer`` 指定多个 ``Callback`` 来在基础的训练过程之外进行额外的定制操作。在本篇教程中,我们使用的 ``Callback`` 有以下三种:\n", + "\n", + "- ``RichCallback`` - 在训练和验证时显示进度条,以便观察训练的过程\n", + "- ``LRSchedCallback`` - 由于我们使用了 ``Scheduler``,因此需要将 ``lr_scheduler`` 传给该 ``Callback`` 以在训练中进行更新\n", + "- ``LoadBestModelCallback`` - 该 ``Callback`` 会评估结果中的 ``'acc#accuracy'`` 值,保存训练中出现的正确率最高的模型,并在训练结束时加载到模型上,方便对模型进行测试和评估。\n", + "\n", + "在 ``Trainer`` 中,我们还可以设置 ``metrics`` 来衡量模型的表现。``Accuracy`` 
能够根据传入的预测值和真实值计算出模型预测的正确率。还记得模型中 ``evaluate_step`` 函数的返回值吗?键 ``pred`` 和 ``target`` 分别为 ``Accuracy.update`` 的参数名,在验证过程中 ``FastNLP`` 会自动将键和参数名匹配从而计算出正确率,这也是我们规定模型需要返回字典类型数据的原因。\n", + "\n", + "``Accuracy`` 的返回值包含三个部分:``acc``、``total`` 和 ``correct``,分别代表 ``正确率``、 ``数据总数`` 和 ``预测正确的数目``,这让您能够直观地知晓训练中模型的变化,``LoadBestModelCallback`` 的参数 ``'acc#accuracy'`` 也正是代表了 ``accuracy`` 指标的 ``acc`` 结果。\n", + "\n", + "在设定好参数之后,调用 ``run`` 函数便可以进行训练和验证了。" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
[21:31:16] INFO     Running evaluator sanity check for 2 batches.              trainer.py:631\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m[21:31:16]\u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Running evaluator sanity check for \u001b[1;36m2\u001b[0m batches. \u001b]8;id=4641;file://../fastNLP/core/controllers/trainer.py\u001b\\\u001b[2mtrainer.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=822054;file://../fastNLP/core/controllers/trainer.py#631\u001b\\\u001b[2m631\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:0, Batch:60 -----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m60\u001b[0m -----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.895833,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1075.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.895833\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1075.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:0, Batch:120 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m120\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.8975,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1077.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.8975\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1077.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:0, Batch:180 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m180\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.911667,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1094.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.911667\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1094.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:0, Batch:240 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m240\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.9225,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1107.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.9225\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1107.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:0, Batch:300 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m300\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.9275,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1113.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.9275\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1113.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:1, Batch:60 -----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m1\u001b[0m, Batch:\u001b[1;36m60\u001b[0m -----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.930833,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1117.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.930833\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1117.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:1, Batch:120 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m1\u001b[0m, Batch:\u001b[1;36m120\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.935833,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1123.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.935833\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1123.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:1, Batch:180 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m1\u001b[0m, Batch:\u001b[1;36m180\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.935833,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1123.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.935833\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1123.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:1, Batch:240 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m1\u001b[0m, Batch:\u001b[1;36m240\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.9375,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1125.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.9375\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1125.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
---------------------------- Eval. results on Epoch:1, Batch:300 ----------------------------\n",
+       "
\n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m1\u001b[0m, Batch:\u001b[1;36m300\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
{\n",
+       "  \"acc#accuracy\": 0.941667,\n",
+       "  \"total#accuracy\": 1200.0,\n",
+       "  \"correct#accuracy\": 1130.0\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"acc#accuracy\"\u001b[0m: \u001b[1;36m0.941667\u001b[0m,\n", + " \u001b[1;34m\"total#accuracy\"\u001b[0m: \u001b[1;36m1200.0\u001b[0m,\n", + " \u001b[1;34m\"correct#accuracy\"\u001b[0m: \u001b[1;36m1130.0\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
[21:34:28] INFO     Loading best model from fnlp-ernie/2022-0 load_best_model_callback.py:111\n",
+       "                    6-22-21_29_12_898095/best_so_far with                                    \n",
+       "                    acc#accuracy: 0.941667...                                                \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m[21:34:28]\u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Loading best model from fnlp-ernie/\u001b[1;36m2022\u001b[0m-\u001b[1;36m0\u001b[0m \u001b]8;id=340364;file://../fastNLP/core/callbacks/load_best_model_callback.py\u001b\\\u001b[2mload_best_model_callback.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=763898;file://../fastNLP/core/callbacks/load_best_model_callback.py#111\u001b\\\u001b[2m111\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b[2;36m \u001b[0m \u001b[1;36m6\u001b[0m-\u001b[1;36m22\u001b[0m-21_29_12_898095/best_so_far with \u001b[2m \u001b[0m\n", + "\u001b[2;36m \u001b[0m acc#accuracy: \u001b[1;36m0.941667\u001b[0m\u001b[33m...\u001b[0m \u001b[2m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
[21:34:34] INFO     Deleting fnlp-ernie/2022-06-22-21_29_12_8 load_best_model_callback.py:131\n",
+       "                    98095/best_so_far...                                                     \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[2;36m[21:34:34]\u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Deleting fnlp-ernie/\u001b[1;36m2022\u001b[0m-\u001b[1;36m06\u001b[0m-\u001b[1;36m22\u001b[0m-21_29_12_8 \u001b]8;id=430330;file://../fastNLP/core/callbacks/load_best_model_callback.py\u001b\\\u001b[2mload_best_model_callback.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=508566;file://../fastNLP/core/callbacks/load_best_model_callback.py#131\u001b\\\u001b[2m131\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b[2;36m \u001b[0m 98095/best_so_far\u001b[33m...\u001b[0m \u001b[2m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
\n",
+       "
\n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from fastNLP import LRSchedCallback, RichCallback, LoadBestModelCallback\n", + "from fastNLP import Trainer, Accuracy\n", + "from paddlenlp.transformers import LinearDecayWithWarmup\n", + "\n", + "n_epochs = 2\n", + "num_training_steps = len(train_dataloader) * n_epochs\n", + "lr_scheduler = LinearDecayWithWarmup(5e-5, num_training_steps, 0.1)\n", + "optimizer = paddle.optimizer.AdamW(\n", + " learning_rate=lr_scheduler,\n", + " parameters=model.parameters(),\n", + ")\n", + "callbacks = [\n", + " LRSchedCallback(lr_scheduler, step_on=\"batch\"),\n", + " LoadBestModelCallback(\"acc#accuracy\", larger_better=True, save_folder=\"fnlp-ernie\"),\n", + " RichCallback()\n", + "]\n", + "trainer = Trainer(\n", + " model=model,\n", + " driver=\"paddle\",\n", + " optimizers=optimizer,\n", + " device=0,\n", + " n_epochs=n_epochs,\n", + " train_dataloader=train_dataloader,\n", + " evaluate_dataloaders=val_dataloader,\n", + " evaluate_every=60,\n", + " metrics={\"accuracy\": Accuracy()},\n", + " callbacks=callbacks,\n", + ")\n", + "trainer.run()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 3.3 测试和评估\n", + "\n", + "现在我们已经得到了一个表现良好的 ``ERNIE`` 模型,接下来可以在测试集上测试模型的效果了。``FastNLP.Evaluator`` 提供了定制函数的功能。我们以 ``test_dataloader`` 初始化一个 ``Evaluator``,然后将写好的测试函数 ``test_batch_step_fn`` 传给参数 ``evaluate_batch_step_fn``,``Evaluate`` 在对每个 batch 进行评估时就会调用我们自定义的 ``test_batch_step_fn`` 函数而不是 ``evaluate_step`` 函数。在这里,我们仅测试 5 条数据并输出文本和对应的标签。" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
text: ['这个宾馆比较陈旧了,特价的房间也很一般。总体来说一般']\n",
+       "
\n" + ], + "text/plain": [ + "text: ['这个宾馆比较陈旧了,特价的房间也很一般。总体来说一般']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
labels: 0\n",
+       "
\n" + ], + "text/plain": [ + "labels: 0\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
text: ['怀着十分激动的心情放映,可是看着看着发现,在放映完毕后,出现一集米老鼠的动画片!开始\n",
+       "还怀疑是不是赠送的个别现象,可是后来发现每张DVD后面都有!真不知道生产商怎么想的,我想看的是猫\n",
+       "和老鼠,不是米老鼠!如果厂家是想赠送的话,那就全套米老鼠和唐老鸭都赠送,只在每张DVD后面添加一\n",
+       "集算什么??简直是画蛇添足!!']\n",
+       "
\n" + ], + "text/plain": [ + "text: ['怀着十分激动的心情放映,可是看着看着发现,在放映完毕后,出现一集米老鼠的动画片!开始\n", + "还怀疑是不是赠送的个别现象,可是后来发现每张DVD后面都有!真不知道生产商怎么想的,我想看的是猫\n", + "和老鼠,不是米老鼠!如果厂家是想赠送的话,那就全套米老鼠和唐老鸭都赠送,只在每张DVD后面添加一\n", + "集算什么??简直是画蛇添足!!']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
labels: 0\n",
+       "
\n" + ], + "text/plain": [ + "labels: 0\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
text: ['还稍微重了点,可能是硬盘大的原故,还要再轻半斤就好了。其他要进一步验证。贴的几种膜气\n",
+       "泡较多,用不了多久就要更换了,屏幕膜稍好点,但比没有要强多了。建议配赠几张膜让用用户自己贴。'\n",
+       "]\n",
+       "
\n" + ], + "text/plain": [ + "text: ['还稍微重了点,可能是硬盘大的原故,还要再轻半斤就好了。其他要进一步验证。贴的几种膜气\n", + "泡较多,用不了多久就要更换了,屏幕膜稍好点,但比没有要强多了。建议配赠几张膜让用用户自己贴。'\n", + "]\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
labels: 0\n",
+       "
\n" + ], + "text/plain": [ + "labels: 0\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
text: ['交通方便;环境很好;服务态度很好 房间较小']\n",
+       "
\n" + ], + "text/plain": [ + "text: ['交通方便;环境很好;服务态度很好 房间较小']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
labels: 1\n",
+       "
\n" + ], + "text/plain": [ + "labels: 1\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
text: ['不错,作者的观点很颠覆目前中国父母的教育方式,其实古人们对于教育已经有了很系统的体系\n",
+       "了,可是现在的父母以及祖父母们更多的娇惯纵容孩子,放眼看去自私的孩子是大多数,父母觉得自己的\n",
+       "孩子在外面只要不吃亏就是好事,完全把古人几千年总结的教育古训抛在的九霄云外。所以推荐准妈妈们\n",
+       "可以在等待宝宝降临的时候,好好学习一下,怎么把孩子教育成一个有爱心、有责任心、宽容、大度的人\n",
+       "。']\n",
+       "
\n" + ], + "text/plain": [ + "text: ['不错,作者的观点很颠覆目前中国父母的教育方式,其实古人们对于教育已经有了很系统的体系\n", + "了,可是现在的父母以及祖父母们更多的娇惯纵容孩子,放眼看去自私的孩子是大多数,父母觉得自己的\n", + "孩子在外面只要不吃亏就是好事,完全把古人几千年总结的教育古训抛在的九霄云外。所以推荐准妈妈们\n", + "可以在等待宝宝降临的时候,好好学习一下,怎么把孩子教育成一个有爱心、有责任心、宽容、大度的人\n", + "。']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
labels: 1\n",
+       "
\n" + ], + "text/plain": [ + "labels: 1\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "{}"
+      ]
+     },
+     "execution_count": 14,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from fastNLP import Evaluator\n",
+    "def test_batch_step_fn(evaluator, batch):\n",
+    "    input_ids = batch[\"input_ids\"]\n",
+    "    attention_mask = batch[\"attention_mask\"]\n",
+    "    token_type_ids = batch[\"token_type_ids\"]\n",
+    "    logits = model(input_ids, attention_mask, token_type_ids)\n",
+    "    predict = logits.argmax().item()\n",
+    "    print(\"text:\", batch['text'])\n",
+    "    print(\"labels:\", predict)\n",
+    "\n",
+    "evaluator = Evaluator(\n",
+    "    model=model,\n",
+    "    dataloaders=test_dataloader,\n",
+    "    driver=\"paddle\",\n",
+    "    device=0,\n",
+    "    evaluate_batch_step_fn=test_batch_step_fn,\n",
+    ")\n",
+    "evaluator.run(5)    "
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.7.13 ('fnlp-paddle')",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.13"
+  },
+  "orig_nbformat": 4,
+  "vscode": {
+   "interpreter": {
+    "hash": "31f2d9d3efc23c441973d7c4273acfea8b132b6a578f002629b6b44b8f65e720"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/tutorials/figures/paddle-ernie-1.0-masking-levels.png b/tutorials/figures/paddle-ernie-1.0-masking-levels.png
new file mode 100644
index 0000000000000000000000000000000000000000..ff2519c4bc2c6b3ce4c63a7612fcc1e11f9cf4f8
GIT binary patch
literal 59022
zcmZ77b8sd>yD0G3w)w`k%{O+kv9U=uwr$(i#@HmA4c^$c?PO!??sx7vRp;LMqpPc@
zr)%n|>8_fme;uWwB!i4VfB*&th76RIR09KpO#4@_g@gH5lG*6R|0}_Ol42U3xfce|
zt{O6gU90Wj_x2Ma>`vounk`*NlA+qKu7-oUZobyNRr}**Y8{lZzRu`
zihBKQV@62&GzcPRz{k3>7znvlMn`gOybNAiTJ&X4v{w-L_2@$D_#ljik^ksiM$=>k
z{xd`UL{}{_S?)hCJOY5TXyEsQbo4?szelhQ9^kBoH*eMq_}pdKL4dR?4MOL1hDv3E
zS*V5@X_;8Op@tW5LcT^-;F(oJR&6bQ0YCm8m}Rqfr)n32=n+)$WzqQwfL%%#ohK08
z%FPbA%e3flUB13oB#Z^leNTg>PgMypO#EIHV(5R$kiiePNw0}#ts+I<#@+5$TF`+4
z6;U2x-qA12Fw>dtUEp>C!RkYaIW}^i!SIWfJf&a;@#KW)>siLgctmhVW=)BM%|mjh}n1%s!$YT^jA{1
zRsc4*=aUGCkiNbpzb147Y8I2qyNG|5{J3@b{SPiPsJl6Ey5c@Xkr3gad?F6x={ahv
z$?rH2mIZkpM@qTOjBn=;e^k`f=5h26Nh$1b?jVfyF;pr;`R6+<>1z!~V0DGbLJFDS
zB_RYA;!hEl`yU{pummnF^F|mgWe7Rkr~uzpl%45*e9DwMG@*KVe+b42jDkNOSa{u{
zNds2Wsq|1aSxDx;q6DbNZucp6miA*)7`V{74(65T4R>Rv9{KFDFzx%a42MQiPq)P#hM+r9iK+#u8ffOBuUl6jwq_Y3>4rIg;
zD2@_+MQ(yXLE(Lgj89V!iTi^=woCR5dTIk|h-Kcv7#IFbO}IoW?0I;*3-aiD9?ofgOIlnRGf@*03r5A&usrKV1KjyD7BvmGJ4$c+
zi2LPfp+h^*lE=;yx<5nJL??uA?`MUdi!;K!mq-&agk=P@Atq8YROt~B9V-_0tE6y-^
z%!lQ{2^SuNo#crV>bAmRUNhEQ3~EFNuPOKJ-`w5!3ZAHd))%%8s4Eg{spvRZL-YAz
zR`g1uW~qB1zWJuUn{}N>?LQ^H
z0a6)o!D^cvABAAuKHGZA>J5Fc-lQ~Vc9S*nSut>__vuk`wYZ*8F1-UTTp
zJ@hhsj1|yD5XkXNRP3&lMUgv`CAP@mUahY0$CxvzA>#db#Ya<%D79+=4A_!x#uR8?
z(^X$1{gfq;J3cER6DW&M^b=CVLso&ccI?_y`4XH(h&=a#mcIpBflWiF`Z^3e!p|VLT
zgtX=vWB|ji3GyHX3m~vrCI?+H{d&%#-Pu7g?yt=ZFiY+aa|yJh2fU9$@3e&k<^=T|
zsn(!3gaAXs4Yb*bV;O_i+ydud(krXQT~!wA#MU*H?l>60CW@vGJz$x=EJ8(Z>~|z+
zKjig{=dBAJ2|O4jyTQ}#p+83hp^xV{NdkTqif6K)_~OXJ$v}SnJ(4+Wi6XuxhL?r*
zAzQ)Zbwcqh)wh`lF8%Z&I69Rr%)rFNBKh-z+Hi?ZM)U1#ndF(9A+ZZrr1M9C&S;8Z
zyP5%YL+4WNbf+IO?`L)oE>CIyBf{GtyN_lc~d$nkXzmDdhAYLC8lVaNn}0j)lJTsh{xY&ile3y~B1d5|a9fj!l$z
zD=;x--cH=KMs`J}
z^YkOW!$7#OFz)di)?Th;;~b3iLY|Wd1m8#uEb*vLK=pUXfSWJB{vep&K_ZKcWR)zE
zuxv8C)w|{d4EYPB85oIvmz1gw!dTYYWS?~N?*P*MK!;38k}@9Ok?r^#u$IXZUWchW
z1AcJzpO6_SY=L=DV-HZTxTXRr&)dfCXXNRa$J-IYkw7?s1c>mPcSUM%8YA%d_YXlD
ziv-dXkat{=(JN%qQ(%~0(DGm)w0-Ka*T++5w@AvQ(0X7LfRpAUP&KtNscE?iuii*0
z>pe4;q;u*m6nqizpt%4CBb2b0g
zYsANH(xn@de)c?o(VT**dbt(wyVq`~kAG)|v(OEE5KoMn!4ASMQ%a_6!}c#qihKe&$XLes_Tj#2x*k2<
z^_3Ez(G~|{fb*+jA!=0*yK9F+l=0xDXB8&Ic82YJ?3E?sdBxw~NS-+Jg~L(rH{71PgLc}@Qj}CZ9WWq^
zj_08ERmV>)%zY;KkigFw2;Svp12bp5Gv{PHiyzhvf7=jza5%(v#$Ye%9*<(wZ&;{H
z4hQQd{3QZ%J*&e$cLjtrW1bJ+a{wVNzIoC>KPf`X34lw;`pX@fyTLSk=+tfhD4mWh
z7`kn?|DNKBTsz!hYV7TYs`Jistuyfxt=9tLW05l1s?|dM)1CF<=TvCBH};4@P={2L
zdKeTvD6TZR+akmHYCf6qF7t>{0N)VJ&8Al$M%?eOe1#&5R|m<*n&2xZV!FjL473Rr
ze3J|9+edWelS2M|FYNSh(3sg)sA0PK5rrSof-M?fT~
z#=URwFy_)MRRUNV^(yCdGZPBN1i{Au2`Q|$_1){(g$e)
z2&eoG$6ogO`7NJuydLl^K3#CBpgZA~22;vN@}rumIUdz8g|p)K8Rp3rB0$%s#bI>I<_g$)I(L}C7s
z&UIh~tDyqvvTA$Wzl4O&%6XMtWo>q=6OU%ilVjH$<5v*iRU=Z|2qEeGph+`vH<>B$
zrYtb(fca@<Uc&vTmW(z^@RD)iOQJN=3Cxv*G|6xyK2@#
z7PlO!vBbv&Y5ANO=$4n4b?WYSj&%$P-~h4msdn@wSmTkElTPeQL1Z&!RGj3q)u7|R)*E6GPdEQwpVQ*h@Knwco^r@YQCk4jSXX-iisq+20p}==!LPr<*G*b4Fqhbo*
z{x$!p`mK7e+UTZ$du@d(jfh(Xu#3vXasT)LL54hCV-tF4OjW-6o@(|vG8*#hHnkdm
ztA(
z7)RuR?L9w61`&X+W`CEZl7VLd7EXX`SlE{$3h>)bN!(oz@3-f0HTQbH`H35Q#Y*&P
zLmZI6k5*#!(G>vrd%iUDq85RWtn(olh!PYO06uys%oW6To1JY8%p+k%u$DaLNgr4!
z2=e2;G~s=4BDsP@J|L%xq#|M~O>5HfhrTzG{xILfQM}rcZIuvS>d&$VvDEYk0vN~N
zk~d~xbCu2~dw!+%AYqOft)%xfe=s*N{Ylc8qC!7w0w$$K?WlE;uY17yWx!qw>y{b9
ztG{MTbsjBhCDSGs6i=>f0n{CUV?Q`{CQDO1Ma6~l`42?5(QA(j^HL2BO$smNoj+-S
zz6H<#WEj#Y<{*Y<8Wr${*^?&RBBRj#jA*kyoVbQt
zRwpJ|FTERQDcvs98{TPvNC|W)t4xgA>SxLy?+YeFx#z^ICSW%b#I!O>TW94aDeKhzbFV|5n{hJk7w9n@~>r
z?t*$vi+Kr~T~|1{oyd6QJ6L!iKUyD<9w<^8%AHcP`_0j^{1!xBjAbv)=z6U>&dqu%|`cw-tx6
z_sp#@!XRd+s}Z5AQlJt>9PT7unGHskU`G=x)45@AWDdf_a(Nx}&o>>i(XoD~o7`(}
z4CkXw;*77>=)?b_=0&gFD45aRZSUXPOdKAFj@O80%^xqeSc}LdI2t#%xXvE9n;OiI
zfSH*!OJr>Vlf#NLvZ999lD;!#SFVFc)Em^;YeVur9F&XT!8*o-`Oln^ycc5h&H~mk
zttjoHC?Em5<7;Od?VmT6L=ru3hfzA22}7qhqx2*RY
zoyranUfOdeeu#-r@!%gpsd!D1@o{AGYM8m6b(V0T6`#VI4*QUJ)ms~
znQ}K&%=vWZ4X+^4P9K5HPZwiT6Me^%4HL
zlRqc(yserzkvUZiMe;0}=L6i&pms}$I)Wdw
z*E0MQ5DB}#aGv4A4%89UlLbp+m2T)%6~xCX=EbQx(y?dacDW1NDE!}E{~=8%2xx-*+ikn8px+!rL3}QT@XnT_CVNajh&|>
z41e4M8iYwxq;6~d?_CHz)Q?RCv~`$!l%0Bzi+b^CTnf1N^%uO>L^*`}BOcBqax%(p
z)5yFWH)1(dKc`zvY_~XAF1LrJ4vEl5BsVhL^W_yxG^o>x6%9BdQwKYvtwhy;sQHuLxFeDYe9HW>~bBOB^#&p)&q-jcMk4h(;HG<
zh969RbZ$+8A2WhSWF4I&cxZ+C5n++VLZM?=3sO;f#3+X_LWcLrZtr3ETOIA6M_3f~wI%rtr9Dlwq;AQ_M1+@Bk4@#ldW3
z-n3kOsZoQuKU^#2198U)zwco^InaCOGO*Eg5efv0Thz%V<4l-M=R+z(kr(!nL>iFC
z=*~LPCZ^r~@XSCm`AR^O>NjdgZPYDeyOiyY{}ug~pWw|}%b}9`aH%wGY)L3WTZM>r
z7N6Gx_dc?t6%s!b63Vo#4x{XF0yFkRN<6E`p9;p`dJgdW1n3FX{L3C{x~eL~Sp>G3
z@7b7@3VfRuk4TMkcC*}D(>iR|&K!H&sRE!zf!;33fJ{pnDCzc&hk^~k@o&Gfw=Os#
z*BjA+4~}c)Nh0fE4>^kEzg=YOXH-7F7_rsakDM1{w0
zGFih4$;@#}RoJ;0I|vQ(v7s4%$EfHpP8k)%rZfvWu>96UzvUITD1$j=5&zjO{IS||pHQd3?>oXpI6thDj3|tSn{DIos^%OOuS;kv#g4}yp>>|x^TtG#
zMh80nlHI{0|H2yqn+DSgk^G_#IT4@-{iy}Dm7H9hwAUv>t>CC#nEZD99pB5K`|=kJ
zbGEJG`FHQJ3VMd}VY4iAF#f=~=E2=(ybq0fb&bY|Q8d_m1$Y~t>@t-tA&AQ~)pV>o^Zd;du42WroDnxmMk=8m)EL
zhF;37HO7jIzf@?{^F^O=z*%vX0|#-7A+aMxsCVn9yQ}3c5<%~%BdEwVdiVXEJ8!f;
zyOcSdrz~8sG6eO(1<9?|+9LUV3xt8O{FpQ6@UssQH$pXUtDn7sQVG^#e=`FR#1-m9YFza01!Oz;}i2E
zl}`x<>&(=mr(7YL$UtSk(XNH4FH&8=J$o8n4ovm>Hr|8Gh(<
zg<>a!>PiIKK8u+ppp*9O@4^-aUd*plX%U&=*kZ1v$oP%(21LXeR;#|-Tle*2GsqmX
zM{nQM!1Ihu9{2@rl?5(Ne{)~bJ07y*upHr;Yi{NQXT;O)(@Cdy3+%kY7J$KP7Rp0p
zpW4v;GHf9y+rZB3z*%7p?vCtsTYw)lb|AUEO;+G%>97n`SqSl44?N<);&2!;T&Ue7
zozIZIhh0-c#f{7Y+<)_UK2`GT{KN1dd`#k3OciAl0ntLa6hnModEU0LEkpOGprzYN
z4y3hl&A^(0a1$lnYDvzjw|Y+FZI3$%aby7de%exCP>5L|53LIGDVp4MTCq=)X@VL!
zN|S2jY1;2?h+B3+x1-3y8uY%)vcnaXcBO}bv=uN&X~9B?Y<@~W9r6>NI^=_t0ZV-c
z>MyA!h*In&qbWRE)8-36XA0(9g4Gu@YTmO70`i|sBke^jfkS!_ie4#EhoAh$61p#d
zz_PcOshDLftNwKZ6Oo0dW=a_9tQCx&u7Wo*%&0@7u(^wbhrN&h
zuV`22nuDLJaZML$Y{Xi81}uD^Vn(mn5`DC`
zdS{DMpAULkl*ZPa$V$
zTFG0_*S!qo4#4XU@nwWWVRp#~h9@hSy1MH>A?7u&0SFU%D&)epU4K8qTov
zjHAB46b>FJTJ
z_u%17E4cJdywrV^X+;g0_c=3mm!9Sz47B|^bw5Wv4l)8t(`;p%x#I7x-*|=Map6Cc!4eo!q^1
zvd)PU+1mJ>T_pRU3@PuGN>EmSwE8yB;>C>GVzPdQk}Yq|he1W&>lG&@*K!yf)^;a_
z=8}fHNW-2a)Y-IT&kF#ZNECloiXg6w+T&)YqKN}fkzQB
z&Rbf?eiEv3$IYwd2p{el#y)fT^*|4+Q_l9W-;MCKp*0BenThxH3in!m1i9bsVAUy<
zBr}`zlK#48jkI$WgVjsR&;L-r)mldBnGyBKmqI3n*_1NySwfAWb(Y`sw#p6?U`6{q
zBi#409J)mz2(4ac{u0<@31J1)|CBSf4I>JKjKTVv)~z7p8%?Sw0~VuRy@)9$vW{cm
z;`vc>l4ACUhTSH&ur~bhUHI5+Gsz1iO=Z#JOZJpLc@4(IWQY)q4y)G}!h%nuj`Z-H
zqPf1f9A*P5UsB+k20^#R;AcJP!9c2GaC?QClKx!Sd)$t~4YL-E?KODu*HAm0R>~Wh
zMk>u6>o7e@fq|{`2p^9Wtf!+!p1^WgQmyHkJda?4*=u4#QLgV8wYq8-4;|u
zm)@%xIWmqHitv51-6&ZdN1X+*7~2sSSnFoe;-b!hN7>25psXILNMu6CW;RSfoxP9u
zL~!*eSDoQPy>cY|Nr?t%r0P-L01^0KVHID?qBZSu2)2j(j2N~w2rUSXozrUv~bSZ!9hH9{}+}qdw
z3)w$^!xgx&Gr&H$pdYk&|IJqIv>6HfLl=^=gZA^O1euE{xA7Jb#{CeK1&K+z?M$T8
zN%e6EL~x^r7$2q*_K$t!w6J&`FF}oD+NIY4dlJMFCg&M%PX%6dQGo=x)QzPS&>N*N
zOOyVwlsU8@L$-I>pb;!3J`DN&Hb8G_5b0;3$SPE*Z$cMm0V)ljH%8(;+wA!(vkS>PMD3UwD!QRPLp`
zg2R~2Ex31#6uG^$4;RnwcXCvy`}MDM1`Y}iY_VV6X4|mS7b9hAD7J{8IU=}G5EC$2
zTnV{az?MIMcBYiZDCQFYjcGQx=e1Ut!6ap#HJT!uSO^V|D-$I}@^+s_$)}RWq#Pfc
zLP@2P#$!uRd&kroBzvz9`D1!xb@99?Gv73F#FV(_5=m3ZjW(0!hE4U$^7OZeB0-T#
zaFa#3`)v3-{MgN}RC!HYd&U%M2Yh%pcgn;DdnD{ut($%Ed4h0*?GDlB9ZUN0egx8`
zUcUVUAOeo1C?U>Ao#__!F!`|=16u%Gn@o;Oc{2>Z~)*6%GR9B`vESO=7pxC
zu_<(1Xw^5-nl+e1pQ;9{T1-gOf92W1_1MIUp3cZz6;dm7<(`b^IR?I0^u)o)*{;~?
zUpJ&NDsM&`4abIrqP0?ZE*x|NzqDBNqC4G7AakLi@K%1>V*miKeK&P^5dQ_2wwgh$
zn1OBJozsB#^phge8JtROzE8eJsy;#XQSRL)Bw2nKSrZ%4w5WBe1hp;fM>MJ_pg1%)d
z{`;djvC9PqqOlvl%*FW%4c<{=b>RH;eiOEP{ueP*I6<`4Rt&^d*L{ExuG^VFJuX#@UBgRk%IZ&ia0!&AXR+
zFcPBPf>}V4Avr{JTAC&WELEc;!oJUmrx^7*
zk&kGX6l*fr;t`!)UFdk3QAc+_MN?@!{%)}O^P>kBK=@aD8<|+JY;mb(7kwOZ)rs2A
z+R^M2!B>;%qTa4W@OyRg6Mxo3$6TP|pfE&0$;rZy(=?yV#(sQ^I#x#f%jla`G|;XC
z_c5BzM0jz*$(Tdn3G2C40DY}&)Z0R?Oj>x2^s7ShBij2j+XdT%hoTO!h+mkIBxht~
z^!!#~upcmqOhs-X8&j%ACZEnYk|bdPRIA?vrA*n}GIo(^D;M4$u^371-JGU?civ|~
zL9wAjv8Ct2?H%my7HLyZ{LN#~uEV29LJQC4h3j&6bd)`4m!jA?JWLZ)vk6aZ6Agm6
z0a}g_?gTX#OAI9@IpC=n+ZFediulKp4`$&PyNI83(<5pA`zC(9KJF^-T-L;5(*GJ!
z!+Z|6NE~#Vf}6Vg{O}2pqctmPz0-uBEuwq>SwwAhW*VHhI5;?HY_Wt*kxw1#R?H+E
zu+$F$Z3BgVJ^-g97#Tm}^DCxmdCd?EmNQd=senu>`#HR*UM8j3jDKzz2PLS&Mogwm9$%(_)0}|~D*ImQOMrcm=`R$yc2gd3cF_p&_EQs)NxzUgd3vNNM
zAX(ou$A%H@;{S%c)sCsJGCaKY#0v+5=&LK`+VPvY$z7~Ywvp#8LF~#ijTpFbL6qSp#Q#^t#1QoC0T{rW)nbY6!X#)Ae0g>IN0YG*807gqcl`j@-fBBp%-#9)>XRuNPhpO1HMM
z|3N-)%L}WZ`r$m5MXvh_%`AtSwnuo}H4P~6x2Uo|cNZ`Mc8>7WE|RCDO#_*POBrn`ye?!-@^bsDV6!d~$n9^F#C
zY`w8^8E1$z)Rh6X>*UR;XAbq5`3f#EB}vtgrNWqBOGdk|1aP>){C%t%YdcS}uk{!7
z$f0bKDPLi(7Mj<;!5q@~+4#9H^}G*;^qgfcJdT_5DW6ak|4LIfDt^FGbGeRH^5jJ*
zIV>Y6F7!&HklhS3TwuFCMva79!6TW*TwOSeeJYtwdEu^Ac)fu6mk+9s_qetbc8mzme+-kh%`m@
z^Ca+?H((D%_>eZZFO1mu&;y?V#$#Y-vVPv3b>v>pm52vkA_r$~IzMStl3JO*%4c_%
zR>&9=g@`vW50P+Za&L8|Ai4%6$wZ&d{=6z&`RF?7|P8(;em~)fNiFFoB(?}@z_%;d(I}0P(+?dh*(BKD-+*)thwU{$;8ub<;YC|d5V%=6uvi_zcrAW(As;kNI6u@_~-_pljNdu01c
z+4)sBM-OeNGSP4S%%X9u=LlAVzOEMr6+2?
z$}>lmiL4fQ7mPRUN1uKcO219u?f68~_tW3SNLU6m`(set3NhRLV;3x)2}T|TSH^GI
z^Uu?V5KGf|LEw<#W=BKSp7j{&2cOE0c9((*d7RMA+C(5o2Zs$|N4O_p{PH%gCh#*2
z=rMDkqj0nRI)0wjIW3tlyrPf9G6~2iP#LAozM;2==lQ7AgxiS
z_dCqz8ifUcn58zleFvXJNI`0%c&S^L<)lMAb=qX2kVz>ye53<#m}wGTHbUx$@wMJuAuI-1tv>
zf?~@7rDyKVv!>v|FD$@Y083H
zY31Wxoi2O&HLd!{r5+(!r%`r73L`)9k}1-gyB`hX1k%~R*Zjj#AlH42yQl$O0Cx*WO|+M-PMl9$?&-vK9M$P
zIH+S^hx-%ES4(KTbL0^%DEc=jPlC{7M62yN0X`tm!XS_BF_GeTmfK`d!=Iz2C
z=gM^#y1EyJ2j&ap_{r0laxVJfVl#ClohF!OpO*d$7rJ%lO9ol`67;)}?~M6=IM^id$2k#ZBi~Vd=iMN1-nnrpbq|I@pdr~VCHGDX;L$%#5&em-
z1@bNHqn3zui-SkqCQd~N{%otr$K6qQ%ox^eT)k4MruK-${`&J_T(B&uAIl_y0=iOe
zo{!`&w1CFcFPJAS@#vZ5f9PKvL`?H`6Ww`a@+?e#2^U$5uJyN{=}P9&*EhXN41QJh
z)4x`?BNl=i-%b@T$wsk5Nb5lK32WBNFzvhFXLTYNDed5i7RiiDsxBBBAw6{l+kc$^
zNu|(RuXizIWBp6B(Zmf0V+%OWZ(m0(huDi&oq|YX0^cXW$6G-&E}&XSQK?UdExhMP
z+?}W<7m5=;P%beQKiKLzWL!FUqaW;3{H>1z$rlQ(AR2F}WxC3aS)q+`;TL|XXMQK6
zJUP|~Nu21HvTkmczP-x*YXh`j^;5t5lS*#ES!5G7tg)3sX>9QDOBvo;IrC2qXUTqM
z%!WfTJ8+5{J2z!sRHHb1_4Q)!!KLR*v9VIrfCeOkykYTOnviSrW!HNwWqRC#W360h~FP^O7nCV(8x!JiwrR5qW4Vdsrct_suZ3=@8
z&xaqb?J}%f>4QFc(7(Qn1@Y#1;Y6C4391yQA=>v7$N3{_t;G^Op}Z8%KGltEtw4vp
zJy*76MSNPX4MKh>OP7Ky`>v-4Uah+hz{UTvh{fDuvl&-ZODEHnlh*w`jooGNFm-S1
zpf&2i8e6=KDqDU>uNrl4IZ>WTG05C%ru(k-GxDd~y73ZzK)VAWL*@M$gh_9LpIM|quO#iy2jQD=Y-Ai|9fvFYKo$O1Ul`0jv9LhU#eH7&c
zG&>)=c(-+gol6MYP?86+5F>FFH&fwf(#a6=qXIUs6w?7o5+ZmHLI%d%tH6wPm*NaT
zynd}aeRE!%>4(9xVE6WR0n)z{97Nw-yd%{<=eACi)gm(rU$KGXoJxjZ55J)+99a
zd)&dyRN**1!uP4(&HO<-RfTz-8Fya9S_d*sa{bIz+FV`A@xe
z7gvn2PW`yB^)F2Eov9`hynsFkgACH4EXcc=m3=Vx4PW>KP^#OoFEr2skpDM3wh`0P
zbarW&)u};JHnEt!c*EUn_Ae1$mnDujcTKGGp>5HJ@;K`#8-o8H&8?%PTR(8FSZ=iTrJCnUbjyxZPpBS;f%uz
zByp-Gx=bxZthm)MIlEVfG`pxS8fR++vk~sQs_n%@(;incDtA8|TT=Hwx}{dWb3lz(
zJc{lUEUHu=f=w83;aLW-3^C;N~Mil#b7in#=
zw*L;V=&G}6C}(N#FZ0>
z+QTv13Gwc2aPt=UB)atazDYoxt-yU)5k!{(wQ5p40@b`gwu6ni_~926%7IR)A85#N
zDnS4iLqHy;9&m8o<42US=~DRKR?x!Bk+oKs$)Rjj^K3=M&#zPX_ui%5-G*+BO)XsL
z8B$_6KhVBDjo0aih~=_x%(e_uANHZOr)MG0M%@Jne?3U2*^itbTdrsv9@W_N9C4S1
zvfu}3punMMTN6>KVBH|+5iGPCj7JX=KD?4_S%
zC!xs9sjn4RE1x&wquF-v@~WhPwvG)W1tZOS(O>A}B0TE(MGHN4+X>|uwRVWM!gq9T
zu%VE)^EW{#p~dCRv$;Q!HCCL+ae!#Sx#dD!@N+{SPCD+#^*Qz~WHI^`;
z3hRy420c58z*OFF(PGI?>c77LB{!9mO;w`qI-)<3&V!AR+DT>M9CD@7Unyk7Ya?}x
z%J#hcokK~KU*bu1tntJ`fq(dH`>G0^l0V5&s+L(UEfi4b7HfP?mE=;wWChn6q4SXg
zdZ*?i8jGvBKC{?h7_K5Dj&?%%!?t!$cF)I>>S^0a1OltSeFyPrj+H<=d%QRM@|P>%
z1y6N-g&#IZ;eAtN8>?hy4u$pH4RL@gw(y$q8Who4odw5ntJvS*TFYm}(`>3)lHMCb
z_j+PwJ|YhB<8jzOIbiqv<&WOlBX^W5@$CmEDIZGSPuxIYAW!ivwp9Mk*Y%uG6kuZl
zfg7j%AYzEMc?v?00oxhC5Z&7EQzVDtvXW{S(YnaA^P26BuVaZ_Dlw;6MlF4wfFpdR*x!)y`3)ir5ub8I>UzssQc{wVQ6pZK!wieo
zBCMQBgBR$ErS$%Yp0cI4d-bxa!n0e`av=VRATabW^kmRsveNXm)8fm5^J|JQMIh8*
zGv2^~!DQruROOE>%F1DNaNT@(Vfr-s#h*SL9_Bz8x0oa=!$BD@K)yjIeDxLoPR_Bm
zBNH0jZx>|L3)>PyC*lijEhKvx+WV;vbDx%Kn=lU+L-k0)6(10?Cd{yEW6NJ@`C2U{dp
zhm>e3)_R+SVV;^n;f9-RZf|~o2`dVcjiEnt52`{zrlWQjiIWQV*MIvvtJGO75JcwF
z_a}U?E3n;BdHz1{^}Yt3Y&25x5Bc(&&2;!&cYd+ttMtUhv`8HH;Wk`~gB|p0H(c5=
zyPm0}*3lw8{n!Y)3VFa=;d@+*KM}Mh{_C|h+@s8?&>iQEn(?%nDAGfm``Q|~y;REv
zZ~NSAzp}TA^^pmOOPfecy5%(x%Uj;*PvL0$ASh_xkqUIyT(Oy<_%?N0sm3`~L~uL>
z|3}p5Ib5i(7ME&fg-=@$nv+fg)oqR*R0bY9Iy%f_(lo-mni(M9P^b3*_H%xDonzJR
zTVT|lz@Wn&Ke&tDzvHh$-(Jd`!KMd;MUvQjVS5kQj~1HcxPlPUou(r{q276sJ@pdc
ztw=%4S?ps*SG}d(LbF|dX1q`9u+90+thC+*?pEd8wAsC5LlEV5tY#dx=8eDhl34ZE
zyD^y$t8y$9c0Po`+}+Oia}qL~l>TM+qv8I?0?1gnBeQvGAZjogAV4>r^&Uz}l8KYV
zC+^T@T8zguA)ghsXr4Lb=#Mz_439YFsdNRxLv)rT)?1T6eLb;XlGYTxn=H4QRmgj}
zUg*P6Z(X4qYvVrjyhM&4=MN|@FN@11cL)bhr$t1N@s1&_-lj{1c$Xz@eCJ^{4Y`?|
zwC13;-q?u#KLB??h`%;tj8=WJ&}NKaU)CBe3q{jrgm*GR-iFyQ_d5&P3=@JV6O_ss
zO`C~Q+DtTpsjB8#O;D+}WdfTC!H*OD@U}q??C)=pY@3ONiKPuJ&5dCsGli9{Evzh(
zBAGH2YdZqHs^oyVbxE{uR})t6Uf^}4I@{5vKWyI!Il@DN5oMMQdCN9HJvOi`Zwqv&
z9g6eEugAP+7WL*Rjy)@jHf@WiKKar}I+7r}vb<=~+7SWgPT-cvD#IV)=t&P$Yu^e5
zjdk-6vCxNpX@$CroNv9{F+hEoskB!&kdc?KG1v4ufSSSnz)>&wK
zi8`&-+sK!D7j28-y{aIGxeZFTYm4d@u6Ui?A>&9F9G~42&NUmL)6f}My6*veqveQ7
z&1zk;Y{>qFZyCeV$^uc5QILOfgZrl(C|=4gjuXjjQMjNzK7aJYmq8<$0}2*2O@#oN
z7}O{G*pOvp1WQ(fy`AjKK!=ru(U)mlk?z45Gr|q|I=)ATMgG|IM6Q_fZeoTJqq3pQ
zLmyOV=Z>~3g78SCgDdh?AjTbuM(5#nXr4XgiPaYvm&1sCxAEZW4vZ-M4!hT#z&qt^
zn@?`;_>`kKN~sWNnQdVdwEv8co`L*@35q$@LB6NgaozVVE?j0~HhNSis>QR4mGMTYB4Ur-=
z?p_-kR~^DbzpHpuxhE#)5KSwCiK#v6j{XyK6JHEtUL5^cjX+VZa1G3j`Z@U<^RDP!
zyf-!;Z-vGe&!9lhzfjVI>1u)8@H_9qIw=lWO6uo_q7Z6RqJDLJ+PeY=YtBKZ5|(O&
zVU?I$#($5*nC4GKjxw@_wP^tSgr~Gp2_{i~@bQx)huXUt9%MFjYD)2qSWt8RpEH)h
z(k87~sS(P4hX2k31J?sjSg+)c2xYPu#p0Wk1eP!n9r`;^cq`DaJ20&gV57Xv{R6;d
z#>%7pf!ry`$BgVspjN}0u(!yDft6~bHiMS$-y7-7N~v{b&tsgK`u0o&`6sx#`61-l
z7EBqvJMMVFT(DAkM@i;T5c-URCgpl#>d=>%KD{OCt*(bogU4Wa>r$}R-y?KFq|0&4
zo3aRpUq&Nm!Fm}N{k8+6YyF^*HZ^fUqm3v|%d%xhT<&Jc
zwv9q?RPts)1)f@r>4&T^cJD=WDPRV<+Xi%Nx>s?3!DS-C5gx0?(IE&4P5Em(GOMhx
zHT?u1FVWc&ncG+S5#rTXMOK-IzqNAf@>?Z>rodU?Qf3**`TH4|Iep?hV
zjmF#UA5iwFIA?@B)w1Ehc3V8Y9EkD1d819JF`oRSV5%m?vtUWFVEj1G8`YQQMDGIn
zDY3DAH4L9O8h6|MhFRO|VPnt2u*zx+(@#E#-{d~<4K#;ccIy~_D=Y1U+IIVJ<>VQ7
zeyWaP-xfl)nmMrj!Z8$h8Un}Rm14Mih~`IYh0M$x=0<_|q`F05xIg@ZQ=!W#zS1Be
zhtI$F@IHUBnCVdFRU4t=vmMy+T;+vRxhKx-I0@I7;6bOr1~toF9leJ%K+xTX@%E50
ztJDqDuX;cy{C@JY})h#$1eX!`5kBa$D~2
ziuY#uP#`8sWJQC%&G2#6P%Lw4g#L{bWXCjLK3KhXiFv;u<@IBP?6oFi&7{IOJZ2cy
z-1gV-eyNdq1v4#=md>BBW&R=j7yD=?D0kN{!$DVhCUIQJKoBs6Nuy0yfa-+H3xRI=
zmFHMN&b^sz?f}-+2e!`#Cf$v@v_gAeFqLA^0^n5&dPt43S3s+tz0jj;Yt%1gPr@OK
z@62M6z?ta;nI&v(%wSt@684=qffKQxW7{#tQKg$g-@!O6u6Jkl`z&#gn_Q)=5n%x?O
zQ{3R%^oNOd?GadW>H-eVZHmal6EJAuwV3Ip*0nmKxz%MHJ9iU@PkN(X`=-c|hEY5|
z*^Na4vYV{|+B!wy;_=J4diWp68+U}0NZxey53$cmPyNycZMye`l;+3nN!ExsMN+yd
z1hHbqkka->*iKTq$u<4g1_%!gjr$Vm2KNs@_MABsdSzV;9h+O>^09NcdH5u}>$OMI
zv{asWutZ*m?1=XFi#wF6xS-ia^aD9U{e1D+EF0{zn2G(;aBJIPJW_HmQzq}Zy>8&_
zeer&96g)3(!!ecVwMOO^FpKuaM^7C)8LeueS#254ZCHm}ehM}f^mq%lTr@?)mbKHp
z!!$mk;OGAd$dXOttTcmyoE-gAG|aOaBb%vsRVxguPlYSCgNlm8zmFpkYHWUqf?{>5!T8#rIP|_M>Q~co06-IL
zs-i}@5S-n-9nS^1mwV&Jn$5WK|D9Au;njgTm^g7fMi1d
zF#+R(Xxp_s%u97fhoZ3zMTYFnCt!5m-5A(wJQl55hgI{(pvQ<~DBQa
zSMCDHtL&;YDcKc$%3GjxuWtB8d5uYd0?6&@f|#{>QdK`pENlFTRSTT)@aI98`_KDi
zw3!*o4_SaAHe1lIX=`-v(;Hn{e~-lN;Zznir@yAE6blyQw9tuC-
z&oIko4+ZDAg>8o)rORf+n#ynOSPxag&tcQ%hfuUeI6Q7%z=E>=pBJ3tW$`#lCkX;oP{Ga`e(16+5&-?kj6B|718CcW^@5YELAB
zec!{w{Vg7!U5`b(UcjMFJsqz#g1H52-}@uf=4IM!L@$uRzEO9ywmgOr&h_E!)EUde
zO5qzNw=%2L9fO@iu&uWf>N?d&x4(>0PUY1dv(k;wr07d5>)a5He%zV#Rx+|2_-@8R
z^f2FpzD~8_>|6(RyKjSezuz&XaW=h`DtQRO=g)E`Ww&tPA2$?nbVMFG?f1w((7NSl
zMnh(dHtnoo{f`$)ws%9Zf&K_BZibja@u&!Fndy!^t=&+jlRE}HG{dwWR`Kn?*c82|
z!51LTliF{kD4}WpR7BKi=g}TpHTJf
zBDAer7fyBBVqQRH)XZt3B6sqT$)~@X;hP={vCuOV(PI9-tpkb{cYsaYJRRhyKLK05
zxsQMTxruk5bHZuDS~%qo#1l6QWGR*lOP80&m0NEiAG;RLl^kFUdvu;Vk-=VZlPr}7
zV%?tRxOM3|UitzKZGJ|7$KtS4TXQBT)_E?*n=4+VkmYv9lx-F9?D8et^9V1Lh8{iQ>wOWyLyU?s#*kzOrFb
z{CmtBpB-^}mJN+2|AlI{aZ|Ej+X&+qR7N(Fl-3Vo$~ckaO5D@BbayP-SRD_=ULhvM
z;_A-0A;!%Qg@bAWY+e^*7FR(olR!Mb^cF=%?!>8)@9>HQ*|uOY6p)?4m~NgZnC&a*
z>rjD=$>iWhK%bw1b2(W}xq!Mwfm=Km=>hbd0(`ayEQ3jCzG1>yEB^H+`GAeHm{g7j
z+K|Pa7?MhPVw~9vjGjzqwu^da>W~I@nNl|QF
z))@`=*22(jOVFj$bj*4%95XxD!Pb2Ci1P7=b@MscUbjN(gw%q_cL(v)puGsml?xVu
z-jG)qfu+u=^{&Z;%pQ%px5i)NM!@-cURaqppmEi2VDd2SAFZkUGV4y5KI;L7&+Lpk
zTl2v_$_M_|%`spc
zUI8PPIxGFU2}-nUjpA$P!@ut@P&44CUnt4$i_w!wc6T9n4kl4PCa5wnKG`*|)&@g*rVSHHADo9-z{=|}v<Oq->il)fM9Nm43<_3w6Y
zN*BqEQM{lZ7PqN`>1IrfeQi;1!g35K9Y>;CwfFOG#LqV3PARr(jg2-MXLzW2b;A3om1#3ab`2T7;|M)21!Lh=6*sA#>Wt^ROv{#Rcz)gq
z&ZPgITMORZe3YTLuMilcz}3$mud*4T5SCf)FJdH|;)9NFu{S&c+d9Mf`1P7_X(=uSer%7`
z{FB~4tQ#`r$Bc%OwcXh8+y}orZg+My2DAEyuH3jH+PPyT{O!JD*d9kb3K+#;k(?4N
zT{zsneLH25eNiDPsTgx|n^6o%;@X$P8TD-?hQ0R+t*bebaPt&9KVHhQnzLCqqBgm$
zJ+`zg8h?mVsiZ;$tyJBAcZKMEJUk3OI|UJ33Y7{N@)i>UH&yVIc)X4HKT-5qxowf^
zK3oYEozWhux{;?4%EL?GO;Jb}M~W{kQoHcoyLX9>j?TM4NwGO9GZG@Xnl>m_c1brt
z5{SHdiC_yas#bP+99EeSd7Wz^36%1zh_8!Xo4}}4
zRv6A5(Mj$s;j77aSW&~1pZZ%BFf2HknBYXwXB0eD(Ngpojp#ELqR&{Mx|bySj9T;=
zE2_FH^6>m*4lfK6ea3~0W6}twwFlHck@+ey9NxHbBm2k5KBH}9#!Dm92FpB^an`<*
zZTnHh5)>H3T?+>)RPv&X)(fhytmMTPkI-(%HilQu{>>{@s+8$_aBwh>w8JCwg(PCL
zF?REUyy*x1?MoM#+FR5w8QejZ7v7KQMTe!lrN_vocanY
zUwn}x?RKzjc;y0z>)YDe;^pO)>6p}~Qm+~k36WR16l{T4)ylZ&a^M{LNN&(>Lve9&
zgoo$8ceAmhbm`K#ySqRBF58c6M+Jw9s2i6EQn}(^#Um~KMFQc0k=XirQ_cXs|FUJv
z;_B*}`C!ZuINOJpx!$s)9K(?+wNrsn4hTXVFwornK_q%cm~K%LXPsQ)z~2
z!jj4ZK4yjcM%J&M$d{qk#$5jIcl6ZC-fEpe
z9<9ql&AdAO1h1Gi7Y<$(W>2&H%a93{_V`q
zF=kk@w)f^K@hNFo+WS!LaS~Ma9{?DkIzbt+Drhio6$lG8ey#qSwt*
z%$iV+I{6-LEO7L)*6K4E#eF*UXnSYq!}ox0g7i=2@+{gB`Gl{3^g;~4gS
zBE8;lX1ITTmsDim_OMb45+go&dQOH>8j)O-DXJA%+SsKUbme@gUPr$yk%!l^J?L59
z?&ViJ>%R*L8yA1F3_%R^zn5P!oE14zI1bMQL{@6n8_o@6>|PAC03)0o9e6RGLvjC`IMA
zBbd{+kg=S_1lxvi=tEN0LwUI8dU?@7D78NQpq)^*cFwt9ZiX2p0}I*?_h4UavS_oy
z058dic57?;478_RvV-U|rkjrc;lB6iI`0RfY@cV=`mVS>S*rvL3fo4G99pCn`q}~y
z-Mzt!6*@*~Nj1NR`*^SGJbs9><@s6b>1u8~W*9RRO8jexPf5!m`mbvR8(ee0&_rQP
z{kL~>Vn84_gJaQ@^`^3)i{9NX@(MGlG{cNMKv51F)hKBxNSWTOTD_!&FvE=8V1bj5
z4UG#pvt>{yl~k>13iC2K!?yWw7W)jrt6CLYi+U}ki9=y;MI(D-a3C0oQEJ0Z*5K^fqXqq51%rG*T+p-yEm|=`i*!xkp=<;8g@gzfG
zkDs~mm|=z)MGS*j&@$nM8D=~Q$kLW=y!2maEU?ayPd6Eju|r$lq9pU$32$;bX3Q|=
zy3Hb+m0QUOl3!S7jf<4+!kEiCuh5sstf-Qk6*n@~kL44x2Efv(q$T*z;z{W561wjp
z$5dl?lItyd>ylhwlneBX8Ba?4$qVu_J{n|{DOavsO6ITo_wQqXWU^WCqzr~;7&j!L
zba2qw6UuWKgnUlk(x8_ByhlC$Q)YjY*lnp!cB?|
z1a0|cCR-aDo0Ke`rV2S;==S;aF*Y_fwuGe08M3Wt+W?PIx(1YO2>1UeexY)Lg=jPG
z{j(gFZAsc0QnxF~pc
z4+#woEg_+DhO7&@orol2x{&?G*c~$Zwy}@|J|ZF_rQA&o`o`0ANL?oTyPWVbCS4{S
zLsI5UTTy8v>8|M0?~4X?P`rc@$Zh8*`soyKY^C?KEZeB4sFdw#jM1hjEYckmvd(gg
z@-ir?G{cN17E-Y#NsphAc#kEiRAirsLMl09?b_1t%}y~%nGG@c$>--5`Xl#!lzV#`
z%Z#OTYDhwm({chsl}}-jm-0GmgHTC&(E}Ww}gFL2XEaBlVLpIRGRXkPZtuyO$m0%$PAcWMunEk4DO_j~Mf~
znH};HerardKF=Yenn$8v$LD@sQ6iUqstr?YKY2l3#+7uKNu?QPm|=z)W|(1y8D^Ma
zh8YEhK^H4Z@09eenR%)irUdDMkzp8eO@gTZ@k*vop9^6FcsWJd8r<^~803p47q$@(C}
z5@kCwC$}@Bq#(m}<#=gKVY?D?T$gLWv(vG1IfS%fjVVktH^}kL$;m1E&~OeqAAgq%_|Kg?XOtP#q~TbvUcF3f
z@UC9H%AGrRN>Hl2K@NnHz^4y}GQ#1^nKK4m-zZ6I&6+h+3<+IGgXiqovqt48kY!P)
zPMxe7h|?ft4h{}ZNxD$dkZn&oHL^hP(zU_s&D6nC2Sw2q
zixbk}B=tge4i))+vKHMlAqj{yASL90e)_Z#X4}jL)v8s)!y|Ryr+@$b
zH)UqX(2&NDtWVupQCX+5ZjB8x3_BnoK<_NBBs#{HozUOkpYr9)XFixL41vGJ0336u
zWQ58qZ2h3#ucDE7W;!gL4aT0qGB?ObfZDZdXE`o)gltE~YV_v?(twr@jmMD5c|umF
zwl|F_%`7WYw@>gI=A!;{001BWNkl#x6zc9vW^E$Y{=uQyqb>^pMC
z&zK>}my|!KXmlZU_BIV^r+H0#C)X^_c}?krPKP>|6PN(t%R&k}|$x8N~$
zLMid+hA7x&P-))dL+RUzm^x(pmhYnLoRASkraoRvhmhP7;&I5|)zRXG50@rqDDIpzPQbsxBrwu8yu@g!OIUdOICVgK=nG3{}38jP_OS6#5
z=mRAqOnwk^ASuevsXJJP8p+lGHp7O@45}bO~iT
z6eN&C6%#^OTrq?fsbUBTiz}gJ%qn6+!nm*Z$5q6|
znP!3uGtBsJK|^AqX@^1Cmu?YbYi=$k`Z>*RJBM$4h>mI?P`7lt(GTum6
zMm&s!ziy|zf*FN?X_LytzghKq7dm(Dto7;6^zZW$KBD5!b(2_gS`Z*j4Bt-g%iv`v
z(@y=#x=Cv|oo{ZW=EgTOSbDhV4Us>5&7pAp29PG=5IcT|D4c&@
zeS>diEaHc|x=+y%^X+u{4qnE|Cy`gGL;2~md9436N~ioD^5>R$EI$xvOu5RQAehNd
zQ9yNSDgC>RX7i0FsBDGta^VU@7{oFVy7b6AhK-oVm;3f|{F>R^Feo(l2)c5EaN}Qw
zXWURw7Qfe!1!b
z44o@TsQ7FwreK8gJE7TP&^4WpeU(av2Y(8eO>C$v38HM;n^`5RQ6<*k^>x!|m$9n?
zO1m=I-=hx-M_Ru5Ar&pF8dkm%ftox2?qSQ7s+!
zA*d}ZP+FD3(Weg2cWz59=aQE$zmVi-!1SwhU%8zAex@hCvBI)y2ZoNgOI7cpD^uwy
z|Kwk=u!jl5p=J?}I7xxfXABJ645PGnl98VBWx~J`qpL>%ENq*HhiPK?W#7*zI?dyg
zQFRNYFV#18G3$eiJhyxzt?df&peG5clYIKtM%?FZV02xp+^;usJd99rDPBo!U!{o~
zR)s+FeXgB2%-_yE=v>o|haqSAdgeY(gskG@{+0EXi)HmLj2InFb#Hk#iEBHiGV{Xo
zte)DA)UIMl^)4eA9bFx7Q!H3HgL*R_hswSO*p;H6cs56_D6ai0)L9P2Ou`uiB}H%)t{G6hPk1&Ne<^`(F8
zWbcXphdo<<;a1Q|wk@8{d+$wS$ycWdm!1YqBqzRH!M+PI{JZaCmhZhl-0gq39FZhW
zo#4wA`?wJEFZ(`T&fdSzvw!^ou4Y<`8^@V%Hu6(YZf=W2BF^n+{jvqjTeyNv2QLtv
zevA4xhc|q~nM^Lcc>dnCfgf+BUX&PdPW<*V7R+17icJT(5S3
z{>PtDS!Ga%|HfBqxAU7QTy@-S{<$2H;u(qmN08fJZV_~H8;hsE$9vP3@YU(?OtX2K
zdsjHJc{^7CJW!oCJ=yFH6<5jucO)l;5=<+bq0PsT9Pk`S2^U!`Ey#7P}
zXUiW=x^-KWb?sdh+6pF_bpxGzLiGJ(N1?QMrx&UjLb8Ix+Di~+F&@U9
zhSLU4cK!sA%6KAgT}|2aJ>_%lI-zl*g{cBLzH=S(7XO50lYWePvlkUFE@#GByqh@oO)o>-|-=8>sEgnVbiqvmb6OU3J)NJ028s1)%3i=;k9l!VZ7d4@W
z*z(^H}_oB~AK`;>}(axww1=UtUdp
zf;#AX=8jv!Zc#yT#k^c+hJWzW=p3|n<3qYgD;?3>n~v9b;4K?)0}x^x*Z
zh{12X!<@yVdFJ6y>^>Hi>wS6+VD(*YUcSTq60nMkgZvZ-@t4@YbRxt0_oiFdUJM$y
zoFln9nhJ}DT;0Efaf5r)wM%#U4qMKzX^tuB+}SW~7=62UrAPnK%-?nKaoCyq+7`wQ
zoWYhoiy79l3vaBuMB>Gbj2ZYLKjz_n9$sbtl5q^~-IXrg`!a0#uQ`UaX(QcJ?xZ`~
zXtJisDKg_A%~M`KuU>D}aTdNjjP;iuadG`?bnY^V{dcqP*XiZ^<^C1C-M16%+IFPx
zxDEVuFNug<6X^NgUcwSivbaYFx=h-S%wrLMX+KLR4x@kXZglN6h;hr0kluVAUfs`<
zaf9jIwF}+*4rBSx*9)x=@&--Ghfo>-Ng>8ta@={g%^Jo_vd#2=i$&jEPjMb=LiaGW-!N94$*=}AVf&cYf9&J2xX7T6Av{8b
zTcyg_i~gTnF=ksdkwhUhVFNqh{G2*3J9E=s?|JcsFvon;06Z7fRvMu9x
zON%#=3+qPk(yW82%Y7bPX3v7ryxg@NZ9DW}@Zz7fmH{Obb$mG^d$i`c<}K+yauuhF
zlguSuW5@f0n0?SVW4aR_z>SmeTManB2X?iB^^0KeDOkM`LYD}l`73;wwi4`Kh|u3H
z#C59$(G+A;a|kB?EbCDaML{sD4uh+LsP23fD(K9@CfJowzb=THAj3{VhIN5yH#~&3;(WvY;4DaQ^><)t-|VTlf6lw&{`uq>a-DBjfPZahQ8DYO;v~wJu7d0fa_dzDHkIj7TDSmbOMn^T3*bH^^!Ma&krXXNBo|Cn#xX&H0|Din|ltX5ivE_zB@!h%P!Q)p+Yp_2l#AfX$H(3O^@bJ
zQ@24YUY_s)1FXMc!%<04t*P0q4atZ02?CM&-0Si2k&%BQ~KNZQPlCi*A7I^j$2UJY7?
z!^97cQ|0aXjBVvj)&M3A53B=tb6jtlRP~`k-EO?vM#iQzDPh832nzp~G#g`jr(_W0YOV=GM(%)2H-OhsL=kR%B8JoAPVfNt0xXZ?%y2Pewb2(b3
z2Q$}i=F@TY30gm!b*Iyeo5Tl1hn!~Lucdi;jvz*zyu`6MQMY$_l$_iBYOZcy!18lG
zys>OETh`2GaAWrZg?Z=KQ+|3Mbbdn^0A0lZI7k=(M>0I#dGVB=_yVelb6bM*DX4X0
zMz8yr{#9(KI`9*=Z<|WHayj(tl=3~nx4(HXWaTdQt{skl;1<^G4Z^kaOun4i5jUGg
zjNbe;+ora|LF&fP2uimY&YV?WvT=5MVX!S_oqU-3D%%$<=bX3ne
z4>FM6PrqdUinr<8Nf_!K`!a6BU))PmY1!)wNlx}Nec%W_JDWjR)Hl9j!oc_0AswLV
zaDLk`jbVMe)3rx`M$g~H#ccX(79w59j%`ueie>?})UdbEIg_l%fGeso#{%zacC@Xk
zEc{GIj!5G2-d#jC8Oz5T*D$k-9mki>W%Iv@DBPZ*p?45Rems*Fp`<3{r$hW%=4qN1
z$#Mc=SNyV`h4bbxao8BXwtJOnL+Yi5kE?{v3I3hk$I38Z!Ahod^-j@4QQ6<7rCm<=>{3A}WRdx_b%r;?sQRaSrrf|S|YKwI&D714FO
z3c}XdUJx)rHaq(WqIfg)TCbKcw-fk@0uB@1xvC3Xl4~4=5>|=#cp%7^O*weF3DngJ
z>rak_x28hibAoUV2FJn*EyjZ|L2$SH3u`ur=Y9Z>zJupK6vS^F+!bVVglJ%sHm6>$
z5po5UfrWG6iXh8_gsdairpmo&0sJi9@t7Ek{uJjsx$FFvxMs~61!DWjehxjcL=;K2zW4rP`_VM(JFQ+CCXnN9p2>eOwH&`RvrzHM-DR7>xTQ
z`-?x#-d~;R(7_k39bn~03t6~Od=_osm+%auS#{wIf5-mI*7rv;V#J8l&&Ww^y%5QT
zb5~Nl%}O6(wELW5&&fD6@j}raccF7fUzDl~ocTMJf49ER$n<+hjAYW*3q)Qx$CVtC
z;Hblc2`}e~r~Na$*eaIYyDp3Vn8>x=JBfVmC7L>T;$s-2ck&BN%mkE8KHHau&Xfb#w9qW2x
z85fP(!v~l6TLgxtw+{v7n)KtuG%D(6O(CIR@1!Cdv%
z9`*-Uqhlw3l+oeDm1{&mrD*;*c{O9kSy7HA{a&SI4Sy<65vlkmImg_Q!aGPjY#M{LXb{jEPNvd-7lAl|L^C`Q+{bLy!18n83p~k@++wz
z0i|rPR#;$RX-DZYc39=suNA(u9n^zbPS)7@wWsaVb_5090xLU8*;)#Ep~R|G8Oqp6
zg{Z)##bAbZtc$0mXy0lNd|cwW6&NZ@o1{byiI3t@Tb0ADN)rP7OB+xhWJXFqJ@GBS
zc?@CYuDz@s?oXiTTYDubRQ;QSz7TKW+JL&QH#u_T@3a|IHP??E;=I$-JmaP2(xz$5
zJz9nyGuN~E({a=fTF>ltr?X~O*GHrc*s+tY#?t1caAy85k|y15VcX&s0Sg{*;le%o
z54=U`=WgQEKZ12P()9Sl1lBGLqv4CUac*~;>Z7CBekb+Z{mb`xb94~i?QY@PBbYHi
z>v5Jnc*LABcWC%QT#*`Z8BpN-{18U8t%iq_C-q($M%N0rIet74wZe^-?HiDA@+hfI
z=MG1XUZYCewiL4IWCIHnRyNp)w(jgx9>qUf`DELl8FrV#o~DC_(z<39{Jk>AHd@)0
z#@0%V6G|&el`Vsvo~5=)QWvSDF0#hny(&$gsgj|Kl)iL$tv7X?Y$#o+BORW$CHUs8
ze80iNi@RC)&R|~b+Mae@`Z9X)58RRU`0yv@zcQ9hm(nX$5cJ(s7&Lh&w?lU@w(~pe
z3{Bv~{7$rNJD%N-Lkn}5K~;xyYQuEic(D_0TDPOyfOlASC@^D*l2CS=M!&@5FLpBj
z)y}jXyqbRt(WVrFY&oYHT^6>26pFuH(^;P=dZG46&x?2cr&QwOKT?n~p`U*jrkh%b
zZTXZSUE=r&p|2|n-TU4TWQtrlyG!VGp%4}ba^!9(mLb||zn7twkl(9;Aj00@@~n_c
zj3qXL5cd_tHZF~1302MXq0qup5Kd7C6@|GTcRTf*%p#m13O7l#EuSjT-e2oCL>E6G
z#+CPl8r~!bXY;aIq%a(wK%y*R%+`D1VQq_zwjHZbVxd-}&aYEaS)TTty?AH$aXQvN
z%!O*586kS5g;fonYvYmTIk&^3tdjIC$`wjW90P{1XhKt+bpQ$!3X3$)vOUdSXuwyS
z4skK|G?!}iWTcD{Qz@}@3}DEj2{hGNM4%94!$O&TB6W30FmbNW;3VGFx@&JLyz>=5
zwQkODzk1R0BR`Z%6`roKoRrh96%|-pTcA?u4vHF7k5p;DVTF%Sjw_x%!`Dj|GX3E+
zK6+6cwyelA_3rWA7l)`nzB7K=xM8hrZOB{&7N|uXU{MM?VNAu!P-QzUt)sq6j9{)#
zS)ODpP~hEq6f^qyYRwMZWzU2@Y)${F4(8~pMXdcX5RH?U801yNCTq~BGoIn*<)zd4
z^c=HqN{6POvG}Mk!U8-vd@PuHJzM3OL}_jv;G_3WqkI_7bzxL__IRIu$p;yIrzQhG
zc!>rsvfLf%GI{{N4OqqZr{ADKGhG8X^;veMY_Uuk>q}84<)Dh+_UtFP+KY_xR}vi_
zN4Z7;RNDSMC$A>bpmu7VD#|sX->WUDVW=>ROr*+NkSaF=J-7g(1RrJ9qP6@ODB7!+
z2TrP3l8e|4t*p|MQ3C>5x?~7_HwVzE|LY8DU4e(ep~ODA#_mNQ@ZIC9J#n_xWatWE
zTzYxwQ~4V1=Xk%8Dm_OrAaE&5hV*4~KqvaYK8V(S1Icc8q
z1JYC8ToB5^DdG_!2Kgip@D@h#`7r3I$5SqmZfivflG<|(^qMS;i#i1oGb?iI*NU<@
zIHYSk1$K5esN=G;{?r5?UB#lcKN6^M!pp-6RjiyrQ_D)G=coY$E?vTqzMBc?)SuS}
zwZ^ZI`XDDLeA_apM=hMhvz*$u<>}8C6L?FFDr6tu=BO_OVQF2L^_z}zPSt>h7MD2k
zb2xQ+x5iWb7mM}>Q|-l3_$#9ck1I!`fJ$ut{v=lu8&F%PJhX|lw;<96?b)KSgaya5
z<-8L+wm9MRcMKgSM)CY#HcXu2&FAG4nKd?yNgE&X`i#
z8M(@qmu6V8Xl^t=*L3CB92=|!fw-X1ah3_vvu0L=FyP9tb99*$11`ak@1hrS{P4^e
zlay|_ia|6k_FhV8w0+YKH2rKbM^DES;BlB^!S(3bs$kIud6K1ZKSm9zkn;Sv;~z3`
z(rR|MdzyYJr`;%D;EX$#RDJPn247#uf`L8w;+d`t8ulvfDwj$m?8A03ZQMt!s?(S`&%_aND+t?185Wo-Hk4bTv8Y}9Gk3cO
z%U+vI_$yyCvyC9gGH0Y94yiM!&6&RLTPok(&y3Ma`E*x10$wf8`Ohct{ytY;owAB~
zPg!y2@Tbh4I{~%B2Ht5T-%CYYbTGf~K1TO97qGH|qv42JP7o|{vLKCR1UU=rB5XOK
zZx_D}8{|3h`>kJr3q4`)4tQ(6;M>lyJgrmL6~}8Gg<*p1927d(-w=7YL^pFTm+`lq
zuqm7!G7=C;GMqjDBn68x<|Eh|2p(R7*vd^FWJeIqZuBTIhKTp7+ZQ@?ggJwRtZB_h
z&K{2c8~TX%=~x^_s1{KsjVSMWCY2~llSvZgrKeIaO()74L5KtIq!Hx{spqtba%Dl3
zGv(dUCdvznm?-n4lS&H<15L#$aCojePj6enoNJae8Q&9(u0katBpDLMk^=`CHn#k;JVz8c5H#J1YvNq2=^*KKr
zjih92Ks07UeY)W$J*T^^`5d#c&Wv9rcYT&SCropQ6*Yo?NX241Df#D=8MF?SV@$v
z0}Vz^q~hM$%$|Ceu~T2BcDbxlSy*IK16EaO)$|_oRtZx2Ui;Mba+>>`|7tylLNxTv
zOSKp&;(lJsk{|3Cx^kPSdn+`d+Zj9fK&ERfbPW-`$Lwg(p(S=Rk8;
zPBj}I3MU#2n!|=3H#o6lH6PEP%p<1_ye<@j~z_v
z(-fk(2QgkPP}NN
zGIaHcV^p#sUjzniUud=SIA?U@N_^|HAeaiwXjy!fmxD6fA2;@jVOvr&t*5m|wK(?JYv
zU9$+X_UnfxLFijSRD2y%2~Na0LDFQBvpOlwi$BDljXmInUa(`2;Lcj<8Y~2H`DiwT
zjuk{n=;(vv1kowRh@zcqVY4XylV5?KAX0*G54sj|<`{$X0jpo)^ZX
zp0lhh#uZ`Sz9k50WAS;Vd=U%!1QKP_Cza}d*fe$syB_ODE1rIRSqjt74R)fp-aN5~
z6TaPaX2$G9N5094Jty$((IhoQ#JVTJu+4gky&%Jjj99lAB$OWcNe4DdP1H|AM
zK1$yn001BWNkl6NtHHcD^Wb*sj{!wu<}9*d%Ipyh}mT$nbIx6d{u
zK#Z($S5I&%ssW>>S4vsCsDE{unxLR4T>R5WrTEvcTW_lWbc9CTE2ph_RO037LgW>>
z$jF^?P2XU|KU0`G`VuXkuZiQM8~k=E3fHt}sBeD5lsRYdZT=Jqr%&SAu8Sz0w8cw;
zSU&ygTqf=Mka_Qa!mHEY%vtAVf;H6#Ok%)_xr`t9KRn8pBPlu>t2!-dQsEzLtP3&f
zlqfW)Vg=ct>JRmvKRb9;W|a#D&e&Nr-Uw5OHxcmUWyC%?9?E=6LqcW9iC-Eu7K
ztJCU*9+^U%?8=tG@_r1_(soL3hDmq18)uEZL#ezQs0tg(l(M39zz9}OYNm(OrL|BC
z1qQ9jY)I9Zg0P#yK%*j77|d4g4y0D|IrjW`S?GU5O^fM=I`k5k@S}!@AI*Aq;^>|Q
zL`Nl2s!jm464$U}=Oya*tBPnXgrEJ3D8GhO*W>KbMb7sp6ME^A7@ce2;n$4bosP0+
zK{Qcvuq(o1kNriE#zQby5X2##qKre}`N3%?v+>	A2*un>rXm%)<3+
z3u_y!l4FR95dA`QNR29%*hf~lJCqUKP{ZAGd-yT^VZvfdEp89|n)RmB(LF4Pj!LmD
zN)>5FpD(6;fXe0=l|PFtNo(^SYMj7mjZ8A@4|CSb%$Ch7B3WJ*DvJ&`t8
z6BD|~**^VmiV3waRnTX#qcGr#a|}!3;=!-^qs<5coHT?S-pawCigaw69#LaW
zjrOf@9>0~BvUU$Q+@51(6;a^1?m1|IwT-Cp=tyK&
zsgT9gQWNs;MW|F0&q^)m)9na5KZqpKpb6xX2s-=;^Hv??MzS;Bo=zm(l@SSrh&jz>
z232!El8}2&_*Ztw7<(0EC|}-*_^T1fK(2IB=~mvmSPgcCAd|z}z~Gs1#8otXH)t*h
z*YBhfoPm>5b*|k5p$@AFvh!@_`z^iU^I3vaP7uVcG#nUFOoEFti;M*uA+JNKs%F)X
zke0fHKZI_rCHO(j=3;y}CO%PQBtH{fwzZIRX*7(Q1Z4$@>sfNaP?=yVMA@`SrKLZ;
zr_JU0Olu4+DCZ+ytIV?uo2bHFd)nQ)5kscb!a3!wt*P7pU3&d|6-}}d-_GM1VV&kO
zu&zu0cYE>kRjtr59uRmdktSVR#_73q*&oJV{CV0kOCdAeb
zrE?!<%&gAu=kAdF#ypzVbQ8Z4Pv`L?uud^n?eKqLIt%JuBqAw3Rjv`%$Ww}EhcQv*
zPUb2Y!jxJv9bX*R|GIQKq@V!eH{PXmg3-oc${bzlPPx
z1CJ>XvLjVR%5L;Q;Xu=Ft=TbeCIkL*!%pc&vpSV1Xf@{*>fl4noqT`<7iX*=#E@KV
z5L259n)~c#mAISIe@?1gr0S4Mx9EQ
zq&!U>%%QoHIgsFjv-JaFlB+RjYEx2*+rauDH{?ZM2s%ciw$Ea@U<;cpZD}#3PO2eX
zsty6Q5?QljCztB?BUMG5<*z6|8dfitc^u4vNEd(7XAL`7MF|7Wk&`>i3Ii^gz^$Rw
z+%L~r;L*r|U0cg?{+IhqSR6_BM@lY?OEvv1{mZem@&n$Q7fIt~uDs$?T*OjI1@Bna
z4eP^FD>X591(|m
zBfGq8D`7>I<}L93=5vO<_$>`z`Gi^B?X}LU2|6%?3HyaEaf&)JG*;fr3qo{$d^f}euYA`+r*UF>Qp52=uyFgn-z9tDPwtGjA7D5aAF(Dqj_!(hH$wF~33m8|3B_j`&(xoAm3
zy0l>Y`uYqA3?tF8Jibm=sD_C5wHDe+QH`PNbwcKpdX8y9u5Y!h)`oU@m?6&%3a3`Q
zH8GGm3*V#Px6+XjON?bL`c8S1rfF>=ivc{PGtO3(Y1-U}-5W1^=S%q(nhRoJ>^emlMIgmK@Z9Ns=|XcxC3^+9gPi@r#C
z+d6G&X0wl-WnSmKr_zjLg*%QK5_)A4S<^
z&h&IIVq&R$iq}@}<`tza%J>j&1U;aPw;vv5blZoDM=DFIwGlQ&Q8gXQr+8!2FT9c6
z=^7*0{8Q#*R&~d)UF(=_-4U$$x+B-az`c@7iu0lFnC+xr(s1k+cJ;kY=sgSUh2C{9
zo%S+Wo^8XkllF2bpRb@vh$S}O3U{Z}D1w+X-*E7vpu;l0*2)`i*JytG^*nJ6pT;iY
zBs&k^ASs>HO6P0JM5}bZR><9vW!H$70SZ3ax|uT#hEUI5!GlXX*m2U5rV|?(r2Sv~
zUXVkf7iYbbp+EBjM^CNnt=1XjQO=#z%UZ72<1?LhfFKSGKBYis#F
zEBhrSOJKkdmTc=o$c;P1Dx4@^!JSO@hpqvfLM&{lJ#;0T+ub0-!WF-igWj|mzn14-
zzs0Qx6?U%W@pQ6Hn}JYLZRmOovBWSF`N@`4l?;?+J;|tMYYJG+`GQqh5B$VOFRY#L
z^)JN2iEQt;qO6A>+5K!@kzY^wn$Ka+C>zDSX`
z79+m+5#0zkrFR>~uYHc!Z`~q7Wru5dPn^;{9k%V5^If~NE9>V-q>B{_CEa_J=HuBB
zJat+r-qn)s^{lb_SDaHP^5u+hVZbSc0jCiLoDEZFmC`ZbtaxRz8zJw8@#+KxrI$Ds
z@l1z8^og{#Q^phjs`{=Gh(;S38t1QJt9=*f>?BdbVoJigKQn#HVO}TjAK8
z9__a?d&*0kmQe)m&r#3cl7CZvx)wbLHRI?Z62IT4AGDKs8S}VrtXdG^VS*g>5_Zh+%DG>!3ro={>u7b%$NEGN%nE+ulgH=$
zuo0I14iN_4b*Q9?GCAxbQKSQ}EXEA_N4&qdyYL?8r)=s_@hR5U=Ig%A
zvwwalDeEg}8E_U<
z9^lD?0jb}-o_FEz2KK^$OUBLDS{QK2X$G7HOShC~?Cm6Bz$x+fwahT!MtEx(a5hX_
z>6(%_XqLRF6VR&>TnHG(9C
zbagB6>GA8B~Q!6NZ2C
z4gG>|aQm(W&b~Uc%$7A6p*y{sCs^QA9vT;QvYEUO2nd
zwa_x)lvJps%%cIP;fHmREIXwlBDq9@c}3G{M88R%^l`6DMJIF1$r~&w=jETVlt?>@
zfYh1PYLC4n);c-UZE&hsJ)hg6$=|C&&3x~t`(#2P3cgVul5G|TTz$pI5DyYvZ>}Je
zk%B4Pmhhf=w*K(m0HK`*f@ej;KHE$x%`n3#p`ff!^*kk6F~baFg{gYVjSpshkRRAK
za^%n=HENkyr$mDqEU4SYp2>}qnPOvE_?eD^Kv5Q->gJYXh8ZOa1^8D(e~y%5hSC87
z9E#@I1|gL;Ha5h^#~b2NW|&c;kZoBTR#qip1>_yt@)^6Vy3m$ap(wg(tfG9j8D^Ma
zd{Ee1(a7Ey^q~c=6>JPM(_w}g`GN^-%ceJ@tuf1OYVsBZa(-WfSXY_-
zeMHuYsW|Yo%h(jkJ2&Wk2fHf8FGzl&Pewaqn9Iusc28wJW-G=J3uAC#6$2EdMCZ>r
zM%y47KpL}Et5(I@+8nQMhG{|$i2nZmSxBV4y*)K*)G#_jQx4ufK0f&QWo+#t31z){
z^(b35uh~mV2GZcIRjU?d%4BT6Ey=vEudhLk5=khfUeX6ycE(85%9k&%*D;`7R>lNb
zhNVjBtQB+XV;aQLLKtG+qq7{YSg~U9%VJ
zSXg^NEg`>k#*XAtHw6X;rsyVPLg~__Q=Bf2j*i)0Bl}KRSQv?kiNz>c?vP`=oB)w}
zR(I@FtJNv}Jt`{7D96q0ART&g%qUkboBh)x_rk5Y5PwB{F%H`W3o$M`w1kRt#0zBO
z6w)NX%~=q(MO4ncVihk7ER;~TG5862%hc6>O|lwpFBFacb4ZLTUipdxRzf#<4HKk#
zOg16Q889fRG{X!t%rK*9AW6XO+qb!Y|9(k}l0w)eadC5VGwN9tGyVgRPK&TFA^M>B
zJE5ed$p-|nDP0ph-^ewj%nUQkFvApJCY5HGVTKuIm|=z)W|(1y8D^rip85449Lo#4bu3wc)3rs0^
zPxf=!-*e0#5O+?D8=rwHxcCIi7IHoN(?b6a7e<$J&X-1i58+!&eodv6xm@GaFm+4&
z>@SP`1wuCnG8A2Gk`)`|no5t?ML9Ie^-_a`zsd#Ka^b(RDG@Ww$Pdyps_JHXm5vg*
zfZv!5F`0BgCQH!G3M%_xbab@QC3w(=OeW&w62F#0cW0r_>09aTTZCF49dTi`PlXM;r6_ekteT^
zgJiv9f?#IN4*c-pLxO^WOv<>_MtXKm-EDIuL6&QBjV*ynejpRA=uHxn7z4p8;NA%%
zObnGBgt1zxMruY$yGmKk%Dum(E;qLNlB7)PZ+%2YlCqGHkP?t8@6gwVA|oSH%Gwkm
z>p|KrdL81?2jHLXqE8svWRI;sJS|9=bv6mbZi(^Z#{0!9Q*J0-J9W&k)RQSVbUj?s
zlgajpg3wph1<7~IIWw<8NTtm7b?;tDdlSqsqwtU!k`&9#q3MQ+B$UCyhWDgO<3Sn`negZ)oNg$r>_~(EUZLST1^i}bJ$_@V
z7-;7dzdX4??pxI)C}W_FzX7KzV>FotbAoGMsIfK6VN=YEstf73kz4d=!;}r@bV$Nn
zsZyoPd(Ucw!F>J&c<^t@304?bIYF&GV3V;&vUF_7Y^25tNnm9A(yj+N{@l26!{}%C
zb=!~}v<9t+w!$}|GBf=7XbHx~~Lw3~<
zg}mj3JrPqfG8HGRg>Bq8P>lQ8NAwwFO{J+1z-E|{4(T1%hMYMwRzfKuXHe52XD3a8
zP)f+zNEsrP4jGbH6p+u&;x`LgVVs0g;?5!=n;r+OP{j{Co15`LEMaXx`qT^x?nl7P~kYVj%n=&2J*6YRyNyKy~5+s>4c0wtk
z+lHQm{;s!gOOj)(gi<2AF)i*-<0O<4v3g{dQBVH`LMb6bs1t7Hc!@zsrKyGqnPEn5
zFt*SmU1-Z^YU`P`MCzt2PJ#HVMe$@4p*
z*<#RDnGesxYeJ5JAHwF_#Uo3eQ5;U+|43b!6sc65nn#!BZRyti1zzahm5$wBVanFC
z#HJM3Jr2wq$>;wX8+}RQ(idZx@ojAGmlpPyE`7n68Q-G&OPa8~Y}_59f8zvA7<<0h
zOITraandEe7&DcT5=~?8MBY##S%+NAms49R2PWuEyzvTd8hx>boQS6KNy^
zn)(j=7mVetx87pRn6bP&X%3%!^C#gZ$$9f;ROAmD0?)x2Tj<*iJWEmwT0+a`p>90v
zy`0@eNd*2GX+rX*!nDd*7aGF8ok-6Y-(%JGZTxuABs-7sBH<_I^?Q?b7m8?HNK9bG
zhY<{%5y9B~#WVIKUB6GCVNrY)lX-l2_*X2i2{{JNj%M?%q6yVay116tJHN}Wk{pAQ
zW4V0v0Ds+c!LMR@994hw<@o-LSa&)5nG(xd^cwdb!`u3%Y?_{Qc{8KCPh{W2#}`nm
z*ErrA-WFej?W$h9C{edP;^Fxy*IS02Tr0MyTAA7}|vlUKqNWlj|)k{`Ao
zMA>^W+c$mAJI@tdf0bO{`42;##IYkPN(YqYg&NA-uP;Jad)v~djWuzobf!zy4~p8suDKY
zJVGBzEMa@nWj2rQ&O~GC>=xi(8(LJ%dMZmf>CwxEf~7087lg_yH&vPx_T-YUeAGoz
zw&6V2MFC)1q*4dg?oH|1)Rv2XYh_DJ3gy@Jvlu&kIB(D1$jPu&wiN%@Ha^|+BVWxJ
z#rXASNKe3=aQhe==8k9N@R7VXe>1-Y-{I)S9h?&cOcQaEwUb9Mbo30?p9t3qD^(Hp
z$N%_v)+8oQp3A4Z&JvmDu#+6_Ro`I$@=ct|l&Sjxr#CNW-?h~DP=)=$|303@q=}Q6
z`{^#u=HZ9qZwC<*yq|UZ!ZIriO*s44?%{USZEk7nz=Ycz+c1~$BZo8cz4>hZEtoq;
zH?l((ig-fA$+b)#F_h6W)^j2}>+dIp9Od)HpK~-s$gIB4jX%#5mIx33+{)@h*SU3c
zEz?I2W$36Ge11G6lNVES_X6L3HjgRqPiD?1yEqpa$?08tI1_6mvw(TR1OC}Qlc7D^
z(mbFM%{mTd&bPNx-ZSaqXAEvXhSl4q(W_}gdMy209^|vZ?HRLr8`F9f!}vQJ@L0qTc|x7E~F*fJ-rV)zX9G|g<>8AV~%93
zTR5{H+Por&R2QfyuGym}{_DgSP)(fMQoyY_^DA%MB}89_D!qhUJ%xN9Lz+s-lf3uO
zi$Q20y#7PM*JqfN%*k=Y37zQZ;zoWcNfzUv#QIMDVZ^&9S#j3RCrW*3P(jYW1s3AEBd!*!oItNZ63n2L%$;E(NU&8
zTbG8zw-YVwrE@F#(|*D}Qp}>bfBD~x!E|cgh=$KRPtQ3&l3^|-6LEAtgId+6cFnr9
z7_@|6ql>vgYofV!A&}S{I(v!sF`)?OzXQEDz_2%=tkAjc!(e+LtQ5M}y#v?^o!Wmt
zsY{lG4qUqg8ukZgalE>?cYC_IEzYeI=V}W5?ATpgBlL5Y*aH2(8;o|PL-Ett@mRV23?9qnxUzCF{w+5I6TK@T;_6id
zs{N-VJlQ!C1D8L7Wu3O@)T%P1*XN`EnEiN>ZANADti&F+vA<&7!GQM;tSaOPHtmmu
zmAw>6Pj+JDz~y*kSqE)9wL)d-bt}9Z&0;QC&cXDiM#1W0wvw|flp*R
z^sM?8{{AUp$RA-VcNkasIif}X?=ZMsX@veX7UNfL$C=06!I59V%EkYp+>nXzt`vc*
zR|7N5;f#CagE50wPOotM=#7*P5G4Xn{*G%!YN1i7+B9o1^qN85o5aqOA!7L}HUeGh!KHuf5sLN2YwcO4s|R?RwS(ql4w?xtGz3ixjF
zMzc{Hv1djn)UDA0EAHi`Ci}psKr9~Zp9Sxh^-!}$Z8YpS59hRO>zG^Qalnh~z>V_2
zgTug~M!>eEfcIa(#$SL?UYQ&6iu#A^>SV%;x4dB<${ivf;5XhF>|DYB@~A8PxD&%0
zS2x~J`*A1gAa4|$KFF@eM}RH|G{bFr*cX7joOdCBnJ2$*BX6|0@t`^bOudpbd6WHt
zKC^hk)e!ixvHHW`e_Lkxc}#885v%Vd;O?q6s8(YL_C;jhQM6&-0`zVu7_?Q;(0etm
zM=5j|%AC0S*f)D1IyR{X_nKd$h4(xhPpN}rAMM91@0O@nvj%E6?1*{iq7bxaIJ`P8
zMp_?}3c>rnLz6xm@G&EEb<3Vop((@g=Z10U*3<*ltJOsPwga*7*fVvVBy@6(Y}Oe|
zZ>HNC=;>DUZ#Ed)U+`8z8G=izzeC4{wNSH86ZDwugS$E=*kz7$J}7mDh$dmth(L7y
z=^X|S2!Lt*SFrsy9Q$6#v2JD%Y#YAfMqLnk9!^Rzrseo)Nf>H;69B8G0Vp?u-y4|H
z2F1y-WpOAfwDE^U^8nOX7N6Oe=J|7NTPVtUzs0Vgf@-@M-u?R%#&q;V&FZz#xW{z-
z`dCx1f2#7686NJ1b(l4MGKP2W2Oq;>SUjQz)Hb=!wl5Ts@gmrMvkIqIcZ8imT}(f7
z1s9gJho$5!(|irYAP*$$)kUBivTlTiNI
z7A)KIEKjkByT1>!2X;i0dhV$CbqjdUI}Wva#+}BD_5-l#VQQlBc6r|zv>UYtuY!Fr
zz+)ix1|{OsbPrTk*BhxJX5sK=p=0W3X>-Oug6K%#*jb&y$DK1&w^R~&)MygmGfUvFvq!MpM+FLJ;a=Ts*Dy8+&vJEJo{+IO3QM|Edm
zLSs|SMHUOauP0)*cOz6R=?eGmBhW_t2QK+*+#Z!%M~wQ$9j-Pq^}MtFKkkS1n2ngz
zyAi5=RS6y)$6?iEcPJwTWmh37SqTG+VklX?BgTzu4=0J>_Lbmm@Gh
zcpgp={2ssQ%X<13XxZr
zV$g*1s4!zQX16MV%$++7v9kL3evUU9R&;@D&F&b{R*I{a{e{VwIJWdBsQRzPf^VCk
z##a^L-f{?jm|PFCsKV5J9*@w+_i*>_-L&IDATms|6x*Wth;iuTUL1Nc;ZQiZz&7qV
z{DM;&B9S>74jP3PmCM4-(JbX55zHG7!l)LNQP#~7W>Pz}=uj7lmrmh^@D*A4630(G
zMuoO*;hNhDK^296PX37?>x!sq`w(k~j==7?#+c=E8s`qJKo3)HL=XEBSL4K58upGT
ztE;iUw+mt)@mSkMx1;b|4dt3x+y?%ck}US1Z>4TtHx!K=@mHy&q;j}!UJkXgSzrXAje
z!4(Yf)zH1TbYU6Vn`hTibOHM>+hffBYxwK%M3nQ}gr$35=`@s8v4IFPX^x5O4&e0h
zHE6Foj5*6st4A$tn=%i#T*kxa%vl^++)a32g$=|P`NUcW5
zbstlDbjmIybQ12a9f}dVss(Ep~Ss)BQWVy5%gYt6lV_1LG4$oFmCx(WEvQ#HO~2M^)N(7C~)kw9JMAp
zApD>WH>P7Tc=%hKDr=30XB=_9qX@rmh}Ia>aSCq08DYsHJ3QT8lpAny7``E19nPKI
z;TU;G4-3~j;Mp%WnC&6WXiRe+|Na(T_AqoEZGj#RqI`~v4ixvXcUKr1%-V$$hgPAh
z@deDDumN|I#GkJGXh9@1fU&VDY@D4SzP$}ge!hYCO}N&!F9n$qObqmR^(uwTxCl&*
zGFuSBzRb#`c*HAYFt;m@2KCCqDD|S)1?`6SLN#lBn3Qghw(f=qe4dv*OjU>t41-DY
ziCA~wG>)%n57ps0SS~2LDh6RM0}zprW|JgE2H|B;BowyaVAbi>=xAqvy3^0#icoJ@
zrre~{;mumdG%wKRjDH;E8*`_mBzMMmo!ZBPJ2N~hrgY$SFi-N@Uc9d5#l-h5Pf~t;
z=0L#HjypNr8Fl8bj!l_cm@EuLa$+u0ob^rJ&MTmA^UJti?}E%RWF(5%t)+ZRpcG~A_S*W1Pdn@
z6iu~jlkVW}fGTLyz$$}hZCIfR8n_6XqZ`6=$hWvSt`B-%^Ty=yT~XOw4EgOF_$T54
z!UuK1mUPdlqQVeu`VjH1p;)!^Z^Q@|u(%i+4jY8jPJ`^8O={%kz=QBX=#ueaghiX;VNxr&YfCU?2>e6LQM|YX8uV+4ZIgX)
zzvUQ|_1lG=Va+h7krj;mEfM-En7fn?xbt@asHGT1{Pr!6nlbzKA{O-(YjvZyuMEOQvJ_Q9r1x9pPXtM&t76#tZ{cApq_h>fOdEnrO%~(O)$#CX@?pa+geB8*PL6AP
z_rbqnKTIhnMR;foEWWM}x2*?p>0uH)YG`s%WZn>iMzuiY{2D&Ojf!O6lEud>ATuw4
z@(t|rDPW$pykbu+_aXycsq_CAP4!5)U1-D|p=6;d;T8CSjlfi1ELGfSaOU?2>tiVS
zd1)D71$UB$E$8QbfNy&O(|H4KAcVo6UAG=EZ3N7#_3=G=IhL36{30-jH>?kNy;PDr
z!HL}QQmS;zYmySookPGMJim|i2W<193-Zg5SqeiF1IWZ8i1mzM$_u=No327f5mX6?
z2zr&$QFL3~QPfl&v65)_onX*%4YuxpcC}Vw#?$9qbzHURj?nMa9Gc)F5cEm`2Nzq!
z-4<$CM_$C0NQ#e#LS~K<IUNLH-P@j<2doS0@^Ng{rE;)m+bOD
z?A`woRfny|w;s0Y@@my(8irhIg2jifV!VeJnzycrRqIaS*7qK$D|&$Ar$bQFyEPmY
z*DzzBKPvW{f^yPugvMCl>-uikdhil&t2|I6g8<1o;+)@zP`;@dZ=ZzRFhrlPrPzGQ
z5K}tq!-^YYwHq73ZBGJx-!OD=GRDlIJbx0E;64PLog|1=_w*WZ1CDWHnmZm!5iE=A
zqg6@+pZe|U?e%bUT&F;%S+Fu0_;or<|M4Q)rPz~0vG=r3H>+VjjI~K#x
zx=e~IBQ}TS`^no}V~q@;jLXYsSTSsykH9$Ih9*P=;g!MxF1B&F?H9}~@~obd!HCxz
zdRzpl%os&dlwB>sdxuxhF~Q+oj#+}kD-X^_$C_xIp7+jp{xqcKMbg?Iq0W`?-!`fB
zvOd%!`4qe!R`4iK{1Mpw5-7$UEL*Mus+9V$guR2
zwN!E>$~B*zWG?M6e@|`P_+=KRj*P>ytpngB)q_>NG1#`i3mIM|yi3B5m=|c;)#220I!S>^EgU`
zWYIpZTp56X7+ZL9Z(5??4Sh@X#nuyT(B$%2IQIS-Wuy|w9c>YLQ5eeQ2z{9d^2~_>
zd7?aJOLiGt+RewF+Bfjatf?3ow+vgo`Dtlsc-D%>uWNrto%!FOw4oMRggnTYL6C6m
zQK&P>7{+?<5E;W`VU*4iR7N5^QUz;m!4*M!$ugp?>xFV%Ag9|%ALW3>Fy9|
zbtpt1o;3DAhZe>dbMhi?7CVi%^+%zJbUt_R$BgZp}C5Z+n4
zp|nL>IAW6$C}Ev|$6+x%r?qBWW>nD#2#iF$?=DOq_iNgD$D%My_tjQnS0Y~**pXJ?
z`Y_ZFvuCzNqmA{^Y4B(aYh5O-hqM5Q=8f5~Gr$7wAP%q-Mj!wUy;CmoWcBLH9Uq@Bb7-%}@|#%R`11zGgg3M=Tl05v6Jl(`F>snUw52}m*8P8xIlGP`u^B8aJ}_uwl68f~gFA^O)0g9@AFtaS
z9bhew1QRR-X$AD2G8BHZXQFMRji}#gFh&n+jWWg-Xw=aQKTbb^t8rhW__0$6aPN&)
z4*B4;=#X6%9fqKH)^ICrkyc-cO}JBFjfBTxh*61Q)1)mtmMy`_zY@EFd#<84$^
zZmC6|Mqp$-e0NPp#_GWknFfREjPu7w?L*AX#OaJ_5hT2Qes4@0h#*L(Hl~B(FmYid
zc5}PGn1d9ya#hM)E)lAIV~MHH!!fjXAbzNAg2{u;(cLvA5N@%qTayS=FI&v3BPrOS
zV3J~RbWE#krFO-5yUE*B^=HCQSbnm=kjr4fZXrt8Z%i$k_O>slh>Ln)_`2ih_4GWp
zFP?|*hbF-K#EcI*DLxhj0!}W$^yNq4r?Q5lgEi!lj2rN}^*LK~IJ~Op*V*V8)FT@{
z&Kb`;2Y&*5c-wV-FW|!K(U<#x3MW#;HU$cn<2ZXZo1
z!#u;abi+{eYoKuzU+mfQ80nTmLD<>z-hr_{MWJ~Q
zca52^R~?=ed~x(t7{sa1a*H(Z*9jQDS@)9Ns;B^j$QVD#tz?zzjX~+)s;cAN@(R3jh%}Q<9X`4x#RNhPpj~IfC>dPfG8hOAu%!%
z2{Ln76_KeY?%&#TERXy2$xx+zE7)A!j3uX&(4<3scPORm
zb4F&SkiL71x9L~eS41M>ojxoqjdNZhhz((CEQ4wNN!Wkv7>=bLM|NU#&2+9(RtlLH
z#6{sTd>xLTKZvp;&soV7IGDhyvnxrEVj%p1hb}ISl-rlE2DO;08Y=Jfqro
z0zCQOknpr;EqLYs8+R@!KnPszZ*r-4DoIiu#cW=e))~VA7
z#3<@99P5ss$HBQxk$m)f3|)F%Jy*!sqXSwPU&YCbH*xgzTh#B+1i9#A$~9smGnh)>
z;cdps0+A8#pl@LbW8vNAo@nc-#FbNj;_9*UPM)-H`u&}a%ZM2cRHu=Ht%)8iJj_vUFFW#ziYV6>E_g@
zI>IkDf$Ofu5l+)xO{cF3k7NInC|rB1XO$ORRc`UNE|JS$p&pmL3J?*VPW}SUi~pQc
znRCqHHDMCUpBj$tW0vBl?c1<=T7P_ZNDt+N`5ek;*g3QdhAdc%)pHl&qMaw)jd_Au
zw!ow@_SiqD4<`P&9-G%M!q{FTaNM~sy0~Rsapw40NeQ&?RtfrUUC=3Yn5EbfEhdeD
z{r*AdGx0~P-@G1+#`eO9Y#ZBiI4~4%!-q4DUGUh3!w
z+c1CiM09hn37Ay=6oz*ngQY)h$F|ke(Eqzb&?~2z5U+f;6GOWU!Gg7`F?Z2L*m=6A
zeI??KW$lscvDoV#W)EI~b0N90=47ZebTNjS?8LXt+M=g-UvzET9ZO%=M8i^sFfjO1
z+)6R5yt<=}(Fu%h=7pw>yJ4-|4Q_etWR#X}k2bC^@L0b&I@J95^-qoJG(}zUC2ZSr
z3+k!Eh{eNQKDeUS04+UIa#CBvp6Q%1YSyoge>q^(*u~pUtcLDe}!Js-u-~${95TgykT<#
zCVFIi-nBV!fL}kf5SZ}Sr?rzBcT|apj7)&c+zOiDZeh={2bxYhWf1P&34&Z=hSE*?;v07}M8A2X
zF%V=`(7u%ou5Mm}(@9Oxq5dbH9Ai`mP3wwrY1R&o9us2Dfk81&5~8Urp;
zjt!@iQjBSaSC8b}m=?jJq%lUdHGsuCB|_4D#ImSiiQh)caDC}(47;c(kd7lYihKC|
zmp}1Fcwtfij&D8$|I&@nICWKy0x3UAJlN0^UJVURP8O*JUaH~Xx
zh+BJbELGX{98tpd4Q^b#17Xc6W!M$$KKU$N9_zzE#>>y^u$<+4j&$rZ!M%faZMiC6
z6;_6$_p;!PVOid>kO3VZRNoG+;Q%Xj2|EhFfYES;jhw){jqes5wzD&MbK!%
zA|%?Scdv}lVBAu;+el&JIsr@S6wBaziLE^`X=hp7Id=|!y^2TC29vOUO?lf4J&9slFHGH41-^e=!R_EAICffqA$3Z@Eae`lYZok-sE;D)!jYEh
zj)fB~kyh`Tqw$oVQPb2i?VYBLF>!Gsx!djiPqA$?%sWyWgZ+Y$WaSLkqB10nRY0sSg;YEaNAG+Xa3ze(bTSn=
z5)-AU*rpxI=YzJ2ty^K*!lxKNZ2-Iu*r2F565-OS7_f9Qyv#NCXd3q2>W2Mbox#|?
zv^ZM+%<5!2Nx|(@;QU
z#v5Fll=%^5fqSRYy^mM^Z+jM;yq)-e8gVD=6}QyOIB~t>f0Mbayhv)7m$P77T5u*@
z{tJimF7KBovmOl1EKo!}6hEb-Xvg;$-RuVJj3lu0ssT4?&Q@W}tWTZc)x8aNd_Nk^
zZrH&{Vh67pZcqoO^vB@|!|+>z4Qvcz5U#3(Vapq()ZbE+Zr=v3KP^F2-`U8AKBlbU
zZ9%JP3-NUPGz{=M0Gpy>M21UIb-+?gPOU5;4Ekw{CIwEe*PcWq^j1L3dHV(x1cF|ft5Y6T#-#1$o?mZM{XS&%8h5ouZv
z<5%`a+4t?nCoVtI${2O{0kpJ`n#+~6w0*J~n0zYTNy8c#x9<$vKM4Ws%D_e}gU#eq
zNWG)Z)T6lQ{R+X+BAAzOft^W;=4T39AD;8C;8q@@A-3{D_clB5{pe=6VP^!1ofm4j
zNukzu?wEHU)Cgy0_e0~;wlL5ufkqWeLHa5+K4sCUu?u#u>4ByP>tOKqrRbRD%W|e;
z`Gz!O9Vt+rJA<)((tb-EfIm{(D(-yU<7M|DuWM}_x#PmUm<1_LSfdeu|2MpT=8l;a
zf5tJzFwaoW^K;*?;7983Pu)|L;(V=I$lHv6c^klsM>}5M==o|c3yyF*-sad_X%_!+
z|IiUeN~do)(1M3i!~j@U<eJ8h
zJTwVL#hl=1ZJ6WX52mK3u(Q)F$?@{#OU-ce9FPNi=9cf3l|CbDdg*W
z?zhI?5y$WyHw5aI1LllP`_fq8B=Gky08aY>Z@vZ|59jA{hj?g3{6mxQf
zwV{s0$|BAm;^5!_W8-vQT;M8xEYEmTVjQ}(dk6Qe#V|gPtFJ2FC36Fg!KQ?a8*mKq
z3e7xB;cI1NNHSi%Zrr++M1kv5%ps=0)TVpzkH8oyXJ2g_B5C
zS@bSHlkK}?(>@cinY0K+M!sF{Y4$!Lc=hV8lO`2Nmu`h>JDU@G_MjDdm7SGycwRq#
z`c^h5r##P`W4Ao~pq#T7wqgA=M;zFJXKhplYG^<2M|iAL+36V>e3{Chk&P;)l2lt1
zxA7u<{ZMW5IrA~{(fkwfBupH-O{K;0U#PDxJ><7z=Vh?r-(K=fZwt
zF{=-c#=b998XxWK@(x&#-`$&n0#_EXWjTP))jWof#rA2yz0dyxa!U&ho`|~
zR(R_|n5j!zyL2%(G@cZiKT=RsU>-cC;#Woh`XUIJGy7v7I(2(T
zbisfhtEE5?L|8>>#7^4tUI9gl#0#g5Hjp{`(R@b-Yq~!HxrZo$o#zcTT6z%eA{W*t
z9@-=I+t+OrJ^9b%W3y0FcXmwJjbv_R1yiY9+rx4yv!nu#p?TtkOdGv^uMPJP2^QSF
zov{4$8!;F}ibVEyFoDMu!?bvDPILD_(D;{aL=pwVpJX_Rf031
zZ34GAc1ij??_dj4fti`V{pX*!QuPvV6Q>Rql9clw0Gy9LD1(SN_uWamyBMC}VAS7O
zfW6@fs1(ny`BhD9THyj1IZHwEz8=w>Y%-yd{BT^N0CI5N+oS7eWKE8vYXy<>-lAW>
zevLRb5cZGHdhyxL#FI+w=3R2D-&K9dQp-$d0BqTdYaK8<
z93UBa?h|Ko;la2e#baqO=|WauZ)XlbP`&-fSR{TNDYE0emCzOQ^hQxhK|aHDjVbIH
zq8E(7gZ&II^IpT34Hneal&lBk*b*2)R$eVCR3(q8{%$J1x^Q0Cm(;vVe8jm!q!0=K^%;m`8Mv`*y$;M*mZS-KAfgFLT`*FW;
zw&h^eDZ%lUgAdo69?MsIrovI!G$ZOMjcgt{6CwT}^{L}GUD&F>VAA97wbb-$N3Lfe
zAjEi~$AHm?#(dZ{BBstrKVC6Z$H2=|-N~d6eRdcqVnSs8S31X03H-nBgukOm5&Tz@
z&3s+(?Y|$xfDP~==9d4H=M$W+_@5#$7{>n%@c-m(QvMsR|NUO5n2aj(e+op2v;Ch{
z|9@LqAKTL(UDpZxuDS0VZol*K^RMo(&`@7nwPj0R
zUK})lZ^<%y4YM7~f=`Q`@gK#4NuM@dZscr~h2!M0WcWDr7Unx=j7+`NegF?VKDM<5
z`L$Db(5dB`tsaIg%QWYN2kGtjFp#rZp04-3p%$f!Fd%N#(8|AfkCI^=_MO~m!q$KX
zq8$)qOnE^B&5#JkKJx^$d28>IoK0Z{BtbL!eq)~%ina+BZ>PObY-y{-Z>PRNR?Ed%
zJ)EPGY1Vm3YL4hnNWp|CQk#rnPtdBBSzI&iC^?*1UaG(4juWx39YgOZdeFe$mSvxW
zc#&Hx;0bAi%9IV^{L|!sq6MMrf(MXN7Is3#T<@I8hzUy%@xBa`fHmpek(FBf1qnTN
zGYICCHIp!>00uKK>NMF)3V_w2GtD57PC=x24ad=#0lrBO-crn_H}uqlY_AZ!)%|&P
zzpJh-)ZBlena@`I4TtbLL&U7xlhRP|cogtZtzXcN-9F@a=t{08r+p0jRR6DI5KN+oy6ch
zC9pH{ZrZHHF)vf>(M>i|lLSt9foRFHJD%v>D!(uJQ@ZgAe~ORrE+DQD~@VV~r2~RLHjhBpy!NrJ+}Vs*$+bWN2KFwLGu!UUJ76F33DMN~eZCZ@cY2
z#JiF>rrR@pe29Y`Ohot8eQf_mIta@e!|;H4c)4h^M@(X{?xd|x8oQyA!<5T5vwD2v
zV?IVlqVLf8Y>~21e{ds3_4|^qZ>=S^B}7{`P&lmQgvgnyL!K8cgIXJiQKam?qJbnOo6oKI<~gXpYJoO<2pCLabSg_@q*7T7Q?-9VPJ%8j=Ql-HEUmi{7)-ZevsC
zGFC6SM|x4`do7&^pnw$#=9fOtGbX`oMZeWCI@W=-m}#wdtgv2~lPl
z`d!9ioto`kKVQkU+f`m+dMabWhxOtSLiWz>gp(V+_y_CMRDw@Dmfci|FqRhK81C;R
zn-1STIg;yjRRWakD4xDxMvV1#7}gq(snj7qM(c2a%7JQlxOB_R0`b{hFJEBK8_B0X
z=WMdfSkD^tK>8PP;rJ$`2z2LB~CI}Y+fA8$pF8%m?jO-H-*Vu%rEm-
zDi`42W#!mN8`+F*lcS##~*w!}cx5N}^Vnp0)Jemg3(N&1_xJ5DFo+Hl~md_5Ci1GU%ojsWvjkUNe
zgtGLRt20O*t#@gZ0-lH6;HL@%4GfU)RH?He~;2vBkSX+z_z*4HF%j7Ui7hpjaaFszo`53A86$4ntp7mU0Xn!$#&as
z=(&v-!^}MubJwOlOw+Up8>$;vMes~REN!nX&D(tM
zp(7^H7lPf(Yqz~w0N-~Yh;|N3ej(qRg1tCJ0gt9UYQ|YAmH&9sX#zMB-bFUD1kHHJCQQTcpg%!0>&v>#0zw#55
zGF}9wwx2J+>s*P;gV=sRF0q3>4`8%>F;ZHKHIuAQ!Q=&%5Qz6Qk2bZZjcLynuHV`*
z({3)ZBgbWsS}+nxl&=KwMA^lQksEuS89zf*=HkCSLk@BFc+QDuuMs8`Y+U>)5n+Kb
ztT}3d{Lpr*QoHM`X+*v3qO%06gJkxBzOyIrAlbfzwQ=~7)YJs(OO8PXxXmiTtHXn=PR{sM^L;iWVAuKVvsSwxp)ND6=;%
zP%p4!o@i~!v*qlrv-u^nG+BY>y8F?CPgkz6RHoATiu?ieI7N24snz=ZzB=TF8Al1m%TL~+H>k@qSBa$;M83p_g_{q{c
zR)|}Yu3Tezi!4=U{C7WlF)DMH7I))aZJfmYTVypSbUy<&%x5
zSBRwfd;l@rZhzF7Yr9qV>b`N)H1a_Q+f{EAK&6
z)jAHQpSFS9vLOlCj>hrnBSn#xf;N9Fwwdp1a@UG>Eapb(#8QHK(tWtwMwgRfO?`Q>
zt0H|6(Q#}f>R{r=`-72}#g&xyDI*3H`&IjVJ~Tfr8m+uQpR8?`ta+4%@9ayjLiyeR
zWhqB>gLed=hQsdwK>S?3Z>g-_|98kZmVUbV)e}JJfO(0KXjaLGg$IxWH|HyaT5B_
z#(ivE2CzNm6f-V^3^E9o}M_F{Tlmn&B@zH7p-`s#hJ@PG`HP#gb_Of|F@AE{i
zGtSc*cAiJNmZLW&RqQs~_rV*QDY?e9M3z9MP!{xiN3vs&G#@Z(ez_7%!1#R&+G
z*8*@izx;%sf`0N!AXb59qRCP`xrP7{rYH5$>+N3LkeS&@(a^nQqHv+2{}n|5{74mU=9;;d}z=vg-zP0;Bd&0W{UfK3xZ&w+@Zg|
z%bF6R%DG*)Rc{r%`@@q{4oXm~Gdt#Cm@do@s316L{!6G)4P0cxx}g!qFj>yiMXuGT<7KcOYulVBn2G?t{%`h(^s2Mc1isutd
zK`Tvq-{xEQErk8%Bt7zo?4ru|GnbVKt1Q-D|D7+TIs*`x+OCb`fQW1EgEKz`(A^Xg
z@0nmKAO*7Qpr*Gnf5=(#p@wIXnQO!=B+q}u5+oj-%1LkzxSV4H0t3H`Lf@%bWVir60>#$T04L(7Av{@{)|s@y$O@qM;MI4ap?oY3;||VT|~(h3jYX
z(_)Mlb`jeG&6QVapTKaaK;k%MfB}uDEt$+2lJ95!zFdovQ|}dbgTF@K?FN6Q=YMG1
zmsP!(-*v#*H^il~eF%2y-^q2e#U5}u?5YK}0)8`)uWiOQ3>TdCiyp(_
z2l|#*2vPIHE2hg&X=y4F%&vRQ$j^2_%4|VsZyWK4Q+kcwE!B~AVM?)ifsoPivYY&Q
znaiwJY7p8qjO9QZquG_Lelx666#Bxx*EoShF$=JYxaNQV*zW5;>;a2RpY$>#8RwDHfzEGw501ty
z`5Y*-Fm4(C0i;KdIJUP|X(xgfHF}{yrp*THCNaA5=Xj%AG<=eMIY
zQ8h}~Ggp@xog>4!R!!>NpUU=cXzvAaq*`Sb03N+$ocsHUzuicvq)my!
z$rt)9EbdeF7K-g8xQ$}RMdCJE5`Uz7L*E(zN&5YtNIPkKSC@~`T~C_dP5FFN6G>no
z$yt`AkHZ|B5j}RC%WGx{iqGYpUnR`Raj)r7WyeA>H3+#)JN{uee8LNH{IF=$h~1_y
z!5FNv!!Ps;NZH!H5`!zVfA)(moYaskyCjVyyVG2q*B2a>f}PR($T*N9v4w&PGeMIc
zQ;z$2;gGu@-yedoO$6RPd`CF??_u9OKXt!O#{Si$g%Uhb?l=BPBA6F4;Bqc~2aEa8g#JfI8wwz43~&cwoEj|(Iwvi(?Oha5=T
zQ_R&ynKT>I8uUE^Z`G5m_UHMZmyqYLl1*GxA@eqSJYxMXv#>)4j~|-`q+S1{fs&t=
z3~m&cc+K`cf>R-<>iM>F_wAg`7l?Kz+F_p%Yr`^x@QHk^s@Jjj{bjcClZWj7h_+N*
zdz(JmH|O%+md7t_I?c3-1(0*?GxB0RgQlKm2|8?-Q=s)`_>9)ywZ0(X(}}pi5qX)E2(sH+c9J{KtMMr(rVaWHvPl(%O
zPCo<`uTWn#a>~
z))#hS{#oq=siiJaMfc9DUDWuy_gQ^aP!iX2O+o>T-cD`9*s6y9^_d|#q1d!g?5|dG
zZ`||J2POT!brk8~3WYDP4@_<~WVTsow)PeEM!2mclk%m@B4t82g~=5=<|cyS{dE5TbW=_lli~l@)}n8GRFYsq^#YE8N2n#cI3s
zG&mNG%|j%y?o&sj-!N}Ow}KX{e{(PElGC*^5wEHn*nNhj**T0w~XadQ=Z}pZV{RVBx9{FH>+kkG-)aQ7?{Y-hO1EojMzC366c@
zrxR~mLR7y3t&g+p_a&D+|vT@?zRCD`zJ=jL&E
z7&`_YuorS4;74+`Rjp69xTj49vbtan%nT+6Dj5_IduFdCG@seqOqxtt#M!z)
zZy?W=zC0RkAYGwHzhfdPV8YN2V&A}y{o(VaspVjnDx7f9Fs2=V&-LoEuN2q!?B2x?
z&}^%A1NW4R$XTcE3!&Q$B6;xw?WKaO)JJ*@Y4j}LTYVV)lcDU=eD5~COAqyx>|z2T
zAO)l);NFSn_2{%M_l$`6Tgg=;TB+I9O60rEZ!Rvn!OCe?fXPJb=fT5w6
z5jF2?c&bCE-!Y6;zo{)tmSwUa66er4S|~fLXk8xZ%x7>|hQ7Mr8=~>DzmT0-R8VV0
z{Qwx(aNPQ0$a&Z*6gDzzivg-v@JW*I8X5MsxKKq07*l<&gqJw#IDyP>4{@sV5cARN
zLqyQZ`>K$+dOS@Q0hd96Tie<*DWLD=y@f!wjIa$S(Ix>OU($Z*@hokr*JyDmB;~wR
zCt}(0VTke2+^Wy@SAiuGc3WK<*PdxcN`y}nWrFp}$lL(l18EOZZKm2%;_HJ;bu?4+
zG*$sy)qQVL%gkJzYUKK^$vO}DOKS<4#gr(KXP+A)kWSl_n3sq=*r3xS)3CFJgNiRs
zeuz3v&3xer$HL|L%zT4t9*D`q6HNNKnr>W7x1HmxLb$XpB{?kWP=5;#dyXzHMN8%E
zLCjFH?qy9}Kp2-e+}lBwcO^OLu&d9lG`lQt*kyqI(x}J3^)V6k>5Q0aWgxo!!^nsO
zu6ap}gc)EEe=gW7TDfmHMt&t#`^kn-)Y}R50=U&~eO-F?AgO=aCNVNs#O}c7#k}Bo
zs$1tKEjrV%Lgu9gZ`!=V-LCmTS7OesQ{cA|b}$#`OUQXTL_ISdo<+O|xNlmip7T|$
z!Zx(2!OMHQHL>h~WC-1M5l}~?AX-EKHt5+(8sqib?r9Jj^fxZpdG@yhtG15K0gQ1m5
zxt;PVQj`3L`p7xK%<-ql$!jC(h1q_@wm+xtzVgLhyr-r(6}#Ews|ELy$2!iiHVMbj
zX51-akHldH0MEF{sOac)uek1(c-$j@fdC(IL`s~W^^Ex%9jE36jPo+AeQis)uB#bX
z!6&P{4^LN_yJp4ye2;%Yxe`0)o(0Db`2L!8E^@nk)t^p!L_y%
zioPU8%C7~Ha;Nc0q4nHRnsQg!;bHF*Mu-yc8o}0@LYd;9!%!~1v29qzK_gZ$*5xGn
z&5aV}($Y&T=eqRDD
zoLvF4kf<jIw)itt!4
z6@2fNbJ~eWLg2uT8sVo^96{fW7}b*-b<-AkHZ(zU&}hpX{Ds&ld3R$ea-=5;_l{3R
z1&^Hagr%4dqtqEN&)ng1ZBzjRdX2$DJfcw@NqlayxCP$$i{mD@wCb1$pu14YLLt%M
zITO+w+0aC?F_4tl)`eieZlJLLZNSrn`zo#eImEhK7L9Z%lV~XpJeeF6np#7TWV_v4
z#DTFX$0Cq-*4Y-Sqq#INO1akkN$u{M$)_#6rU<wU_F^)nId9!YM%8)LDhbebr1n$
zhl=Ko8Tn;1F2Cwz>yO%<;zS40^wQ?xjm
zYYs#Zs8OWJCvdXK<~0JT#}4mW_YwJ+F`ag)i*Px{8(043uI0vu8drV*t}6$-YxsUf
zIDf8ptQV;hvM0=X1_>H)CsLo|oN4|iZTB2pVS}g(AkmOdt~6Wpw^oyb
z!LO*1uBv(dT8kWR4o}+cu3AP1i~>JIj$*`8A9v-jR33ECnk#qTS=7mZKwv3FGxD%5
zyFcEm5$dQZMMUky_F~BSPy>8h}LhJ@fan%Ja8gi=7
z+q6r_i&G{~J?gb?7x7dY;pLQo>-M&&Ng%qwa#t&KaKZD~Bcv<9G8ZMwufVH9FW7
z+Ze=YgxQ(VRphdf81|902?Al^SPP3NM(BO)9Yfm&bkC1MOj+XcHf1FRaty1uDF2~=
z+vkWwluO1y()O+$M#D7L&7y?<6x{~R;H1(uP8AjY;JizM$9`r$)tHpE{$IZsfv|1E
z-LXcZ)yCEsh=`H;@e)|t4t9+Jx%cGXLz7xCj>YYC>P+*Q3vS(OGjJ0T>PW_j;1W|#AN7Vzzt*k12T9S3u)pCIX-|CzZ`K=$Sd
z8Og0y{)T9d{w~u*eu5T8j1L#e$h7q%swT_;@}IKnu3icofuD_cnC*2t^iIfF|FBq`
zjz`a(>RLp8uptBI_~VmYcE!n_)q4!16MEsx4F(|8#IX06;<9@cGimi#+CME0SOG|=
zrcyk9y=JnLZeqFL)K9fP4R_3mKKxh*Io-*0{|PkjV$cBnLzn<~$9MRgrwyhZ(?K%of
zS=5T_4q~}mn8n%gqCA>1^{u{<j6q_5=EGxXyBA@)$D1|c|N6ncGPT8MKtAR_pCHh9sN=xDAOB~9
z?f;i}oL+G-qr$wH`=oy+MRrJ}&$;4o-ay9>gEvR&<`(pz;1nZ=f}tXjXKq$F3ae@Z
z>2OV)m41|9BeTHSjhXM4eM+1~;E?!g-#vKk7wzf)2Pjcg_{6{Jf6NE{j*nDtesVCY
zU=wH5Y?}h*s0x8FT**<0RED-nGvE}v4%xiq^jnL|V$^g6`~BUU9x8alx`AsD&q3f~
z*Oxd85m@>+4XHbq+TRhEI4j;)9)MD(2lwRU=p;3??m`L~A_TXC&CN^sp5293?SweV
z`ge-v*Yd^2L}VbW8fP``#TcDob(y`Bly=5gGC8rV-K+3d%~n-*a7~OHEQX570B`{n
z>vJH6+cBX;Q2^0aFHa@$7s)i94^}>im!eQlH2dU|Rjk5T^Bo*0Hw$LKB87r&8mI^P
z>{h2}$nr{ycRguW%5n3;Hae1xPtn+Ub0Xo)&MuYEFo7&h
z9Dd!_f#zqYCyFUam9x|TCV7jEU0O>1x9&r~dIG2*4%TXAfb7DNF~Srf%%C0RQFvRc
zzQv5FFym*CocOlUALlIDi-pvw)*_57FR8-s0y21R6<(_~hZONP=K7e5T!GR@g>48r
zZRwMu=_sFz#sxN^<(FSiM7F~w2X$x}ZH{c*VPFYG*d6s`Mam~ae4pf@v-bgo(%9goT;
zJj9^xHvmqEP;zHbjgk6!`oz`bSZ?hon`wW(!T<8*3J8X=h=3m{J
zurcGX7%ix5d2RSCjVTDXCkq%60vfoCmR)Fm2UH09gwabmOrs=LQ;zGZ!zn+?N*~a44FA4XBC^`0GZmwVw0y-
zeeCSpmwX%#6JAsPM`O*cIgrqKVqIzF{qaKe+?mkz$5x;OM*{Xw=S%a_fejR1!-
zfz}fYvtjpo%v)gjGdIIffVG@XVDJ9W4T_jBGn5QuXN2-M8u5#2?1pCW(-4Bf<%7Nl;dTjvsq|rD70?wM(x#mBF$77i
z2hKKb9vnIV#MD^ehDy`E@UNPNg*bEz2S0}rc8ljN>=@^wRzh(|u_wg{SPt=_L;g|{
z0X2yQM`6~)*q)-8bB*Fr9l@i`C87AZ_XpJsVu1q3&WmxS{jCiInJvZrRfP4)3(*hq
zXQp*ssP;xX`*CGeJsas~2}tC_lVM{jz_=ydg_Qdu$l~xlH@v
z7U~}$ZSPjC&-kTt#qm(ybXbRe0<7t(*D_IUA7j_iHDaeJg=;JxD_zitQc
z1PdGHJ*4zNEq$G`CrU$*uPd=_T4(428|eYqKfDPD$gtz-xD
zpWa^sv8IX&v*$KN`~gM%obKR_gd&bfF?^Oz4t;nogH?qZy-QC60g3@9RxRDOJzN1F
zA4uf$zV{vAwAm~B@^k;5|aab0*>_Cto{Hzw8!ki
zRw&t==%D$ob(+jbU1vcOsWq)3_gM%p=l=z0H`f
zPJ7u(93qxpfno7IfiSs&_Rr0;pG4Qxme)7v<1>p8@*D}m%%t8}dgYx1TC!X5O^vCr
zeh^esG_*KZ=SzEw%x43}3LWsqc}nCdsQdDGIvye@te97Z;N|mjR$MG0b8!KJ4KLos
z(<{}9Mv0@--{P3R%fEWh3dQl>=gB$cBU|{!nmHsxTSBD@O~%yZN{6lf!i7Q!CJXgl
z8=~WAOduPzqary*yH%&)eB9Vu$rmhOO@A!X0|g%@lo
zf}DEmOimS4i^EG7N}|D>30YVHK$IqBhAP8_#O;T67}!Z$@LRD33^YK+*uaqp(tgSW
zU=cx^QbjIArkr48dH#7Ec^;jBjOT>BPB_US1v;1zWIpwx#AdM>-b9OQTTSXeFPy0Z
z<(cO!tIg9;%@$MQ1j2c__X)XXh01G?_qNpzFRjaQsCQsAegT9cdDA{W$vVd1T&qDH
zg72NXj?rh#7tUja){jdcehpZWQWg0+!sW1BrwNw_9R{m@vV*L(Ps|
zDDr~4@~K|BF~u~h2Hpmh(3}~Av{a4T0={W6zLnNr_-69Qb{ns}JNx!F@paRwW2PBS
zzLE0~u^ncS0&{+03WHq$mod%5fk2yNT3{(`FBMeAJr6H-qx}=tlo!vb
zy>2PWsW?;#evF*h7>5mv^~=Ci37OWjv%p2Su|&}y&UH8S9h2gGrLAQmpYec-(^rJB
z*fI~ke{4KpX=ZBxEM~P9C0q2r+9bxb4tm1_I}5tFupPZ5Nse;Fg*_ciTB`MbD<2hE
zcYCZ1=O&$%;-c%$){cB7VUWt<+|(3JHOkUDvwE?ohl`yPqIG_X)YaEr=O3`mCB~=W
znw?fL#&uiXZt%<&?G<->-15-?Lm<(=in>G8?{17JDQs~?NT&7u1LfJ(-d=xI#TdEZyZC30KmNlDopuTbv3$lGJVg{Tp4i|R-SK=Hx*CY;-dP?MK7A9
zoSci(_u@~CnDj#t_0~(QeS?F1yfh$XiJ_kPWCksLc7nLLIRCXa*T97!GBvxSSsLja
z_q|bk3F=_Usl%Bf`-4gH11H{-#Tw|t!^0T4Pb%G%l_80ViN+<8R0+dZu5B)-lX+rD
za77Vev*o$D5E)#yjW7r%!J(n~(kYCSmO7@D!AGgvgRw^E>mA7(90Vx5u4hqHvgzF1
zDxj2MeV2_+?_Dar8xMh2*YlW7ecUd{a6}&OYCCWr!pTP0d&HoTq2XksjTYSRu&`N1
zE-u`aMw_~qGLVa@0|XTx7tqXMX9$Bxz~d%jaHZZN&~!}ug)!teqP@L+gtKyd0&^Cx
zvxvHSYL=2%lKqkX)?$rOvXn}kcr?K;@8`SZG)y2+>zj;BJ&}gww{KrfO-)<&MMOk4
z_xIs^eSO)@C%eDWe5RFm*@Z(Rsh?eo07*#XtCXp_$Y3kF^YHK_o9sGR6!EtzBx}cf
zj`bU6(J?muyt=wdlqglZS3hIuP*HsnwR@6f4PnwxD@~&E6Pr#*h*Vcs7gdT9qa^O6
z`18`)&hBoKCnG!iY%Ny!P}+Ln72#?4SkFn^EPlm|>-eWC#WD3qF`fR(5wA8-{U`QcW7Z_ksDC^BAY|E$9IoyE
zUBG1Js9l@$18FF=VO8HOP#R8R{%C8UBd;Nml_Cv;RNHF`Ckwh8p!jIwmILd6jf|rm
zDqa7jRzzbXF*>Zckp>p&WO69&atNvgsvtMndDguJR82;Qb-pR{(TtBfp=@jJ=a9-p
z%vfcwNdPPm=&7U$HLUNSiC+~0sto7}IG#+^(f5v>GH(a7ERoe-QUHOiGA@TwxG)Ae
z-$3ulAA|T;{u%r7XUqNj^?!sM70>^O9$n%37jjz4VX*yUeyvZ=#)gTHkN<_1w)VC%
z{2x(KjCfUf+S%C&kBAr`S5!9DY3%{GKRc@|o%hyo*=60*L{VT+QgvEz=S!$yH{(LV<&$jRWSnhuk)_?F05GaQH
zv6LT4@&nxXubQ%zs~r`vR7qyP+Gr^!Y^;$tPKGe#NAFW!aHvtd3gtF_dHNMq=eh?*
zXGc!=p!|JERsPJ!%*X|P0BCUxsnhn>BfKwJhQfQ76^mKe;n`UPJvtkJttM{UYW=Wh
z45hAc(Xx~r(84Ae$qy=Ep2I)laVemaQoM_eIA1aBGyx8r-RruU#Y9ckesZvq8W|;n
z$=`8p@8An{!$ORXX}pHK4}84v>C0y8?)t`8AB!risL5Je|J5AbjtBf@NKUkK9u0Dx
z>h}QYpG##1f=265N#BAkA4tZ&v;HY$U-t7wL=-7krPT0i>Irfk%otTQ*Pe#kqu%F;
zg@{dR8q~{uq^*T!O2dGPd=d%=HdceH8}YZ{SINo2Tu~W?Z~Nc<0uOeBScy`eM~
zf5V_HrgzCn4}|vF9q#zqQ(C
zwigo&=$N+CT_K*3c}Ct|@yNA%$8COJ%CT?lj=Hxc_N)5w3U9T3>T1E9P1pv1-uVLQ
zvzh=Y@9Vl_3Kjh<=(n)p2Gb!@Uy$~Px_8jfxx;0Mj{xnyET{iuVlX#+RCyhd-}6*r
z68Stpsp~BmSx9duYeZzIqH24F1wWP7p0;uC`40mfzPs$ermtQ{m>{`HU=~y#(>tkd
z#ude#$z%6mW;jvoJl@S>WGZ_r^h?=e%O{MfuECFOI+PuSz)g6Gpvs6BRLz>NVFZp%
zedvbccJ3Wf=lA7ahA}sgS&MwMeSx!mO%I&>a!j0N=h2TA=4-(FmOX>vzbulOdH+Sc
z^A<9BgBhg>Hpx1<4oBz(9O8+?*gL~eTkSeW;>N{*3b)$EA4|D`W3U+n!{6u(2zgFi
zdQK}yb=t7vo_*99U2?*01z5#-g3-M0{?NJI_yb4N-`RQHb)*D7jf-z4Bq1?>8MO@i
z!5`fw;MPG~P7O=^7p46}XifuBF$uiv-Qr7xOe*KA_l+m=tg>FXG>)%sq~aF{B?
z-30DRy|}Vpy;)~^xGEgD_|zjh-CA|=*O#cs4jIbLqD@K`$+I!NaS
z{;Axz*xEV#gnE@SbQv`4ByPA*G%?cxQ`i2uEbS)tpTR4pWR*6WFy0fP7xnO1#*29<
z2SJ@`T*80iD2BQcg91~Z9}#oU)sd0q`s3ExYCP}BwJ(mt?L{eSya%kU=@Mjj|2Ynp
z5fUE~=;Rw_$c5c;;X+tYf*HnY37_F=MfR0>Y~|@|%~1rgg(4FzpvR=h>!$PKdQ?Q91RJK9*r`PSQ;LOeu15SE+7m7quMR+z2q#yrq7Phte#Mq3vqNZJMd*O+3)6&*Mf>JyiD>>oAuy_WbY=v)u$_M=4J1s?#TX`ff`G|7
zU9{DbrFO>IM}b+y);EA6byxoIu|XAOYfLDCwbaohBqZ|`&cp;Ep=4}=kobkVDFaMHvI(ZAs
zBHf5jyg%dXA*0YXC9yoB-{Fe=Qd&Zjx^$!j=A|2sNP&#i)KKGgKjoF35e0I37H!?;
z#E_p)Z}V`ozRP+wzPDuUY>0-(ZDw;1Z
zzNmn7_Fv;gfg1?l)?=Dyfe-AKvFi)2VxX1`mL1^Z0m4pb
zy-1>Sq0Aw-^8-JBo)+23v=w69C(=-vcT;l$ug9RU_MQB&iG&;t?=lT~%v!nAKgj~r
zb~TeoC@Z(ZvGe4+=M4&3AIGXV=IZWFTwPs#j!@O_zra?U)Ev9|=2z|iw0z(Ht053K
z768k|#iex`@vmWE-MzNfaTHDaVOjL__Vzk7a{PmtWAJ3FmIZfp`9|Lv)JYdfQ6(VY
zGU1jjERfI&p0y-k6{iGm|FocpV|cxb-u-{4Cg?T1@(aQjrsRUV>=
z@Sg=Tbo;-N%YQ?0afn$$zUx!pia7v*Jq|IymxsLy3k!L9@s$Tz)a^vBGGmPfV>I3<
z7o0!*o}InDkCFy)RM3$WwabSKWNd8A&5LhLVK$L1=HbzrpP*>9a|F)fa|NGwy$`H+
zcoJt3Wzc>3QoUm{md?TL{ZyZypkyU={kWj#OHM;GJT+X?(#kp?9vN8&<6S9ppT2nj
znlz~uu~}`#7>p%tax#r3;HcHUlVU(6=7q_hg4fg2lb|k8B^LC;YjZtsbTUREZFeVqG`~WQyOHa|RyT$&
z4hgAb4Gs=Y7XC`2nWRU+L+8Eq2R0})RExk{t;uYHq;FtAlb1zENN5gK^6q@YHjN?9
zgzC70NPXmMcuGo2eu62c!$rH*LZ!6t$jjry0~r}vw|_|~u|zCMf2a2|{f5NS#YijN
zb;-b{p-SzLjV2BE
zBoit8b)X;1d3jxJaZCm%$nMfy(_Cvc(wnR$MR#RJt4deX1(LaA$pHKdc4_!Hmez|)
zI+7%wr2^fKuM+*+%i!8-tA5s{rlqCrUa3D(Jmc{M9+yG}@=tdf4Wdd5|4tO*8K%lf
zz+|hGiE%aNQB9QB2)UZRx%yH>7w?}kwSF;a@0F9u)iX`(r{T@+r)QeuGGsp;?+eOd
zvaf&>sa=|`3&W0zS^p#xYidkQO_^_Wb_9M`X?uIQo^bFT8OW8-
z5k!@e9UGR@98cWY*-~F
zN%pHq$@J3S+F51!x$Lgm*=)5(;=~=YW!^9G39Oa~5mF@`{FF;(Ul_LSZaaS`et*+;
z#%X1()z#crVmN-J2FE*eFd(
z7}US0;PM`RufWUi*|s6lK=Y@A#nTURQY6J|7IJR?-@;wx=YJ7&A~7{{3y1
zfWDX~$C+IHr@Iq4>Fqsirs<0%shft96@}hE)Ga53o;&
zD!IeI|86N1kT<|uS6FyiHj|RPoZ_UeWK~~r2kE3?Syks;LVql!n+DPSXX6SEYr#TDYcW(lom&vL+g(m0m)}B5M`q`0<)Y
z70W66iQhd1=XmF7q?1!_SwE}2lJI(4d;k=dh4bz6Nj+h|mCfc77g|~CsOOSAMsg(u
zW_^sm()e0h`_5*|H^eyqjdxcbxgVztfIWVuFW&7{%bREVxooyF^4(MS_>W(ivd-{_
zBqiam%|*4GDAF9{H5tCDds}Zgx}W@L)1d89$~)^}Uir^ZXPv$$#J5%_<|lsFC}pW~
zrp$5Pf{$2^rT;>_q-^vtCtjsAOxSU!rJ9|ta(bFlS*htI9Ltq-ff(QRW<0X;(yvNq
z!%Jvxa$L%F0Me!&hcQ?0Bm
zA5n;Z{4TnFlpc|e%#m!OGhEgV&b)P1Zmh8+lw3%<`t0ChNqn4GPF2UukySE(Yh}p%KAdAzAV0aPCk=)Y%|!o@$KYrpo3KVMdJ1sF>ILc~S`Pt8|?1
zGE4a|$+|aWFlJ1L@rOWqbXnSXgW2sk8#q
zCVpf5bF(4|sJ2*$IEw`d0R
zNCl8qeMGSd30VEvW!y)3~d0y|s
z;LTqmOP-M{Bx<<4a>#j}YMs%@5ypHwM}J6Rz4&MaZ}(mFPo!Psyp{LkiR8p}{eX028
z-+D5>i;;&mZxif*OCl_tr_}n8G1NQ%mrGhjR8S2nO^bGe{#5??b5UbKMaf0Uxv%5t
z$4!X~KIYRwTF%lPHanWRl7!NdChYxfI^3$d0#nuG*qg4+EB}DR&O-3+C;Vd_A%&l;
zT;X3G&aly{mXDtN1L}oP7aelrQFbs!XL3rJ6@Er_2L}uP7y}$~pyG@dLL9ryIWdiI
z4bCO;TMVI-Hb#pH3HL?lK2n5Q!}dk%4;vCj8)mquiU9a9s2(5IlCaw
zB{6KMU^ck3KEz%5J-Fdt)b~KopjrXxipg|)v4p;ldY6o+%|mT@I9#?*9u85uvdXx-
zQ%K&xvk-CKGab?1ZU&!7K~(nuv9R2!d^t6Q9k-P^l@@zfzqe|T+0)7-RDv!)k8^O|
zfzj~tD6;zA()9igFzQu-UZrPy%*vAg&5XhWkg`#wWd&UrdGA>ko8X-mt)YIa#rjF-
zAkVqcV4%O**jn19_Nuy&3tWPWba@)s$V6t7iXpm^F@mOiOU2Jxf?h|$vH0>@Z0Lf7
zb0pVm);DhZHZbIh2KL=J>-EjniPjS2jdo0;>WwPz7`&XjdjZe%$cQc0?%>VqD1RNa
z`P);dg0SeO3hVxhE;XJ>|FRTG(rNFl9M#G6k??-|Q0PjZ@hRoJq>yuPKt^Mq#p=@i
z_ex8Q)o~iSgLZ7w{L)*9J3}RhzHskVEH?3!0a!CvR&;lPJBl1y8YgogoEMiq6}`;~
zx6s=0>T8YpOFEpz#c)$2soYTp9?P$yk{6CQln-~h1hr-A&bAD3+VSlYWLF?X1!UFM
zpC0?+%1>+aKvGr^a-Pm{wwUSgSOX532s-~`^X4*_E7nG?bNM(1-ezd^dynA@!p4tg
zCWjvqa3)~$k?C7^iiUSc2gj|5pgVD++Pvx)H|n9XEtvrIpkI5m6WhwIy}F%8z7m_zataCB#h66SZP(VC9QeU;M%
ze9!BhtVx0+81~7NG|=+BPI|6)ZAo!4Jzw~4Hjkm+sTV3;B%JJIdaBo?G?L>Bxk2CR
zS6}bJoleC&*)Bt`g&rvecfCb|5u%tix_C|!PVL!ZbU&@034br~XXD#q%671iYvVww
zD({JUeV12;l2BWKw_763L}7E0J@5Qc{)Hr(ymWVB9TTcvCR+ddZP}x9M_Jj09e*L^
zWA5Y9XZlD65%mciH&Z(S7Q8gn{^N(Uk3IgahwPU-&
z8en-K2(`pV)ILZudnn1g^LIXoVtDLpRD=#MP%*pOh}AnvihPZOb?vmZdqXU6DF
z5oMHw>W0+1Z*$KSi^_^TgwAgXwjMR`vwa=?{TTVy4Fl?V!uNc+0UE>xWTpQcr@BH>vK-C5ekBGxX)hi1r-*
zkjsfdV7@4uOnVyw5bbPW2u+THqLy4-XE(aqC7MN>-EYi8{FhoWZtISi#5(!9}Cwg{1qFZ>MZT1H*6BP+(crz^#P%xYAMkJLaF5}J`pDbm<1jz*HA59eNcJBHp~u>MPPG9FQ8
z0B-h5=oo&QIu9LATVDG18>hSan=1h;3bxJh=Uv@KHbP6#`bHKIX$}G8=6BjU6NxP~{ENG~s|N8P^MBU}z|X+j4bVi9
zxjPwMg`RAxV>)Vz>P{Uu9rZGx=wyWN-;Q`?3SYpcdH3SOXmP@>Dy?$!wQ
zVGl=tEY@Rmgn6rCipq_XRsRS?!GC{v`F8I?1kVAEZi_ajdm`bQ>Jr4x?->*_x1SoZ
zPNSTCO1;>040h|5rwh=DR@yL}brQs2SrR{Y!Qt`11T)LV?cYfWH7rJVw_z~
zg3EY3Mg@Kh|4`fElunHaDsw&!b({3$Hb$gA$hP%9iYh!lb
z!Y5kx{IqdeyCF+Q{HeFNYYP*@SqcaI$uVxD8o%?kT%ffjt=d(LAGx=3VB|G$s!$B?
zEMpdbfs{XHLq;&Hq$pJFCuurd8+r9FQUbR_bfUvhW^dk3JvT^k9h~G>JG`8#WMp3dolfl?
zxb2~5XQhXEYcOKbA3*(0QoQbS^JiYDF#b_L3bi>L|;xB?%-5QLnH^wGvacBtT
zwuq><*vb>HT1#0C+e|A3D+o=V#m+K0Dd@4#^a0#6aPSG+j|7@;GoLSzkyK>9*{FN*
z4sUvtaLXo-Iain0MUOef$}s>_UbP*qjCY!miU5sWFwo){>8I}n9?ns|y&mE7g!b(5
z8shsYX4nPbmbtWv8kKll?CS}Fmu*zfjYN8`XC2upa~|FLsSifoyamfkC18=~9cz52
zNPiEJVIUg$PmPI0&C>7w%dQykj5~ETrC#KYiM{&vXRbK8rOMuV8_hn_8G~MTlt#t{
zgJXP#yRgApP|otU2St5X+t#+MVa8{Mu^WM=STcK4ZpCYI$W-c>GS
z^t(%M;I(t}=4c$$&fg%(c6Js(2he-6UR0_58N}Sk$PEm)i#yM$39@ezzRV$tf3z-NTNDh&QuK3Uqi6}YW
z*uBp3wr14lF9#z-suUxEBjtMby=LerIV7Zvb9p9J;E?kL{JgD|yx?u>iUxBJSvr14
z$EDaNw}J0P3`$F4>kTR)R;H;R&m`>3kXFz1UczYdF#wM+WHyC_P+m3}SDN{^-Lmo<
zVe@=3;1=8_a!)DwsN(Yjrp}5~JN;dR)R5Z&-)DC%-~~>sv)J@og`9wvD{L9-U$;fL
zWvVT=gKd+DsSo+zX|HbQL8)imtz
zfR%WwYA4e`F|)hsgVFJk0SPA$rr^|zY#EntW_O7PqlM#h2_c}7Rw>r=I
zB~nI%@$9k)TU)bwo9@Qr14hfios$lUGP8B{(=<{n$;^?X8By39P-B@Dy(?9gQ$}_e
zg6^YH=VJt)GhsDdIPxzUH{D>HFzj$l12JGtlEX40WS9@@lUeVNmp*a%b|ES4yX%XN
z;J7i$FuY1#5+5}rOId+jvH_jCa;@S8$VxHAWyu@t%n__3L$%6N;C&G6TsS|rin=0)
z`h5joOgQJmoLMA#HPnzS&T}~HFRfIZZ{K2=R>~hR-R#96D?N*e3*~HrTy&{UYGq*b
zB2%7m@84~x4(*TDMb({s&5-F9{T~scC-;i#!rI4)BU7~dimg7?%u*B7Q6N^YTfP@y
zl4gTf=&1$I5->^qsag1%XgYnXSwwaTK@Gip!?<=i#8nSR)#+;2Arr)oV$fH^M&b|o
zGQImha&F!oHJzF{06(Ogw$|ooxa~gUZ*+Dc9j|coZbGOBgses`aVt4=I9Ck#Ar)Q!
zEI|8nu(DQt)dnIOWd#;4``$SVfXtu!?|QV`ZA-id9{2Jjk0#
zYGdH78}x-c!^&DBJFm@3+rBQeC5V0wlmnOGhcCLbVy^gX#HOT}=AM+eVGlr3+Q{je
zJXe=e)biiT8rj(@@E86DfqNKHGkjYEI>nS$^J53M*)Xeb7sXyW%;Bg`;RianJcdN4
znvI+U%HR9_;#>iu+4fUg$#@S{LQ$0V!9!zX{ZEh2%+9vtH*&3gbuY5P9Ndp7BqcyQ
z@6sX!S;91Cw|Yp|MEbRROM`Bu*M5lS%;K^))8_DnGtGk-E^ze2WSOGy4+MFxN)zA$K=Z(_CkP@K}oQApoixLjUB5?J9MuA#i(l+me}
z1j-u7r1clJ_*_@5)G;t)YsJ-@#q$!ut41txAdAU7h=B|n
z+6^&|e?H$h#V-l=<(Otb9mO__t1N$`#}l}4zpDS|o=2Z)6G%BVK>|tB^@d;`xv-k-
zp+O46T~cfodd<$ZIicv8bSiId(-hvvf^WTK0oAoHwZ^uee3ZvvKqp3SS!fG8>{wH$
zSzA0#(9U9w)P<%$7KTL7sAE6N-$!F#!uw2Tunt~C0EVn(kfWCou|?`v^GQDPix;2-aI`vjg+*X6TvU7sErPi_+GedbuE`Ie}m+S>x
zL!TfLioJ+Y4pu`uKOAYaVJTzrvNG8VER36MB1H`^8N5{l^)|&wYoUaKt>aB5LP3VR
z;cHCP2;a!j1J^nX0kQn9JFcaRG>q)r25l6Y=4kVWzPreN{Z;j=Y|(wyb7*2Nc(?Zw
z#B6#QfLy2ME80LB>LT4n(j000X-&I>Xp%K)}JzrFGp8N-o#Ay
z0jDLdcf07I2Mr+Y?z~b?9h$S3;TFDRN~^=BRYqUeqDhK~DSCyd|_#2l3_p
znpT&3d4b840KHZ$>b$dBR5%~^^yoKmRYfl{dqVl$YwVB-qG?T`C*jb_|N6RgF%W{y}ifp%so;9VKmJV&kc1&Cgwt7->P
z!GOT48m4u>?&7E7CKJ9+8yD!k4~}=)1&R9>pKFeMwAHGQSX
zIJj#IWMD@yh_46YH^g9Co#Gx^4CM*R-^3#43Vb6DPLzkMXhK1sBjZDCQxVQqEGei#
zYSV!E!J$j={xroKstig}ftCKBGBkXKtfd?)CLvunsZc$p*Mgo-l-7~1s+q`~k(K0_
zQ*hIVt6bnB7TP~d*;QzDd2RdUa1{9<&-GPf{D|2q@M7mT)xiP#V96Z0Zv|g#MTG~S
z77c&#;H!L#KKYT?W%uCg$2*ADGKElyvzl#1v_KpsYES*r*Y%np01B_z_WZQa+AE9Q
z!>-^u1wS&HAWq>0F9Xmch$8Q%s#^j1Nc$>OUI3Y)q*c_G_RZC4c2~`VJ+hDZ3O4~E
z9h&dbD?sR}{$6C7)U#TU1L|B*zsvdF#R`A%0wYdzHf?;A<;I|=d44LK4EUX{0{6$;
zMD#(L6GodHmhb`{0jAdT;_VAjOkwz})G-2{N;UGg?RqtaYSIQTz_O(_pW5+R7$4(O
z_H=F1r}dc5pz7!c<>XkYt8>jASYlc{T9qQfHpJj|x5nTG<^Q=0;K@Tq+|YO1<){fG
z4wEkrn$wyeZ(C3hz^=BCH;zk4lPg9dMl}!YpY|(T2
z=k6q>PUEN-=h_O6PLEJ%*XNUlYZ2RP{uB*{#~RE+@D&!}++Z6Wyel=!4N!Jw`Q`rQ
zRl(iFM`3TS1MheAU6WJL`K)6PVP5u9eG$a|93r6C1zv^>Y_A$EUjQkpy4a~UayL*W(g1@PJ1Hyc+W!1$-WD1)&+35TcXtAq&A!F
zJ;Tb#yq+imM=Y4CfT``6hQMISXr(Jf|dgz~RXgyaU
z%Mn?3LG+X1Nv1Ag6|qedlyw{R$z%y}t~E_@N-S=0?Mk)1A)9pta
zpa7D3jUuw|_saI!`nvf4#ok+m)zLJ8x;Vkz-QC??5&{GXuEE^`EZl>8kl+&B-C^Mn
zf(2*c1YNj0d+~k$zxTyCd!L(gaW2jki+QGJs(YrotE#Ksap&R2Z7pgU`Qo0cG0I|;
zb6;?0%r_nNTc8!?>{P38b&*zw5ifvP&{ZAvy@B=9XVsY8jusY2s0w9%e|W16by($W
zjE%X@U-G?u3A!t4nT9sbObAnJVIVNOKhJ=N2|)EGI!xICC9k4xG~y1C7d8i}crc%*7g~Fo6m~{ILPP?w=|MAo
z5<}B9-%@-g0CzMi-eb4}9sxL+yRi!UMFKnNr=IfIlOyR
zI~y>*g0iru65WI8`%%#|nh8M-@82ta9Pio^a#KT?h}=smXiyWsmZz=E4k6-6`eaj$
z;9>et(cDjIgKy?$8NfH12x^1w0Ru&k*eBz2cXHU`ZQlcrZ@xcsg+kpgjfRh>e=Ocj
zi?u#(%iy6eqHOPOz35X6b67EFzSgJ}b3P1bAzzrwzc^vcPCQ~mbShB>k}xVSi~+}M
zA8V)36MVle9+kR`YX9IUexvDp1iFX%K)$?};ZPJvOc*Ysxe0UpJ<^h~62exg67_@u%TSl>x^Wn24hO<@YNtPV$
zY*3_}+b&T5#gv~?fDNfGGtF!>$wBelKD@-ZlSG*r!R?WM!^$Ktc?Ma-vyET%h*+}MV7w3b1Vx4tCenAV!uIjZf86|`6!OyThw?XT&0
z4H!3C(UW)kSl*9czeIHJ8u?!B>I0sZf#vI<78d%K#&<#?;Tz+0@!Ru{zGuUgT-JKq
z>K-zhr&#+{Sxf+TCxaP*H$n_G!+=Ted}qhzCP(++yVHOh?iO+!u7+21(Z_jXMU+mg
z8sArcK|n8f|C1w9Nd)1D$Y{S%
zRT<+L%ry{5gF<^Ba_q!c=}k8)zplLY(vzm&3u(CrkiimvM(`+=
zpem7hHb6n8O+Qw`B^zprLVYv7H9p^t?***6Oy%3)l&p-u3euo!d|mJs6vE9Y7Hc&5E|Kuuv3
z=3GcrO{ZmSwW!s*&?dXm&S+zbo!tAQq3I$5x})D}rwQUDqD|eN7CkC$K%!fI_>rKgrYV
z&gCf?*53wOG-0h4^z!)B-f|?mkW$kA^Jkf=*zQbRBlnDCEYGd#2mE(bKh=?`^)sDM
zmFZ(gAf8VC`;%VQ&Th}K(D5o+R}Q5Pw9v3#7aRe0AXjchxRJ|!X_
zv|9oKTZr;ahqhN1K+oD?OTRm(`WnPTrGh;L41Gk%n5xE4XjJOsSCcyTMzX7KBKaU-
zO}EW21#G8rogtEsFZ5rl^G%Fl;S1!ey{MKc;YM4V
zg=+0e0ufft?~bwRSaPq0{J9^DVeNGosKcuxss1uEmVQ={DjmqxrKM!ikUW_a&t;Mv
zVk5&l5`^askKnUMz5cuU9ilo}lJ+K$><#myN4aoMRhTL#&aG=`WdD`%WO&cC>L?vo
zw^d3S1OYe?_8HS%BWU$jBpJulX=XUZH5Ste1gx;}m~>!pa^N>)Y!9TxT$;e)N$(U3
zw@{^pgSNWn;Gt)wb6rnplT7Z_JFJh(fSQa|;9ZZqUQNA4~B=Jh;#wT;*gl>ZHT5rdJPh*ykn^r)=GZo6FY8KK-n8<$tIR
zCRuv%bWB9=P+DXsrQTkC2W|Izlmdj|#6z_~Nudb$lOFbQsYe}$hTMFNw1#DScS!OO
z9m%)~H*xHh9}K%}Xw?@e2tUX~{UVOum?ryFzwZ9XG~P`&fD|JSxMCV8=d8~&^HIjh
zIYK{Lq07TUp>D4wFM#|ZZ(j{n@t0gBiP`TRs%gx|#;*@7n0m@&)va%^zipTYR0HF`45Y!5M-Dwbjf72PNNF%>)J+%66cWdz={OzQySBD(Wv~_
z8^kXnMCXT`MDLZZ)7y1`K!rtraN(p1Wi{3uD5B;Y4A0v1_OO{zQhF3}h9~#tE;+rZ
zb2#dGX&`Ugk^R+P3)A9#4Z3j+-qgtpcIP~nAi=;;U(S=aIQ;k`H^rKnG;KvWp=n>J
zU7$vrMWl=gq#hxIaZ2u2`dI=J*@PnC6%lD^A$9A3f9FvYeKoodAlc&Jm4
zu|8gokTJ7ip*o?YGz~*1n*6>M$x8MjjMR8&HjwZQ=ybxqZ1L%W_M_it*#*#t^HCaP
z@sF@^P7u9*xR8w)BxPxV3YU%%{S`;T*@#V^
zOmXPFo$Sx~El_b_ColBDpyV_W{s1ltrb>f(&U02ewWO{2fFD~Bju*E)8uKFwohf~c
zHHRtP<0@XjVJ&TB?qEVKbqB74Ez2TcUMW~`%kYYO=<1K@l_uE@UV8^;8p8l
zax(Hq&G2iZE6&V7L7JRa-EY1sflP3UdTdlJ4Op9Kj0xM{fY#LVej$zL>h1>Wx7S&i
zzGD4#Dj{C3A2W{&baz)+y0Z@oM@MuHtI6iDx9=$S$wW{qfGi=qVcNAk)Od0%T`-Cc
zRm2_etO{*-%Da`HO6|W^>d)n7)V<>lJy;YNcN~sP;uGU9nJ^drnP>4jmy8Z2R4_7$
zWvk}iyHiLeyv;|BbdlOrwfpvKcvvL$4cOw!iQZ(H09%C~e74_#{igu3#i0Lf9bIhb
znU-q1H1xEk4n4+QdkK;D_cB8subkhKW~O^F;0f9rnyJGb@X8{VlUv>Y6ncSDuY1<~
zHD$leN-kpeuVZp;Lmg7_XKZwWsPrboUf`$Z9x}#Ljqge&;uj?u;R|L#VfTnP-95qk
zobbil*t>Y?oYmpMpYQ>^)@FS?o+G6;*sK-QIiYfv!3Jv+-g5dacKnfdPkv<5^4~Xh
zCZaxLmQzkm_O}dRW3Bs_%G@{Fc=>Uj%b;tE7YHC{2b>V9!?9;Gc3hRzl~SY2Rm+ti<(#PeU)N8CABx!Y;9-Jd^N_
z%0fq>^+@%`;#s{sbm?hJ(QXubH!UD@vP4NxPs<`ZG=Z5D`R>3+%$9<+$EHu6EJuiS
z3l0PO1Kd;hr#>c}XpRfH-@2&gb*BrgiBmsgV@eHKdq?oNd>W8ebw4$D(<$+<&Cr}9
z;5@8C;pXC)1@4&`tgA+2m&nHjx@?|88JD89AWr_Mv1gM;QBEHUlE3%@!46C~B
zQjhTmp|>2V%26GI|&W?1|**i
zg#$M%k!_K`EG_N3(XY%bgxW2McU+6W;?sVo^Da_w2e@SfAOEnBn0(faCyU%c%vgB}dS_7|MQ=u2S|rruo~u8J)XL5HTKlk%_@-^
zrPGt{xAc^7A=f0R?Hw7?BWE-wS
z_Nq@wRT%1*$-bG=0=F`!EKfrQLU^4o-23xOdlR2kcd_|l%U{h^ukUCkHTTz~{HfNx
znAZ9=C=6xH;yAf>@FHiJ%hTbg{tA?-%#K$p#gg2i;{6kVDzdIeRiy
ztMtFyNX)-k5AdeNJlt_dMTg7jntXZ0K!@2PZm$%aqh^Ex>`-~81;1Sm2*np;nyHP`;nS9fmF&S9_tJ2~=!r_rPY}K+|)S)8x|6m%($I
z)^3|qvcl<{L7%PW0s}GwVd4X}KysaNPceNmA-|TR?(n;hZKaMgPS}HdLTyUd;+8+L
zfS5|=E%j^__$KTnq||URdbIK0{6I341*q+p$u|?9L*kvN=)h``v)D^*hn+m^fzcHy
z8Q*w8(C78V9{!kx1~4Jz_}FRO94)fWbL_))luQon{IRRw4+GOb`w&-YLh?;hLoo@6
z0c2-~t@S)%YZ2=PCR-*ZeXA$zkv?#D&^=wSyp9_uNd|5F%0WvSWb3lQ3zrL?zk(f{
z@YX{mOqX6_mAn11_LnA_EXo}_m50}$MY&eIz(09VAP!&z$=|=QnYS=4=88ydtJ{;wv%8}seFZi
zxoBx=4KCVqb1B;0!QT=vPXlP_CG1=`fY2SMhzJM;5dPL~K-V0E;#Ft2!0uLSp^Pi{
z^=R=e(&ETWM3weybd-zXl#cH3)?^3d0Ksah_4xY{q{SGZtzn!~Cf~}S5%WqeR2z;v
zl;Qby>>!#~PIfEE%V!K@RLv$E&kSP_UX)czNlCTdEZafK^p8%y81h4&kPA#V$vKp;
zkkHZz_miS=iApvmG4Wwvk`O@+!|8@l4(@pa()`xe)~5zb<^5~#8fAO{iSUPScT3;3
zs`RLse?{kN%JPO$Wb+ZnDypguN8&IrcWRfYkn!5ij&!ctZkmME2zp)6dYr82ZYuf!
z0Im0VI21~qWVu_TI0j8+6-{;v{0yjTMyldkjw`Ko(%b_Wq#mbhtD-92IB()%kHL5Q
z+#GRvhR)xqb=B5#UItdB(9l2L9P=1&*t3a?udPh`@Fz+a6pN
zCOh&+<;mFp+W>J*&HQqrb@&c3wVmD~gCCLvRDXz#KYq}w-M9&7`sCt82%#u6Zu!GI
zON@S;9HTDpA0@l?Yn^R{eivmZ}q3=WHn*ik`hf0h#Y(m|
zWVQM{GTWfW`DOKr(O^C!3AW!i3tAPJThM@TN6@AXQ$+Kpijve|no!N5~}GR@LLZ1zk@yQK5SX(D}2BgO>Wi;!^GHNSzxsm
zWBPLlzarbgs5^yjY^@~Rup!ns#~ccPs-kdd$+J-m1>k>hZt5q1u`wc1Nf7Kgo^0CpOd+&W_B!#lf1v#W+TjrgY4QLrnxU%|ZDu!d%KY`Hz661H>zF{0$x}Pcv=dr2ZVmjrs
z!PZsofuuDh4wMB&-WTn6(jwT$pOl=Nz5^ezo0@08)aP9{FzDIvILroPVie%Z+PMJd
zAF!0F7UHer=V924cTs|PORoQFu~^~5$)Tu5{}YRxe&=5zlsu}r$=3wem$`%@(e#~j
zag8fs`}(q3_&-zgmXt450{xnd2ov+$L>+5@dEBJrKFyBOdo6P?o
zgPkSo!YS({f)Em(eBtzoU+{loq!ay4Z6uhOP!L!|N%cb6sI-d<*F=t(&!`6Cf6kcW
zU}Svf4xV>kz7)>lM~lpr%ai?2F!gqm$CsOjC%aj^gboD)r1&1F@zMLgxd4oBQbCL`
z%eJ;Q75%iCBzWD_>?Y=ayD)<1x(n_5HmUw2yeuBX!1?$5&;OVkZ-+G@+VY>3YUe7v
z9!omnbaXI?F1nUybypt4ziwZw6tRHz_Io)qGRA9IjsIHU?^^KLgGNhhhajZ6C~7t&
z{F_igH^%sDeBl0)NYUU0Z!M<*?O<<`hDU^==!88}=7-KzFNsc755j83tgELfgn+6whtao$FxUjPB$Oy9Z)4slo5{Cq0k5e$~`?z@aA0KqyqjD~meJ^VzjLJ10
zlnPI&&^Zq&EHIAyC9Vw_m$dk|=jsx+H3#q;A>Dr@to!0mogwx_CQgRs##KAM6m2Z-
zOR>(OB-vdWvSCcI8ZHs0f6?9dB{GzX(zolWu=TjCx;5`6vGM3$#yKi9a(Np1
zSn{!V--EC6#Tp6Q|3^$o&te#in*67VM1a>Xov&r%ef~2{zx{n#yFs1^MU&{j;@$q*
zqwsUBp3IQ09^b&*VvfO0QAy4NwbzVZfbRw1%E6@dQ|ep|MlY!!1)=tDg)a@^=c^?&
znEfoDJBd!?vVx{uR-NxMDe7nn;k*X(_pe9v0bR!I{y7)yyA&hRrbbMMt5>LN{`V@p
zP2<|gaVDF46lS|oG$YJhQb>-$Mluvb+qYk39h7#}m-arqqxxI^>FrfuR}1~E!BY3K
zQIX#A1?~%)mJ{A-?;eI&@N0%xi0Vrl*|>~sdi{;r3#euz1#kG%EwtGnpG9`>uJL;C
zdh+Sk@Msklbt>gfjTP6$J;$_1Z-aFJRqA^1598eayxi2SAcVHXjxQ9!!DEH?Ovp6F
z$_f)=W{DRAJ%GXz_n$@)V!0Bbk7u9vcT3n^Kk{I0af684Ot^xLv)gpQdNSdqDysL#Czxn&IbcEy
zVs0E+d<(%9P@wsKC-;NejYp9=0
z!IU2PTxbW;{(;W}!mXx%SNWl}iF>%hiS1b-f8c!uIFnoH#fE3Ey{Ei#@+T&8
z!PruJYZIcsJ>RXSxac-=_<&Ai6W|wm8bfn7(O?l7e1A(~7(XoGi$9*K(%-qQb*=eq
z0GV$DVc&EXQ#+A*sy5%DML#OkZy0OIjJv>LZ({$;RF@sDmSH_LR-((eiP+{lb+lR3
zUkWO5pI!6q;t(YO=((LYsE)6NX!2sOFy#AMC4ZQNi^6X=bwY9g-TB8!RQvNCVy*$p
z;a}TOsK+CtLL7!+^0jHmYGqL{bC8
zAI96vo1*^46Tg?wbMd;@lz3Cv3CS)Ns?gRLil-;|%mv!e1#~GbTjm&RTbvP+#MD%H
zx&aR{m@5QWxJ*cb&+Y=Xwuv=>hu&`;v5?<4)+#pux46d2R61+B+PEC=8?n${q75&>
zWK>L6%;hKBZ&ZtiosGG<683hVwP*w2;52;|aOvRS5R4!j1shwjR&=wKc){K+s=X2%
z?oxv|2y~3>sf+84?CHyIDlXEwu%AVR`!+}ugwQ0a;VWk}{;A<^i*
zkG7M+Z(~iColiR!3;EYT5N7j{-ESqvX7AOA?*Ya`c|OIa@z-N%#&D3~`7CSo_I{54
zTvP)%{n8bM;IWVPmnc?=;VDn|La6Suug^D{#!<;;j6@9%suAmy3X^Y`BW2OI0pSIMNW=0
zftF_%dR4#0Px9r!Sf+8#m*;b9{V-d|y3*zy+$U%z=jG+4sI2ThR1T5K4zGlwKeA6a
zA{xC6aHusQb&-e)#~Ulxn%&Rqdg5ZQ*M=4TC#H0%mF2;J1hIz)F|oiYY8Q*3aJ%z+
zzo3Oka
zQUf1*zNT+C^y_y7h5Xf~u4%464+mJ>u1WEfGGCMt=
z=B|ZB{<|4#4QsgZ@juu3p91{H0*DxbV2({7{tqW{>SY5wb5RIRD%z{tuv0Kr8iBqRjRRcL2E(o}8w
zv7DMUWuX?8lw^WjUSFOUyZt(Ln?VpV*Y8{jin6k@w<~N4JZ-v*dR_eNh<5$VsCy>q
zJFMGio!C}5h3y|+nZlx-5$|C`MA6Cbuxw7AcW;uNp1yxVEtJhVwpeQ#(w6h&!?t}q
z3R_Jk;?df{IQJ{30-LY
zv5V$$kpc>%XdGLZwuJ
z;89Tgf<_YUWmnu}wg_Uj%HFIGc?Zrq|2@<=cn}uLXaNC@I2~Nh*D1D8r5_G~?PY>T
zVY%2GTSWJ_9!of8lUuCh<-fJ%_&&zQ#LRvaV}mA?;Ou{-7$aD>n#>Xkfw;uN*B((P
zz~oQ}=lRDYiBU^I0ks?E5Ge7r65jV{4{`3!!)7(kcjYh0K~^?Y`?jSsw{AeF)95MY
z{f7@xUNH~|^HU(n6i(c!m+tjF)mfFrF4dW3wsd`P8qfaeg%O
zN9ruV>jDTS!^Yk^*)rr>>pkD#mXLUrE*bWu_4l3tiHO-fjgV24R)KBqw|bFI3Bj{v
z+b3U)@W@3k8<0}XBLdF7FCWe~)Ri6?Y6#XHy*BL2SYC4$vKKxpfvRd|lMxv^PwQrr
zPb;1_!tf7AVLAP^M$EZ?fWJHr#yegV&WTdG?Is%<9&Gf62tq;1Zm#>#klUX(
z|AkE$!
z?X(cL9Sgzthx!GfJfN@pG#ReAQXhdBgNU&0&>1`y|*M*E7D5m(mP8*5YwhpMcGpLwJmL0ci
z0ROm%jCe^i1O#YwO{M+K)4$Un!r$X(kZXp7F@C>Zkd1?|>HKf5F*T`DXr=
zLOeZk*!y!)m?ql#x8~B@hsU^rL85$gGF@j|!wsN506rMEdSO*C^P((HT{*w)x{qL^
zpt&PB^#Mqmvq1pjqW-o>6dLxRY0%`##a$Rf6)A;+ef;O);o*)#9hE^)48_V0pvTN-
zqt@$k*N9-)g*=n>%sz$zxznv>&6JgZ*Zp{T4y>}h*dJ1no=$jq%b=1~dPaWA$$IKV
zBc7JMtx)8^QM(Pxqe|HLje>7uG?$N!^q{^mWoN&LFNYCu>0uj9vAG4Ho-CQx-3GBq
z$ozy)^}c`}Z_W{ws_i^IX1(6n4nO|EwU(n-#xVRy40;*9XvZ&|n*^04l&Mvev@oE|
zH)<02uxgA}dV&4-6GvfQo$n6>$P-G*s5UA1b3yC*GKk}}sQ7xQ+(J!IdeLN}!(f|^
zRofymA!A!5@jM&%+{RrNhw?euHR77WE*A}y*|^pUI)ybA@b%SAS?uRtkP?e$&BX{?
zJjRqSxyUd_aibXaY?Z!%va(El@f>AxOPRxJBVmlQ?Pzf7VL5(`^)+K+jeU!z>haXW
z4|wB)fW{o)qjWtGY%MbPOaE3T{v&>+%`;;aTNU~BMG8%0)_lBCc~em;0+*d0MJ=1J
z#p_krjiv~-gO?#nqyNAvB&lUq-S9{atlgC|ybyfK+Wno?nh$gnM&pgIr|LWxcVyGV
z=4a7Sn&r5bvHHof1gj)@}#UnoSL77!DgGABPe#&vG)7$Bdn&e
zjAM4@to`5>a7O(ZbsyUaTsk>7Cq=*rW~gpmXUQk
z3(&HytItY<-zRFBk=b>|H9tqUoP?9-vRWU^Yy2p%+_!P)u7_xGlML7j|7`$*
zt98%(JG&bH^Z=exON#VWKZm6X!`gLoXNSx@EO8pA|1#ZV@$|n)d7dFSEW5V*r;#++
z>Qf^zSVu!-@wnO+Y<{(xUc1ytK!2DG&kmBpULrDVL?e&y{cT
z`HD1&K^lpQ>rP!@Ve`Fp$~2*Q!@X%$zNvBiK~LY->%W7c0UiyW53xpR@|gWV;;I+x
zUTI@v8KX^$vi7iPgj97RX?t8ch@P_z9jV1`tG>;m`e|qOuxRj~VYw-$vI(2dr7_LF
z_Ly~DWo_hT_x80bGlWEJfMah@VC|cp__CvNg@bkee5xKwE9*aI!ts=Ck8Llwp5*Fb{ASS{@u6tfaCH2M1?e9m4RBVKguUc%u5mHhxe2@4uOVryuY>gN31x
z5J*JJmJA4z*HnHiXd5JC(c<+=MuXg?9ykO!Ecm$)l$~050g;IMLYYA@_@>N8
zvw0>dXfbB7OCf$FI!(r0c(v`0=I&yB{$~Ybnp1V{&6e)Rzo=~_I-<{?&(CjvH5)q3
zb+K_?^5uQ&^3zNVYBUb2#H(gab-R~?Mk7c3%6H7(=Tz7e>QIidP_N8C8D`P(L6pLF
z@FL#_5o#JMg~O?AufSc>!|^(8QkT6Y$Ib()r$L#Yrs82ti|mdrd7+lw8X!T*I^YF%
zH>ywQNOnhN3HrlZQaMpH05!ku4kNAmm7188BeNx1E~7K2tePpa;mjMp?j384gH%$;
zyUH`TiuC|VHSEXU=nF;vD~3Fw3X14%RCiML3}1Z0CsOB&%IIHOXu`GS6F-n=QMCdj
zXh;b8i53G^ns!`^i>HA#r%+f;bL$+8F!Eh6_KU~Mu0l99BsDZO6*9%X8?(c#BEknr
zuAfo-3@Hz7Zk$TDcRujw8cKY5&2dw1D-SW2(OO{8F|Ozi@#mocT-y<(l@W{bz61CL
zXA8KsKwEXY+jy1BaYmz250`Zh!tM|2{1I4>zRWjC?RkAdnBrS|;qJ^)^#?wbTfAlc
z!Pjw~GH6YnI2>_Y4t?E-Ha65QKExet2d%Hce>oJf@|$>%&%KnB#6O8N=+Y2qX%X_n
zWN(F875mR_d-KN$OKT`zUzO)l?OrO%9gS2Pz;vRhJ~c@eiTK!|?&JwI&N>W!rK@3L
z`54bF%qvOJ%i4FRz#4x~2e0z}5oq9}YI)Bvco=+e?ATgtU+cjbE0^{|$v{2Fc5sqi
zHpZp%_tFo^35133CBk7hu!k!}5A#*5@ry1agF9si?mBBN!)pnE2Np`F5~VQ>F}S>?
zz4DpF7efruvv!iGzvlmLoaJiWtkawPuBc|yaJR;Wue{gl%IS!Mf+ctJ`%ymQ&=u%p
z>@m?^{gNu}ij{C)f)lj`jf|tSXSv5omkW~@=AHU;ks@M-0N_~w9Q6X;526$3*3HRT
zlv$3E&C0O2f7Ogn0U2n=9S(qX4yZDNpV^s|n#R`k!z>!r7yLZdFN&Gm;sh(h4YulC
zH8}=PT1!*J8L5aAT{v~6S++;(G$iY@aZ>eclVU1ZT%KB)|*cIb4=
zX(JNx5~|e
zsrhcLqB_Lf-85l;JUBjjQJ4CtfYTnXZBWx)gk@aUa~rTQrt;$Rc`wIYioLI!jY!bl
zbWc0M>xi^D$9)}f^P}9}WkSOD=Q#V-nnK8VwG1>{sHvOi^D^8^+s?44&|8=()9JI*D`Md^tkd
zjw}*2aUK=CvO3Hu
z)4<5)uy}v}ja
zV)z8>4$9yx$PZk4RaE4IVXrb(Bks=4{r3AeC+Vnp3fu4AzP50#fB-C1H7vq4qh2E!
zUh-2@;6xAlwygO(m1^NIv3uxN7agS-YbsOFQ2QOS8VpDuAFtOA=++Nf}EPHRtL>}9y?>E{Gs$?f0$<%g+8Lu`J5eH#5zkJJLSsSEsV
z97MJxDaosUf}ZEcn9v%+HhieviJ}enjDaq-+^Kv)%6Ye
zW_)hDhF3d=0jBvie>hiJ3Da(GetE7>dnnoUiHn-P+lilR>%|(ntujW;7G<$cHav_T
zhy0MC(4#pbcTZ_Cg@!UO2$VrmGxLG-elz*YxA8?kPf#L82bQ;4vBc{q-+_<+edA
z@jLE_5ecJEOR}c~9CGkHbOklmd5h=J9_-iMJ(W(*L#UTg$G}HjM5Bc7!A}0lLno}z
zTx29>TV(E--e-(2PG0CL-%o}?(^bi(7eAzL51v
z*)#EOnOHiA3Q!SG=iKGJH^kIyd>3XAKWEziazR;$ltZM4ft8~J!8abF&L3FBP}oWv
zltp-H3Cv>meHS|HKX>H-`$;QXi0c#PJF@<_=-9dqoGDPt=8Kz
zSu?!OrnkMLVAluOhg$=;YN5PhTUR`+DH`-t{yS>U|F#UR#MqSa+B0IEkGy
zbD_U*ij!)Y5`6^kCq;AgjLW8JgQyQFqa|tJSOfMLtHHSO{9nX(eFI?bF^1Y3ABKL;
z-0c^BiSeq&F*c`tZ>>;IeGKGxJKZo4;4eagm{MAWDUSKl8L>4URu68EsGYWQIR3h;%M^I2nK
z^%AHw2@?_zqv3BZ^ATesTMY<=2MSEW`dGp`(LtXJz-l0-(m{g@0!uPsRO@y?^{3bN
z*)S-y4Bc>7qP=UNh4RZ(_a_r2nP(+m?u5&<*sz4mlc
zhIEjtim52MvuDufGb_Ba7oHacoQxe<7ek9#4ot2+PoyDsdDM7qXa8Ju<5z~Q&M!5&
zDU{zb%Q5*N)z_|PPg605X}{l>z-aHW1~&;a!L;E$)SF>
zL`(GOKfP&~n%>gg8?k`M$cmIuTNi|-%8SKmlN!j6hU@l;9buI4wi0VK`76MqX5&WK
z^`yhH_j~+~8~WBZ#r=ZmlmAo_$1lMPu=mPs2U3?pb3cn`IQ4nB1HeI-9*pN!3m7sz
zvYtgPwEMZ;zqE%XAczhNiiE(#)QTn~%lG
z);e_U=SVJp%VOIE8Pky}Vx8Txr$W{D-k11MC&foBYUbs6bs~92V$+W&)-uQqOf(61
z4lJ+u7G$CY+hH&fP8^m=ux>H>)J?b-r5mXBZEIZXq#YD{;G#uT7ls5>nedK_g$cQ8
z_zG213G$Lb7e4?=&^Z13<0WiAz&*G-tZ0;v5y
z_uIf;7+ebRpj>6;v$+96gBf02KLpPq&_dR53?KCgxh?@{k9PB`nsE2C!a!+$zM>8Q
zj>H0MSc|;3afzX}OmSc-Orfq%#_=Ytn<%{*OV9&O#}9pR1ZFUn35b59=SVyC>^P?^jn+q_AVd#@d$v(Zkjb{HtgBrj7VM0{v
zrei3Xx_N~r}7edH~<cNipUE?8~
zyfei2&q+f_{vI9HLprec`ZFK+DMGiWVOsiX__}2v&ycK(1gs~gt{6sHSk%*j^3|)l
zm`AWrIPfepX1~U&cb5tJODU4Z5aZppU-OgWq@XSEGrr<**l24Z2d7mA`YXAG)SM~O
zsoy9zjkw&L2CNNsxxCie5@L_U5t8%018g+E+QVVS
z47}BnH}UBH)~~kSxP94uGQU|EurVuxfv)=Bl4P|nJ|eyfV9aOdnPfk%ZFVe)?pF97
zi+z`wGX%v9&v&_#@8vr3V=;Fw)WYDE`A~O4en3jBBK}}6ks7>9f*Q3ID(f`zKOcAI
zh}+JtK{wUN+$GdYYod8fvFyq)?W=9Aui30R8@gnh>X&I>$jZ8S>^v`QDUvMSiLz^k
zW~of7AMN!!S*)*-z<3(D8RN(`>80~wJK&wVAkIttbtxnk`-L}u(U!T>fLSJ0)-7}4
zXpIzhf_9@}DZH-Qi}af;K;GDl0IZP8F=cw-g4Llr@H>DuOOV0jKzyE@E9%)F
zf0;Fhe)e1$F
zuN|Jey-7S{fsuvowTD*E;cu|XZDb??Q$iT#RRgNYk=>Rb+1Yg0<_1HuUAwEw)?6#?
z$XUWhc~o^L4`xD_M*Q^Yd&aIhT&MFw895kiUGqOJDKM(PelVl`lta8NxcMg8p5<<1
zpX-#Dm3ZbBholjnIn^8#bF)Cqbw5RVOWW)
ztZ?fFG@wH_1XzRewQ!Wd$9b{*Q;Itb>ql$Sq@
zB$KT-;?#g*I~upJ84U}M4bOM1pm5#dJgc70XRD1AGoV5Z^umIN`vFIMQUE$dryo@G
zjm4M8b{W7`1M7+?FZOPywl>b~`c8PRCGJ*SzB|_6lRm9)a+kXI7V8%ZF5h>N2-tH3
z>bvEz@epsH7N!7O$B34srGnM9H@amq2udC~`|O2WB-)N0!x2=eemoNq433tg97_1g
zR8d~*D_oO-3-Os}YtcpfWV=My*I7BM!N+$r2#;n*ug7w$;l%kxS&?l#aoY)XzT~VJ
zwPOT$mGZmw&|qWH#5EWeF>7FZ1e!c-rsO2dPK#aB(6O4leRdCl-M0Src}8Wji93^E
zZurbSRRnsgpz|I-@}QXK;rUejs5L(`HW~^yr<**tt?GWuK);8fddF6Z)m%U~E#Gwr&H0;wkv4{i;K0>_3t4ro&GrY2qtlBqeqZ(@OQW%0N9W(KjKDo*rWm`)AX`;V6WhgxR
z9?`N+FrCD3KL{M!2Aa%oGEB~x-4GG&O-76eGjBweaU;|%Y{Sr!y7n9Dqq3QCadBf7
z5C&4!7=#bKlJy&3&R`LeL<$to4~gSo(8xBDOp#u5tO(0V?*)w~80&K~Tnent*E>$H%~X
zkV$xQUZ2g0&~-+2+sg^+_BZUjHboU)UyQ@MY4hB9Ahy8exDZbS(|wrJ}XuU&6YN}?kF{O1wyBU3Rt
z0jC36G?+M(Gm#=)7yXn_^;Zgc8YhiRYpv6XPH{+p*3vCEiK-==<)~?SgjET0WjwE`
ztQ)jK3;0Y2rh-Mknp1h;CDp#uW=K4oLzY%9dbh#f=XU>pH1|!xl?LCw;e-=R?1?$C
zZCg9$WMbPB+qUgwk{#Q&o$T0lZvMCG+=pA|`PBK|y1rH2UENjPy=wiyM6V?N$q6l#
z0o81%QP$0;)TjyxDKV7)5!6MWZ&-7Ht;x^ytJN1U9wy%#LmOjM;R|cy(+RM(DONnK1=;obEz4*>%O&~)aM~Mua>Zd7H^4h
z0!2JSDv!vo%^hh-A-CxTN`WD+
z(fQzf7Z&hbB#1mkl+#z;YVH_aMn}DSGzyJ#6Gp6~q
zdg94ADCO1Q3N_OdYD@^Q5#mE!Ir2p6{wA^9E%)8JJ#>_On^o4usfp$=yh{0zT+{jF
zAr&7ehUN34)-=oM`jw6Q!P$NV4fIL4mEYHWdm8L5Vjbb+~%2
zy~GLssNrMa=UehowY)U&n342}=c-G$OW#hD=e#8j+Pd|jCA@s7YdTLqm1j(R|EgSz@#J>xrxTwo+rx8m!^N2vu-7?Q&G{5TT4>bEbRE`Oi1t>pg;^
zWZ#m@h}b&g4(h<4soYjk0H3M@v+%hYSVFD_YEcnAjn!8jK=^*mOm*>o>+)>u22zIb
z$5@x#8qB!CC$0uFlQ&iF*3mqT4j(4NI%}@gh0a(qQ%a}pXy3ZNquu+{B&@UjOm+Xq
z0(WMkPVE@PyZ*{koXv$9^zVx##*6`xl9=$5cRK9@#iT(rpju62F!H#Ds4gI!n%blN
zpLA~ymJP$;s%e56SUQ7IoQb~4{ktXqYwK@0h~#%_qco62leI=V!#mjP`f^wBqPUlR
z3SwWfjL(V*lvvCD(+-IG54GYfIwi*8Xqr5w!Ba($+s;_iksTYg;`p^=6r{D_C$m!ez_Gi|4c;f}p0i%N1@h+92x!e-lUb
zi{5jAIEbU<8vzcSEMMJa((r#q8%
za4hf)bX)|5-8xr?k_7Kwk1%1;2U6@?pjWZyj-8CAI3~xxbMrn0MsRHYfI}NNmB0*t
zoy{vy6;wuy`Ql_Zi>Mm9^ORLNChIYqov5{p)!)$$0n$|?dp
zB5(h~ba3_cw-K~!M(9)L`z`;xqi9n~QGZ#%r6VdoGwc
zI38xazd(tgtms~n3;x1wRa584uL3_W&R2wMvP4%3*S7Fg5WuvugYQ_``SFRT7PLLy
z2mJyWgVgx>vDXLgi`*)4o3stA^!iiy{y5YiIACc*aMs;WP*JP64Av&W(E+n|Y_X;O
zY}9BQw3KsqV2$Atu>q_(h4s2MB9}Pe)6jrryhk-~6M#W@uA}2TVPdadbk=Oil>VJ%
zb?#^KnoU6Z(m>;3eqYIFudtG8G{sk$RdCu;&AR{!+j%&3m#L{g1#9#_1SXU-H>o;%
z1HAoMN`||i$psgsyA#g?jask;j8WFXOIZrvWWxyBv(rA47Uohxtv6iIYYzUa_1s9`
z5Rp89nN^j)J|C7Ns0nUt;RCmNJOY}fv2Dyb56@j)l3wOqZZBKJ({VvkZ;h
zw07`8e#uf`kULgaUz-Ry#kfSL@MOZiZ9R57G_8zg+Yxn)b+27Yu}dN!X%6U9@q+)R
z1A|9Wvur63a^LzLCTuWP{%h(qwG)MkTdM<9j^y|2%PNc#1x5Qf|Lx;eLcZQ&l^=U5
z;5vfhSxG}gt%e#z{H;qURCz>~oEKh3{X6PG+q2q%)wip!I@)EUU|q@vQK0@bZ(I=6
z#ocjzSLk{*Ac=JD{<}&*K4haAM8Iq{d25~=_NA#
zFY&p>j8fnj@gcGKsq$L~9gHqTln_?H1~fqvsy*tSJHA!}iZ7vUWC&3^Cfm9$Au@?6
ze$Rt(S}d)TAB1R@fPdc?df8AHN(#fb-9
z(zm9Iv;!CPA0Xx|z{9ww7|txc>`X|Mq7OrRxHHBt^+7SIa1`vae{Iz{%vX~_qgi}-
zbyMHMu{Y(917S?-$MSj&8xsbmBEfxB|tMH?2|K-m-^_v|D@vlIEynRKiyVCi0yHLFYhJ
z`|rlHzN2l%+|+p_=zVhfRHkwl^l_$4MHec`?=N)@wzN%I^;d!x)7mYUaUn_9$m<2`
zt8C(BYP90V+;3Z?o-nlU+>3y>
zyX#Q|ZJB5Jf-}9RsvEZ$#h{?MaO_#>7UwfJllAw$L~vc)`mPztWBxM}&=MqwXU^nd
zjKkZ0@t!EN!)HZ(M#^N&e=4c&JYYPh276Pg#q%rxYw)n{ajOFHVo9m&vIMZJzg(WE
zcVVjbAd#q|{i(Q#x2OYWkQ0ZpEbsnjz&*o1H8JviEK#!t`S+e*?h~*R@O%WEkTx3i
zF&Vhbp+^3POh>CPiDl9YKuBB&nOyG2*pV)2BN)I~xEDfm&HF7>AS*>g>y2nt^3bN6
z7Uy~VI%up<19bk%I=YRM*ZW3yi4LjoI&AjGl^z&A#58MQW9%Ba@g
ziq`ugl86qi{=xl)PXfjC(|~)IP*Wc)IVSSm`YK`)jH|$>2;}#FyHhJp>d4zaORIUS
zJ<5$gEVobIud=?HI*pvfR2jGFl$JhB@5ymOpK|DViIipqE{oj{=9H0E-N5K^|RR)Cuu-
z_Cy2S{`OsKrlnRK;-KXeAoWJhhTYD#ZjwL+{D5W^?~LrwY9Me41yq7YV?wi$*S=nz
z>@XyI`n$J~%xAu>((AobeYR?=C9~Pz41R)rnu>`NmAWwOQTFhmlo{SY_uzlC4-?csTu6I;f+##9Wed`g~MQ$
zO^+#mjvP%WiMNuzoZOBbFvv<-ReAD_QM^LVUW?hQKKW2tXab*SjWs)+
zDS+O&a(B=dW#L<*uVsfW9tz8m+}q`WL8|gcw=2S=Zl(v>H_}?GZogdqZG+_kV-)?{
zUC;O~0Ys4RBl|k2?@=r$87BIYEDYbdsVj}
zltz~$mp$|hx`nbnndwg^seZXViM)lNut@R!1I#i8{rIr`LRNI$djk!J9oy9UscqS&
zvfVWY)LD57(uRG9C0cd-IeKDJfqQ(b&vo**njufz>9f}WRS)r~A*IDA$j_J8uPUCBu+Nw{I6%okyb@asDeLw$}Gc6IGr$hgl`Q%l2q%Mci
z=MH8$PJf~5&S6wdQ$T6O)QxLRRc%3VqqPSGoz$V;v7+4;GfQ6Be(M^0p&TA7F^mnB
za8Se;TJ!jrk*h$zNol21FEJ^oULiV)z%+s2;ZW@)g+XG*oDAb};7O^9rmJ-Rp`qS8
zhuj?5>k~S7NOD;3^GgY^5w>g*g#NyLc!=!VRZehLTDZ-br5f3Zu?wG%kx2KH`)PYA
zyKOYN#rcs|Agq+gvCg5=InbGii~Uqq60>DN
ziwpjjZeqT1@t=d|&p)jcaueteFTPtf0Lu)Ypo@zEFYit
z)V|X6JH|;r?BYJq--~-Dq?ejsg!Snl(0TYkzb4BKoX3m(|8v40x_JmIQ2!JrMvZB?KG0m92Qe+zVqc`r_dQ}!rwZLiY
z#RF-_D>5;oBRVHbGdOKe?bJnce?x6!8E5s|AHB8XLkPy+2L_Wz54E*sFqhkArN0g`
zw3k_{IXpb+x?{d%D{pG+mhC7d%JL=$=Z3R1c#4IhSWYJpbBUAIch
zM+->m6v?&hY{Iupp6Kjk0=cf9r$4OPGhxG6F?mWN`>&KfI6s8NKvJLm&p8LiRyoPF
zW}Ji}#xaIF+lyufYp~y2xLxm_p{4TO@H3C$L&U#NcefIrG^R!GNe9P1Qb9GqJO&-c
zLvnilp`~a9iU6Ta6>qq4W=Xq-?0^il7aG_|bl1Xw5SyzuRe4IeWWTnqm%NxDVyo83
zzFoNxd(!);aPHlHUoUu}28sNP4)WF4EQxt>hJOvrenpMck?Ao#*>0c%{lM2(Z>G{M
zH?J=DUlgw8F)!90#{Fda<7qrZYt>0h8%3$6S=yvu}hsx(Y77WBm)^31tdQ1F-`
zH|Q*cYKob-ed@qX_Ym%=Wm>||(bn>-v+(@$nQ8c)Xvak7Zxm+c{}Ec9WaKxQ<3(=T
zRk(GR)kcMT33=gZr&#raMBxhSc$iG~?ILdkqW4RN7&`)(EKU~ut@Q;mz36%}Qu$IB
z2k;x4r|4Sc2L^{lgREvtO177VFvZXR?C7M+zVjedmnw!NHIzxmyTg@`=>N6g%#Eq|
z#j34}L2LwPs&b6`P`stn8zUR`Y
zD+^O-G#SfOT(+T9zUv~MJ&Qeu1r#wlzt1?Qdze8C=Jq05jFhX1;N77t{k%pSenP8<
z;HAOWB1)xAtbACyYXJzkinMEny~|CQfhWO2Ycc=<6_U2xW_^Im8P(IX`<`<8{75A3
zQzmKLTkKq)B|0i=Y~&L}eLD7_x4J01n9o9@lR&y00R0uY=#I_}498(<1@YYvDOj(V
z@*&4wlN%_U?m@2}y!_mM?w
zLXLb%NfU7jIaAOpOHpEN84=D^W9fAyta1Z%JJ^Vb)=8z>MOx5$Xv=yvZ6Hk
zBUQyZUPa^){_{K*N|(Ix$t`Hg=}VjQc?u~G07(otJ~%y-4+(Z$Vs3$lrS(IlIZY*9
zFyjRWMZbb+BD6F&F`jdkcRKULGf+`E?#z~C1XzZ79sV#VJ$JOz2n+vJ#`p5WkVB*`
z6C34|?EC&)cwS$<|KTq%-Ln(izvPnVJnE}RTEa@2ZIh)f?|pJ5Ww0I`{Cr+~B?8V`
z`KqYpO_|f?P19(vDNGKu&G-41>CKugbYmnQAo-T~;JzFy
zXdt2=7!eNQIa5kWsG+U$QCBJolS;e~Rd<@%xib`_X9+k49=3!#LV1RyB?ykCuOff=
zFG>{I0i>xxp;!x87AkE6u2M=_@e==T&~#@VGz5Y0$e<#dGr5IEd~-vm9hm2(qtj&P
zOPeiF!#*==`;W2u?VjhhomlDx>D?3!ocr1l4U5iT{K-ZhiCpG0G;j|dDBQE?xP
z%WN>nak#^ouY7{T#^FT|r#hQ!2LO$v~?
zHRX6FCqRRAus`=6^*$mBwtLJyn~6~PQlkw9hS?z{Dx``Ngw>J14gPDCOhgts!;5tzaw<7AHM=rk#>6+kn+iOY
zZq$C~cCm%b0G5RID=lOFhOOSHExE8K81$GGf
zOGqH^i5ESdBo=8M3a(2|h^{vgL9XVRhKyt0J9$vR(%4@Qwe)az(wB<;Nl)
z2-pUD8+Y_CU`?ha1x#+u8LrI|LX42&^LZ7lSAE0jdg;3qwMNWgN1+g_Kjr#@(C@{Y
z2>DpETFs$utSy^y&H)HdK#507AtEIT)1#4=&QEeoP6v!mj<4U^`W}p==iH!R-Vn1B
zNDJ#`!e4zAfMPP9;AAn;yNrM|2qC%U??CqY-N%QaY{?}
z0`*9CC#MNR<2(ey8scwqkAV%EZ1R-FM{8?v-0@`dM=#$8fouoW`+dz4$p5M-dWeAe
z2!NR8Eq(e4ZA~;3#QeWHKi;u?`4A~s{5i~fr+h(Fi-Zom;Jr(IXY+tR
z&wI@FN7mOq?7L_|a|kFlD!Ni^z+7@KqHCWd8^DQkG_?_=V;)$Eg(T0y)Rcy14MUi*b(Ei+oNa!S${iYC$!QRT<9d{ep-|afEID7UKGsei13Vyo4ub1NnStk
z%!2EQMA
zEHAa1X6Rk}%E>bS&-LvSBMvV_HH%2y<)qrhyGZ#_Ly#Gd;a%vW2FMkbXA){R!N)@f
zJG5s5OBgmj#=vy>htp|INM-GLsLPGG!FRKLU*+%4rDbwuGbrW2o9Ayh(@mtO
z=BS~i0AYJ>Mnp=}#@*oe(JMm%Gk%CfJp95w_xa!LG>5;0VTjHIR*(%ZvKSvEX$VWS
z^BWo#w5Pn0&3*%EFhXz_D(tU$VKm_f)adB}P+LAI2QY4lFY^9Qhf1kB(e;~B058&J
z`mHE2U#Gq^M^8p`RfOdnY<+6?Bq)Ztnr*0|(ihG1RB{e)fh!*anjq4PX=B|&v!C09k0itBpNy5lb
zQ3dD+2YJMg_E>OOzUhlhUo6L;J`ZBoKfwCm@G%~CE2KU#9ikGPwY<0@ushDnEN>xxnGLsn
zDknYW?|D2vVjf+@l87HT&nhY1)7u>-B@3!5gjj0)HKVv^%%PaYjEK=O#E0MgV
zen{HGPycD9^;|2dpGZ+We+`3AE|>*t%vG-2|3Gj4tc+2@W8XXJ5PG8Shv^fr2%Pgd
z&)t!No+$I*NZx@QG6~R|W4w2txG^4TJ1WX3r7ND2P
ze$Pcy{%5cYOTFqd1{OEJprIw*#KXJ!!w#`+@Ywh>RS%ongx#xzNVE!CqHbaCgbxP^
znR~$r{g|&HUlsp0sZ-GC#&bF3&b4n&NQkK*w>33PQB#(X2!bE^?nQyD@EtnqtbA0{
z%ag5IhE_;cH8EUdO}SlCw%Y4kDRLAYc$xRFf4xi0%D(s+C5Ji
z=wtAH!0K#s1q)rF;SImQ^JH(KOqUbi757x0qrXsU^9tWAJW{32{^&&yAN_HNv8`9K
zvWG{-&9^J&cx9DF`E#^Bn}lBq4e#*Fx*an87$S`+t~tA$JNY`w!+7Pvc3SB2do>*)
zcuwr$rK?3=xQli$GJeW)aP*O4E}f{Jf|~aP&r!JT2hQSoCIV##@A|7b1O!7DWyB6%
zZ=DFs5eXVo+>(+!8_P#gZBMT+2DN!-R)F-o%1!9J=TpZZ)tkp5vr7$pfB3JqS{GsM
zd3qDJpf#zRTNzyHG}wGzH3cs`a4Y=uKI{=rbo9t&<+ZE^J3cdNVdc+1Xyncws><0n
zL8^BMSWM9Dc4jt+WJJP>cE^ZP!>!S|MZ{mbS$(@wpZ0aEEungF=BZL3;ZS$9C6d|7
z$P9cYzoz7WmX&7?0C{q^AAUlmRLfpG{G|IXRi)a!L#=N5JJu_9o&b7ZS35)uCcCHb
zeaK9;c}aY*L$O%rlB>($I-Y8>(=u16tENui1ph6|Io9q4+om%qBuT#nIoUc3I%Fv?
zmES=rO+M#@dlto<=HgusvcRV!V5|*5Y&U>;ro{tpZCNo>{_O@JzJz-$JHW!$6eFE9
z2ln2H4+FC!0;O;>aL(R$ZLbE9slOy<7Ya=d5U2`dxPr$-HHojs`ST=>eZ+-Ap}C{6
z_e`*><2_waOy3&)f#0-Z__XQCamW9Pm^?6Yx6s8h@$mkVYEeo?kth^=CvZVM*?D!!
zDx@MKbj@dhDrUH{@7xTyBga~o*}PCV;hyMwNP0IUiRg>ICe*NR%S6=w%6Bzc8?_r%
zndvKp-!Vu;e`Q2EKf;HwsLdqNA@A}s_0u?rEjeXRH6BCueAkXk&dUer&sQcbnW>Vw
z&oj&Fxn6eoq`>W6hNvC-qMK{sqBH$vj;sFj5FpwTOH?+Bc9`4`VUEA(?rsF|qWFMC
z9tf+2)oFJ6?n+2Ts{L#)jK^C0L)hUFwn?H`IP)IraIvQQ+Ej-$KH=}Ogl4z_&E7Y(
zXPtf2!~NuNj)m5!004sZ@8Bt6eIWF*3&{_xzDHva6vw#2Wz5Vk*3{QL4!4AXchUsm
z{QOe)CCHhTLVEl^ndNt~7V<9@{_h<)HL=yqikL`|oEu4ATi=N}B=VF((fwL+Tcd7;
z5zb`;r%n)n&}()iHzd7}HT_V)XUR(e-13OBfS92uF}w}BOx+{DppqXLAr80M+1)!b
z48#fKJM#O^{T(Q@o7AHqT!dZRP#toiTgO<^1KhrKtNp}y0U|CgqiUvMF_&v1)9S$iZ0834tgd*ff1Kb(K=o%{T)DN)>+@LvJ)f7jle?ua%ZZj9r;L}Fq&yqSwU
z!x|9eWhGk%+m3Vs_Qjq^VzRp>%Rr7xhaoyR?=ruiJ
zAD}!R&(+={WDoa95G=oH^skch&=GmN&96q$(~aCQZX#)Fu2a{Q=szY^pjS2z`Uw;y
zj-}#HCb7xF;&mM$y^XO8%!U=|50YtiIRuYySdiN(OVDquCI>xL{^3@+9^WF^u3F@$
z(U;HbLm$UeMy}Zld~!=o7`=<5+3{irK4GEyY+aF(SpC@!VbB&d*5CdHHMss$9M>0%
z$@cefyEph!l7qzJ*>M0mN<$w&<(+pYw+iJFk?t#P5Qd@=mvv0iyftk?R6QC_8Z54eip
zHsh#i99?wnk>73!*?Cv)9$D3iHY$I=>EYGQtReMA-lksiLreS^tJW)x8Kar*x8d3v
zr&Y|1pFneP7;vz*gVcX6NeUaIf$kam=l`$s=F2u!J6=^siw*z6GSE()sv^8~)F-?|
zr)78VR&DsFDCCsE+u0oY!We!-?5^6NPcQvIGrJQHY%lOi?ji_UMt
zhxC``70i1?eOp=4Vb`4_xu>2GBSQo6|J*WxxBo_p4^`pU?Su4^rOYYvh7SFYwSmFm
zme7Cb?-pJdJ;A~w+pj_-xRWDna+>Kk{eFT_PE=y9p%%yS81273
z9(`N`K!Dqg143k3z$1hf4z7$C&Pid7&CF(vXIv03L00mw?{{l59Qwu63t9RJ6+t>g
zTEFL}Vxs#;Z_mT1T$V&FEGyNnc;x}#h`gmEKHVk%i3Vc_hVS)
zo{d0gtZx_N@`vty$-|4+?h<1`vW?8qE-l7MT>sq4Ji$_nvV4DM9_1l>
zY|Si73Sqk(Lym6Y1WQ|78E;&I5KnsZ0igBu1o_20m)!i;t$(ge=9hrB^iB#OPDbv+
z)tcp`^NC}0cvGRksS-lvZlG@+~-;`Gvnbt92ya{o|V-iAoY=AO$~d
ze#OVRRL)ch?w?;H)*}PJ{jn0BttQGLE!59aF2;=gdZ!?J_{
zT)t>0j!dIvq|O(!LENspqnn8lYO9&nf)V&w;wnBqbU{7>0Wa*8>Q_j4IL>KD3O!gq
zWOgMNF>WjY39medUEg0~ZLgC2%jbU5r$lhKedm|QW1lXY|GGPsn6CUr|?4&~=J>!>yH
zEF7;<3qj4X`HmI&6)aBPk0j~q4g6|**HT^2rmUvc*nCQ9FFe6MM<7`i1eFqLLc)^9
zh+;)o+N$0cU@@Tz=8S^rh20clE}ZVTWa$wAR%)vrhs*g)R=3ruSpJ-#tgJCl7)X^Q
zoOZ46NBKFm#Ug9?RuEUTeHIU<dEtfc`0kEO_jsSvUFYJtWZHH#P;kMVv{=k}^pfwCs+2aW^g24RS&f`g%f
zCCsU7sqA%xMCcg1e@D~@PjOIHC_dQ`zj$>R-w7T%-SzKxcPuzBJXZE4X#H#)*-A))
z9X#HyPcQ~3%vwuTWVVl4XH6XCNqV7fav0g?b;RQ^YRR!-@oG*iTV-Q=hc&i;Yrcq`
z-&M_}Se|K1EST5^AH=0~-T;Ou$KX{4|ZQTh$xeJ0J9zLS9H#E2E%v5QbJ2
zL+JIc5`KNunPEZAF7XPrWE@Uj_RYxD*>AE#yuSWid~>#|u1ApZr@q-AVEQfA5D5A2
z*+P!w53SzzatUwjPpsM=2odNX{dA5(HN!KZa0Dvs4WX=k~jM$e1E`R)#
zFjo*nv!jJiCKS(_6QMv^Deh&!I@=lUP@VNLpE$gLo+q&3E0P=5d}Uh_ry+8(Bp1o`
zO(5*4D81e15>Md)D6#*d2av7zH0z7=CjPt|eVTGK_6;8KTfnuD0WAU|#&9eqYN*nt
zj6Jg+A|IMeDvkxIfDGaWh4(hvHnMHWs#W2L!JF9dEC~wdD4vYMph%@Dgba&_*`cXW
z_EiufcjsQP?#M5@T2)
zW2(i8DR$WYO`OeCx*KcY)ppb&4sHdYGW
he}aRh|0jQaz?g>xKBZ>*(Sm(_QeyI=)xrk;{|B)Mm)J-}dUYSZWH?c9<%!bC*5hNN?e
z!k0ggA`69t1g;P0LqyDN%htY<?7w>aEyyI2ApU6ulTauF_HSaJRnjvQ|4aO*RpR&m5K!fi`SKqE
zwEsO!vb(`m4H;M
z(yGa~`94=6Kj!(-8CvF-2B##gju206c|!UV_aDULdnJFF$}5*m>kg4LT|I@DHCMu!
zhM^>^{5eMb?iZ-G)NycT%MiojAujj4M@v6N$|Ioh-lEX&Y_-(V;reX%g$B#M>-X2k
z^X|-|h?GcIuc?#a7Vj<1uF1houp}tX1Ysi94~Sj#q=WeO#@|6DochIMN``)YtDPUT
zDV@Hex;VJUvT57|9nsKJ!he?3>CD5sy%$oLq6g3rGW@=tG=lRleTkKgn?2Bt-!cZ?
zgx|V>F)~)c>UI4L=oICXW)WloE}mF_7sC1)Omr
z8TwKgTAKU}FWy`ILp=3zgeK%0nJoPN%bnKMT1C6_M8p$U3mC;Ok?MN~Mvt1&#h@FJXMv
zx>Ew}g>7tYH&?Q-#c&d#X2zync|l-%wCgje2hsa*4ssW#+Nzi)HRpZhhEEoMUf#jk
z?^OCQ;r4}8CTfZ(7Tknarh66g^SFwZm
zGZ)}|_!>7=NO^No>&qB-oR3m*3t*_md+M|sfgkHU{m*OXAsCog;X%BvlhbcgoosIa
zE5aSud->PL7*gSa^vYkZULUQcExyB2U6Uh|1ZvRSDZ)e(jR2ggjjQ>B1M~>>
zcKc5WnVnCua4N>CWwr9Sq(yVH9muUZyVZ2P-dwRG(RVflgrAn}9Wv-!+{6y@c0B}m
z1bxY;)HfIz96r*^PByj+Ya|RCRL3%Wem~WQPLahQ1h%Z{!%t-eJU;(s#ltKy|tu
zzQ9Y7%91?zIp(k#ZgjI~BWqrdGi*Xwe>P997qCrjs2Q|&C~m)Y3*Z6Zpd}674hsgj
z@0r-joZOmTDLq+sJ7??g))FGzyZszH18nt@U&Y_LsgGhDnD`K$IJ0RUDp%QdiFD6@
zml~YsaoZ~(cF*;tFdV);5!O>o(5}1SGw4<1$Ib?;IQpTM`Cp6$(jh0pNOu#723&FXoY^XRu_`md;xdrDBd-j$D>;OsS-&>3y(2f_VIHjON2aQgD
z-^rTCe^jFC<*2&ckCZdF1VjUkz+bVb*83}&b5hXyIQG=w4DPAi|Z^A
zZIJlS<_RVqGcMX=Tv*}dpY;=pq?mXDTObVL?6q+fY>J0y-7A(^_jI|s5MvM@evaWI
zZydz6l1XVgG6tb`p2?&Q>M=d5QA!~n?+0e4ruc-PnziR-{
zEuAMI99*cZ)g#=)-)v{1Yg?5`V_x@<6~o5h&Aebt@cEjw4!oMsXX_~
z*@Ni_EhcyRqhr=L{Cs&Q{F7U2rbFWH!U#16B!>NX$`*pRjI^R>Q`l4+t;wA%p%8qz
z<&q)V1%gVxRGiLZ43k|9Q&_AIEdsN;(V5?-kAjEW4kRVt3$;
z;>QH$-jZ7Nsdrcz?71l5*RWZv+n5C_9GDoOp=bdBWyhnw-?1IN=SjbdrXe)cPx29d`92*zex{ZzjtSiWm-PCO*&
zjkfHYH>T*Queb6D56QV>Xj)*^mv>W`qn?E=**-0X4jS9wYa=U`l-U7(9I@0
z+NP-JXBYeeclBxmsk#+!&zh9e-}P>mu$!Beb(k?|^2)u}LB4Q_6tx*vFqNm;4f@1a
zkoOtpF)1!Ej8N(ie#s>+9bcIDGbNYUuq0IV*OkUDZE4FJ!1qjb{W=f6Rm7AK=#(8M6f#c
  • M;hg~qMEIz zv2AK@98Oe}t+-s?$AzD0Pe8a_nUrfu6%_>J=o|xx7mm^$_U*oXSKA0ggVK>Tkghj-^!{H-6@)%?ZI(4_QgCH zr97VdmYZ)B_gEGVy_9|2@r_(`BtwKnXA2CSL&NVX!f_q~Jv>0xf1%7PNIfNdd@r!6 zLt)ViuI*4+GS171t^rYM`iw%|yl+gdzu{Tn$)1u?gjZ{jI+VgUKsO^xz(4F$m0Do~ zxFQ_wRU#FEV|eovXwmEs6BsdGAH2d;f&m{Ma1UL68XJPDvLWae8b3?#2S;L4R$iCr ztfqFY5%bzB^^%)N=FC1D zctHKi2P0uk%23kvmob}K_y(g9P!eo?LOiU|A+)tf$;69 z8T$Z(TDCI5C}(C~yUu9-N{jb*sptxO&yh>8S1VmT2#5$Nw&$|iA-;kvb2d(`QHPuHj9#ebFx^o zj}Kql?_uF~=bjR+*4})aU>6P?-IwIWRpW*Rg)%`E>k*XsXm3THMY+!A0>kGc{9z-^al0 z+*oUeA8!mGm++~@$EH?>M#tK<`(<{g(!fN?bi|e1qlpE*b2189T=xTuQB_#&FQTRS zA3-UOy5Cuy#HE-|r(B#1%ls=5D&!hJA?>d!7SDw8OlSns1p(3BUJ+^;umsS$qn!#| zyMZ^%SH>-eb6xa>8=Zy)TrmStlu8U0LyN}tnn_Mk_2&ez^^UlYnrbAj8*&rx97xTK zWX4;)ChxK7(evFp!j@5=5F#03CNrEL1P#Tw-2UT?a#O06N0MR@>Bf~cCND{-TD zRJpV^419f<#b+6o2hs_`^U$p>-5_~Lq~C*`3Oy(#H=%!hS{`xX$|b!zUk^2tqV=KK zpH3GnJ3+CS?tKX=<#pR6460>He;E+W|2XE9RRq0eJ+R<+*AdLChhJ`TC}k=R451yw zs_74cTGXW4C>E>(lCvr{YE;Wqt&#-@{v}1@*2(AyAV=7wHLlVY2a!{?s7Jj$-3d{L z^bPPu?LVZO%fYvCm-K_bWEtMlOTh$_!jEv4VKR^Q}ov~T;-|tXC}qQjILnY@eJz_&A}G=~M--|+-=CO< z8)suwn{0^-|5r5NBBa|-JYY2x_!}$ zRO3nBhV@H{zl1$%!3oVdisKW5Cm>ju(D9p@sp%5b_}W_qsPZAxXvl|23%mx&N#tRcJhf{dIlM6|vQU@OE@6K7!LjU_ySwkW`8xTCcga3bIfQ+h=@B2u zzOP z8|Ol9M_66Z^6!fWOMEhq#3*j*Vhn)YKT_kH5%+T8>*6v936mwgNnFv?$HuJA%k(G# z2O_MZ8bL^Og#Xv)sdL6h!00?jUm^c?@w+j6ym_50d*0oqo@78M6NY@^V?>=btyDfT z@2{CJk1YnmNB^AqW1DbVJ0h#OcSOe;R&2)=QM`|o_;=Z5SJWUusmL^ikjgf!4%E!l z2LNr)mIy`QefHFz*EWR5JPKNLa(n|U1xZZb%u@&d>2aIkEF9uw6r?Q#_dX}E9{bWO zhED=~wAl%N9U5&+q&;(7aWIfw_4n=*#-@AYbn8%yM}=`m_5N0v%H_2uzzgN~W;=)(>_#* zOreDTdUiP`TuHj4mt8 z2>DU~-w-214jow{^(E;N5Z5koCS&|kN`L8vk~w;n_NI;=`nzU-g%Z0`z}We237e&Z zFE_OgY*+BfOYICf$tXfQv6uW$cp*FcZS6OY-A(m>-PBZH$0=-2+={3oQkT;c#m+zL z%T7<3U#&iwtQdD+NAfu3Kb)|#+O?$0&W?U5c}$t*!9jjb^SpjKG@8N1_WKY@*;;vp zadrO`n^`Vtnrn#G+b85n5Lk&-AYY!3S}myA!AN`@XAX~nEg)z*eUhPa>rF|_q-8Z| z*CS&)iEt`U)X#6R-!vJ15hHrc?PBzip{CSOsbzgytw#Ge*h5p*s?Rc$YX9{Gx}$zv z+h|~Ah#v~-khsxm%v5Kw7$2_r%07xbdd`E3t5GAbDIuP%h@3`RVUVjrU@hxMrVDV*ZBsu 
z(R*MuG2vE01$D*AVwXZ@we<+E6N(L=vKS|8^UOODqr8IseU3y${2lw+T56k#*^Pj5J3*I=qP=*X7j(-y^*c}`bebWT^cjW ziZ}vrdN|ew$szq1pi{j z{FVYps3z%pkuK6;dodt!2BLg}nwB-6>nUfIZ#4FGyE|^#1#96#B$tOjmgvm!Nx)=`$r~vnwYblucPb0Ih*)gW z3CES(bkRA78Q7nFGJRz`Age-jIn-NIF6s7^qolz$4l?Z^!!DA1wQTZ6z~hbX(~Bq+ zztbz2=`m!ZC6+bM;5NQs+mEN<=Iq?bie$HlpsJbID0rLW{q9QV*!2A&hC9gtZWhc` zuWFtmvxO|3^NFUTTd++{97_hLmsO6r*%%dqR5L#j=(C==xXbTc-j0s{0Fh<}+l$%b z-Lv^QX=RaSI{YqId9+>6u~rgX?bz_Uy}HBT9lbZ!;<-*CouaL&L0O89{sYW#tj>dy1Ob507QMdwRXA&Es! z2Ae_-%_zsfsQST8-qif7h)Lw&TiAfj@aAbE%jHit7jHVRb`Q!y@#uosh{<)G(aWV3UM_p z4n&H~p>2HW(0fqu1FWjvq%f$K)w8{U^%LRu2mSunZzzv4LNbMKzSbZ9p!KEn;a+^% zW$CV~t_`38TfVb%2B!#uRl`WCb#$HAcW|pg{|DK4yuFOT@Vzdw;Ch#RrJ+q8Q!Dax zmzJvZ(YL0Ca-f{1I_6^ga>(T?*qP0`l<_i;t^G$M{`-vl8JQ}U-T-^Xn|rb_1)8X} zN>F|#Z42?P(|kQY9%0+tsdAhvwgymlW}}%_Cxf1n)AWfD!9#9YBRSV@*~6{-)A-zN zZS9%3s@fcz#-ZL`ei5b9vr|dm8=QnuaUR!UWGpU2A5O7apAODa#cS>Uf)cbHW)2#< zypsB|B+#ir_iy8iCa^se6mIMyK}xAv>MP>K0;f={jQPv_{C1pB#9tS#W1_E;XUwSY zwwU6`d=5Ds(sU-H0Z)>#xW}k}abePSb1B#6YRdts4 z_WdcZ&vAI)`9CNp96S9*PW^{HTCm+V>#-Y^Be+xYpltlw=1|w~PN6x$2~8d{$Wt#R zRoD}pfoQ-+HML}a#r*l%s~WV~6LsdXmeq!0_2AWV7863bo|9hQSoD(ue9_e1xx~YX zEJ4j={ZuR4dV>k12aJ(+PmD%}jaJZd(oO3L-6|8F&(hMnD87%HsniP@&dJATcvzNg zUQ6V>qCCAxqZ-w?Kxp860EmMk6n)84p9jxJbU4aK3_&THu(7XKP=y8b*9 zUdiFAX(IPqUVFq#H0K#fZw|irk_<$5Z18oZSL(lW=4=_08h64`!yE$_*E9Xy?VTNv zfZ3qa0gNZ!Ux28(xS?CSVBe}BHxCw!g4qz!_Vjb248b=xA|NPPQTiz>B;mv{)uFn5 z8?Ncjfdbc}X`QfFlp#9_|2C=~gUAJa+*Fl`fTm1(QgD(>U1^i9zDj1si*KXQZzdcB zgVkSq-Bb-YH0x5lk&(CcE8PcVGR{{!)r`ZfS4Oqf41k2f7z4(;>-X+~$W-+Wp|NC@ zYb@x<{LZf!RT(OuWyy!T>+9>(5Sfh1-2H?_(vVXL97OZPz4Om+GF$Mv;C_L1-J|;2nUzBU+mBoKQEgi98oFdN-vWe7Y7-M((Iic{cGlp z->T%;D~|UXStTcM+uN-0j8HPqeHkGkN7Di25ZE5kyURl+kbVJ3!JPnTn4C%i>cpg0 zuB5mipqi1?`)g0P23CVd>Hob^9Tjby2MxH^aye1*b6$)Oqto9Oqc@*kpB-_q{|Sx% x1z9&$4W;kj`QKpk>z3;2|9?>M|M_U=>&IzSs~jshmdTHANQ*0oRf`z-{~uWgH6s83 literal 0 HcmV?d00001 diff --git a/tutorials/figures/paddle-ernie-3.0-framework.png b/tutorials/figures/paddle-ernie-3.0-framework.png new file mode 100644 
index 0000000000000000000000000000000000000000..f50ddb1c51525dc33ff0c92806afba4a859456d8 GIT binary patch literal 202018 zcmeEtRaBfo(d(RZm4IDM+Cp5+K6Bz@W%Ti>tuEe29R7c|VN+2W`RHBh!b0 zA%l?-7g6)fILrKKq^G_(G^NEl(yvcn+Qz5U^F2NHl-||L^`U;ru@p3)bI8?vgDK zP1u+$Q(T;nVD?iR<0J(_5ZU~YCHv0-(*lI@v}ic7^o+g(aDr+Fzk$#i0J_4t=YjHE z1Tm&4Tq>z6^VElz-o9$(PnQO_JITJdNX!U9)8L#jdv-j!^&fecESL?-xScEYzoiRM zu#orN0VTDA5)BG=Fg+B?HksnkZDRQunL&enmV60S?|`5t&jh@myJ@*Jt1nV`V8w$KqbOB*oFDlQc>ce zbDTh^0_F|VSErdqRq*4YU|+tX?aXW zfmJ*vzq7f0dgNipN{gMUn1%=yGBMJ!`H+Zr)b4?^4bNZc z4^Y(yqJ}VGd7&BS-5XieNY{7a#)CyW5&vzNy+9FQ6Fw&|RB)IIXLqCY$^R&zFPq0T znI*zJfjqsu;_5}{Ak%ml(`ZDa_dQUhxWys}ZVW=jr0FytJq9V&1jWqfNy*(nt#HuNQy z;KVbk(M9`T`=Avw^At{1DGu**H3+1^obqoSV~^YoKL48^5%F1>d&wjIasS^>?~F)P-^7R9M?3;#^%?5Q z7q><}oQ;{EH3sHaz*r!EoD@zd^^bRPw}9iD$C8rjfc6s$XJXVhTWynu zPF5sGpi&8p?Ou@E1FLg-S?%~;Df6-j4wpEsZ1j8UX(M52b4j?t!+rXEn?qM0Z;o)s z!s23yH`CJUpMruu_Y1LiQ`~0krD%jW7&A#A&o1n?57JNf zH&fFo+5U2hLienhp4V3AI`#~(r&rA_GO z==_N#XGe8Y{33>mZ%N}ttrN{dd*oG1vNJQ^Zui#Bu&{}J#oeiW{YYE%nQGU*K6>k% zHlrm0d4B+vw})!HT& zk%Am8yZ_p*HfQ;HfJ;pqB!5cWDT)E27Q+#Kzn&{5C1tGIo7oD&Ik#=7sp^ZN7Q@!S zRk)j0S>I2OrcJ<;EWSc&;E&s?x(dQbd?T5fFIBRtFOIrH{a;7IZeLvdi;6;U*7<2g@e3(Hq0rGvwe zX;KIf3JT5Go%k!rd8UOAjW$?T>WW)`cfPsv+O5<^*teV;M21J(UO5r)ah9YbEGz2s zdKAu0@rQ+$oH|&)FX35Yx+o5EJnPX{;ux*bo^MyEXo#Te zkY+CpQHU)4v3I@rn4%=!FIWXYAmY@$^?R=O=u+Yn@+;R24dCILU;EX>kHGgq*>Yob z;r5XRG*>dScQj9D!9o1H*np`xIBxF~g6u?B7VL@}Wp-|hd&elV-v$xW5PqUR0bScfq(0vwDWE)XMdWDn(h&R&--(=jqQY-{A|=!YPG4 zjBz!~c8~2RI4(!{5Y0F-w^?#ta`v%v_k}d<0kHAT&iYCiq51dk-yNU-{!lq&#Y17g zPg$vVy{d9u>hMr;m5U}I;$E4+MX4Pz{W}c{X=bU$h{w>87L{jA`P3z~O0m#@&%KTs zgDd_GppHGz-+v8gmbB%w?YAVTgmWr^g?KB)4i8J)H#-WX9#>XW91OzLNi28Sw@j4T zYS7a$k?SVKym9`HHq{ssA!g@+`TSug{A1(ahpWUR;3pcmtzf$}fg4?VCi;T?B(>K2 zo}p#;V`7nIb|`PEJ6648?Kdi0EpJ0{yM%Q`O??kyt*cE!!j zo;b>m(ZR>Iy@Gji*w{K+&OV|PYaShP&rH$Ff+o62EoHSqKX!MUGjb0@Aizvu$6d*} zk;}%3_eS7e+ItdB7r&Rl#VTE~sm@FP(N4)~h-3%5-b_J+!i2PJfuW+YNT)Sux0&Bk z=$7@w4%_cn@~_(SxR{7Nm$1gK8s*9GehnvrJxI9ZAtjjWgy1jLTFs2TS33}${EHAq 
zdL78cK_l#t&;>B!#pmce#y|+6qWg^P1b#>?9lvrithTM?k6RVIosyRqkA#F&%>xiT z&~N7=9|dF+K}SVd4*hc>aI`C1Wx9s*-7*`Gb2Ybf4|TyKTHSqi4_vJuVo{)ehx;u) zpqffQJb*X=#gRw9N09mqj{3X&=&$+$p^04 zJ#1p)2@qd$rj$TV%%B?l>ic3<8!xS2q82f3MgJsWpcJfTQMmDJw*QO7lncFD_yAPj- zZSh;j6{$|Ur6A$NpFbC_8}dz;(V{NGCcjeB#qKw~*dUG0JRZZ$sotoB6I+H(D3FW1 zv7-p7qM1+>n`3ZW(M-39*b0JXY2a}1{e}Wn8F~Bjt5iuky{eVWrw#9$hBL7D$bsfu zd$z?~kXP5GTPz8HXv1$Rc$Ajr62|soN@(`<%{PAVYw+99>FYSk033&-*M2U*^5IgM z-Fn>V_AYwL>U?#pnV~q_33cOkiSGo+H=fNGw&``+Rqwl|VV|nVbB;`}iLwH=(>;0; z+A1SRR5dLUMvqlFImBCOw}KV$d?V(ce(Q7NatcopL>CUbozNuJ4NqePCw$lW`XI`AI37Y*P-;jrb&JF90NK(+i=;vbA)&!Yu;78ZVkGL+69-vK`ruAE?)mAS` zQj^_gC!$7=)6DxNj5Sm^gzGI|woE~9klpCi)LvlU{#^0wf`5CU%X_PHIVRtS&GdT5 z*C{n+<(>l)c2hja#qQYg119x#M}PNU!#AGf^nF&;`SJGNg|}v9AC9X{^_Q2ew|V8TANP+i z1e~weB$IePfY?2c!YL`xBFTv4frwqX-GkfDk5B#v1&02aL!wHDllsS_LVAkDWt}^r z4dus$huOyD2ib2=Xlv3$hjVdT`5s%|F_VHI57%RCf77*(<_nn9xX9_ zcssxNF7~dvb8iEUb(Oh<=yb#P_SQ8^pTw<^^|wWqS8thx3ibIKYnowq4)#^2a*!4p zi~xF!Py7BYuhzWAyAfsj7gBRw52+zG~m!rsuS%dzr)%U5+{itSjD>oxhzoe%OHVqrB?b#u(gA*fyNSV^Ee{?|#y z+nlBqGHG@4K)IrjKY8PPe+fxr({0N_ZkUkf&{%CYCSI{szV&DBTzmjVILc8_#7`G> zLzSP~wP+p-Ay9ZSVx#QlEuA%7>l{W{X{FYTvO>Qjaxe7UT6nGkz`WYv!bd0_&TudA zz%BB1WTh^O#BW<+!D>TtNRd0jHa@=I=#}~QCjo88O-~Ac0Jq~~eOkb{f_`&u@3_O% zrmxn7BJvmCjojF{Yhk=uz5&6iutxip{t*#k>wu8J=(WAPvrN8+-{8LorEq-;0)RVX z^3$j(K~v2gA=+THnd16_REg5cGK{%mhi`mczNa__Gl#flN8h3X91|LBG)gC*{7XEt zJJ}D$U7C#zTYi+bPOd5EvzxD9t+yB!Zt6BT<4=tnE$&I{CrhCN|H7uLspu8(u1(#L zy(3;OJv6|puS2gQ1w0t&v^xsM2-x#S(IDZD7vic75wLX}g;RK0+E3c!1R13py|tFb za`yf*BIWvHC_8u2kvxzbpFenod^dP^dNgpDU1V1V>$B8>R?}funJ&Ql!dyPd$Xk4L zNzvo%Wkt7E^W@TI3Hw@JSy5Y?;eB^9mg~x28eWa_L!S!T9?={);L)&{Ax%8M+kXBF;%4Xj3Gg+9!foWc+N*F=}}{@ zRIEnB4lvn<}MaGjr*D;Ut(47aXoUXO9C{?&*4xBh>*VuOWzSJ z{JLc5@-|3FNKn^ZN|_1?gkSB5piwaIaa?pk#RpIRg^MZ>aWhEpL_96n@eQZVs(xH)hGeoKN6y^Q-OG zc6&uw9xjx6ExR_SKAnp^ifHS{TB_T@a^|^9jQrBK?jBntK6X3+_Zv|Z-)GdE`CC_eBvhPvx~H8uen;F2z0T-ZLZSGz zrot$k>*fO4CHpd=%{Ur1obl{2yZNIUn(AVz%F~G@Lvv8p+C|2o8w_)H4B?V;NNu|L 
z%~Dd@O4<_|o_KsbCpbaB!<#DkSMuy1(oU-MiNU=fHyeMGcU3k`_O2tKs=v)MTCmZU;+!je4k}( z7@hglrI$PvZy)mp7{+QvjwclSdI(kj;flr z#00!=3vl1=kh$80CbDEKo}C3J@cWD6g)oHnM<3!CCZVOF0$ zeGxVxq0WF6C3YABS1_O(=Ui`7;`M*2Dd*$uyh{`_!Ioa02NVf>-czs2PlAj~-OMpWq5nbVHnzQ42i{Lvx%=G^haFmeVgpMv~^RMB>65cgJdNyb|IEis$JvrDf7Ntt(wXL z+>rDq5dPwMJ96k5AH)nbjUC8U%_qFAdl%2@U7T5)&s%)AaWrp|b3$*aB;L2A3_rw7 z16YvbVG|={QyBJ5Dh%6xhu{KhC(@rWtLVMJUbrerjFEP&e>YmXGUvsz$(pNlYJdaD z7TNylcgBi4_vE}{ri&ff$nV5}Bl@1Xn+3MWqi^G?aka}`Zl}})ES3ldXCvloF{}cY z!-V{4Fgq5HvjgnsS-MBbDOvfseNrJwg!r5La&ObY=bqg&U+mER{B~>!d6zt^_D$)SxHAr{$v8+`8Q?e1u$<$ z!|e{hq1*)`X4^VVM)CQRU3j2ok#?Q;$3r|(%6AfkF;c{VWbNKFrw8LM|GO_bs%8{hjk? zTdl58{UwvVU8{lXgLVRf-pp|vq=WslzwKiDFqbx8y)yx?1FldNzdmC>Q+(aj(<$YB zyL9I<-j!)bv9*ZH@?Ez1M8bE;8VK;Mtd%ERsya}x5i9cD?#^XMFcJx#uV+Vvcpntv z9<3uW6x5?;mF?t8ak!#-Cwx^$}F3Pm!pA%O3Z>Xmd_OirY`qC;g;y9{|uR z!z_roD^#F}jN6!+*z1=Pjn%uV{6XO(w*F2g{W{M#K?s~<_6;1@yB>(ANG}?7-I8i(`$Q$MY-oh<#xa0U=3JiQW*&T=)F0gIgFc=Wp z$qZjN%O8r)XBK)6?^q`Hd%3=008iqxI3lb$d5#(d?JRwCSt&QYUEbrlMcS$2;aNec zyudSy`a7*EN9(G=`7x{ba+~4nS7opb1>ouEV9VoMe_|K(K8aUYWbVkv;XIFrO4qp7 z(9V6?9j5f5D)VanB9#Me`W>uSEevL_E~lN+P#V^mX8zX2z0F8I+Q)rL{Z`Lh^C5M8 zX5Chxf0$iZ@oQ~iSFKda*6yKS>)gTq6C<;S%O=gT`o8BiaR1{#DqODNk{*E4?INMU zu(Lq0Y|H;1f;I>BYnv6f9J*m}rmT%p(%Xq(qa2&o{j#mwn0GxK{m!DCJ`c(^hgs%- z6Ks7M6{=MNauTc=U*!OS{bFUzJ4X`G*cHl^YLy`5D55wkx8&#pqjIKo^=I|`88r%i zJfCsAoE_18pj}_Z4g>qKfmG`GW_n6rD50_CjbGYCDAvvo%RfIU-%H}x8WG~84rDQTW99d>7lkcGK z`2~YlRDfey;1}m*<9xe)gbkFEt^adLfoTR?ABkRM~Cd zVwrDu&U^Oa2z+)=?y%ohyNM|j^JEz3QQ02gs}6C#$b!}3klZZ?Pek@4Bnfn2BYY!% z3q7d$Py{UZ;lZ9l4R>d-dQWTr!zR-u-n$G#N#ClzLH=x-6qDbISd&62&X?sg3CX;@V-SUqel;8w0xc4g)0$bj9icVrzEay=J! 
zyPrR&70~=JTfW@(Q4|h@`GEFA$ktum3>!%W!R})#N-#WKyR1|+fv$x%U|+WP3v58@ zxicp}xf5;oA~`8$w<)`ZA|*K0*=8iSW*T+0>|*5C1mV$IRngom6?*JCgQxLlqb~Ky zqz%cInJfdZ8#~xb&BL{Axay@A`G#PJ?g0Akt+NtU-@0qoL~TgxJ9M1k5E*#hFf=fCWc* zPILksOyY?kEREo11r#|l`UTA(^xzG~a(FIbH`FcRU=xIwVt?Cd&33EX-P5TI_AP`{ z8Av@g%6oVu5yL|GrPI5oAaWW@OO$8DTWF>dD`@g4dP-zMFw>)V`@EpP09{PZrV2p%x=!_=?rFs13H^M0j#-A`$Q^s((e~;6-qn z){Jg>iL~*efQjJs{|ym)@lPcKqUkz~H7f8U#5Ndt1wm+lu4&6qt&rym(+aJo03KUa4KObg39QAo< z(XNuhu7N|pS@tpbg8BEi{&xS+4gaat#c1vj*v*N@a1PkQ!s><|v@b^3V#_hp3lq(t zA(^BJ+wvLGP)$S=qdl2VpJcEK!GT9CrppKlT5-ufJJ{hFCNx?6X$H?zNJJD3rdRKA ze*PRn*?d0Y%jSJVzA)k1m-$dNwBOzP@zS#W%|UUtI^M+cuZQ7G@5ipo{(hUc=TsMz zg?AHjB@yKoVaS1<{JplN3Yx2=^@ft*64v>ZVs)jVKq;)%$%sUSqM9N3Xh^n@2%~! zJOTMMUXVjuFg~`=LO}`ylF$<|CQzgp!h1RK?QcvPBbdEbhbR6f_FR{ik}`3FK}@rM zC(YUHkE)6aoft|ifwJGSI#+#SOd4MEZ-U*Ezs`;&B4`>hfw>Y#1OzJky#ca^J8&bK zGXdE9hfb+t&fgJJ0LOWTR%3s=1H^1Q$%q=dQ7g1MQ&S)kLvOQRD|gm8yoU(76(m*$@1ehi>alQmA9_}MwOlkuP%=RQ;^B_ko0-!O*$KR-Ajwh*%z2ksFGy^3>$k5P1V z?1T^|*@a4-!h<>n5~rz!(XwEOB&=7aBa7qXgAaFHAt5XwTYJe?zTi;Fvu*QjpZ(5Yo=+- zN2aR%fx&LAH3HsY>=)CnBN}_Zj^$=m`TgX zo87yQi%vbg+Mue7#Ul7xnP-E9!Hej_Ts7(5cNb#uWjArv49hwfK7e+VP$P~HlMzJN zkOiB^+JW25+6XNY{LGhuC>%#T=OO}WvVKIiqRZ=u$jF^`VWG*FSjlLji5&v(=EYZ< zX+NU`BZp0U^?SFbsEC+@!qDQ3Z^>e;(@bR1s1!(WI$Czr<+_TKJit$%(7ICcFBTLg zF>37A%$=`lTs#L~jShQse;lIKGr}PaXI?>?*p3SQ1>`Xm`9q+-wQ9~;w@@vvYV-8ZMsvu*yFu-x_TSSSEz@;Pmd z2}qg05%}|?G2GZ1r)5#c(V^U!0C%yrbkVt2ozI(evi*E?I z3(#X#V&OolY$LtLKuwEd4t=;Ta=;}NnK;+BDN%xtK>}^3te{z|A>Pm>$U2(P*#&%t zE4cAQwERTrgU6HOr*wWd4(d`d0Xm`%NRpIGbTMr^#gXKaA>-|eAtDIxn+cI=o#GbWlxvAXs-_GN6b+9y5>x#tVz zT#)Cxdyb_ySgiL$17$WWPteSIbe8vv$dmP;MLXgmWHTCBJI z`fB@aG@SP$YC}V`=eO-(t1j}Xc5%|A(PODEAzL-Rgb?r=Vk6yJqkU1=rsss;#a{#= z2zV$Ybn-ScJ% z4Rv?|{^)5NC`&HATuBn}cH()idJ3})#i5r> zDMSC6wH*Cv9Al9;v=*S?)3%+#!nd#G@J;VNegyE0-;Wd>g#q^o>R_%1OK9_@@qKpik>*zk4SBuBR zE*yWZ&#U)NeUA)W#T!yvY@?YX6btaW(lys)=s$A)F*f4U&oz+WPfd|v3;+8Ifdvkf zLiI8sp9Rp=;YWwcr@lL)Royp&`T=`>hK%;Bt&}5^g`$w4fsZ6!yV+&(cw$ZKUZfol 
z>q18ka~JzpAKRVCUkZwfX4*(6whP{d%j+MOyl#ta>K6p!bw9nw{v0R-jpmkR%+<@F z=^0YCX!*Wd(`T+^YPQaJ#mbqy`~v(-G5~nOBgbMqZsg$x3YM{;4N8YAB-$9jGH zI&!}})mZ9`6OO~X5-D8S(|FNz9?y;?6b`o>ViZCS+|YNrb8}r>Ih4;{ zxW+8QHyQ2G{<0T#DO;QWs4U zZX4fOyiRwtCU-?qAHp?l{ z5FOtdi5<-J%exTHl1pbPVt4F=uFHh4M`%lsv>$uB>@#A%(3=g;=-GV8Idway6WkfB zV%cP7on@Ximl0q5KE7yje$lVdQQ4I!`15W<`;K&`CrI*Yz?YwGu&{Wp?M{rTWB~!e zjxBJAqh{r0B}NYPKxHFyJ%157d_iZ1Y{E@78ae*>jzG_k3Sq%(b;@PFq8)`rX47Qq zQxC|y=o&8~VUR(gErpLE!-hM>1|82%q9BU9rU@K*nk-hH<dfym-)8v5P!(%xPF7idzQ3dnI9y4s+4FJ?j=)(Dn@ym|3c!P+Ywpgu zexAaSM;2#(OYRtFc?cTzTs>B`0*`~1Pmh0}+V4K0^P*OtS6@v~-9DdyA4YZ(3igSy zeLUi|yvLU@mu>2EfrHK2T0J$Yhm-3~^_rSj9_R^vRp^4+C5=bh3iq8Nup({ zT_F%?Ueb<^0)>SHpVRgWpj@;&m=?Z0<&QbWvwSPfg8pQ$cx)(+G7DRCu{Ow&rg+;h z2okF*vspP|7)xoZq;(&jf3)A(P58)X1f++<1D4@}5$r}RNEvmw<1X5uwuke+DrIY1 z#8}q+J|rKPpO5ZSJeG;gIxHD`ZoB>3U02j-CVx13e2GcU+ES5o+sFm1EU4%Nm9}j^OqC?nqP0OUPF~3$enjv8?aw z3w;^i5uT!k&RW?48+E>{hEJPVzxSb<8mGWQURyLIM+W1$cgndnPgn!&DDtkHIW?wQ zQxXN|qBzI$DnI=Y5~rAt^NU%fPm%2840pYB6&)gZ?oG!Tqv`u~`pBYG#gyVA^c>Se z-kv{=3UvS5;2YiJ9=3*jMryKbt@)O|Tzib+q@l za4-bD68RK_nnVoB6}oT-ywB?dZ}b9|Qv_Vqcvz0tmLYwd-hRRmwvO|jnp+nr;b+_C zO@sL^i>EJjYv-u@O4qd(Uwhvu(5(59|%s_0xW-(Qw}t zYfC}HJ1BQJ@zUIpmW7w}!Q{=oS{qI2>HGEPBE#^aUnOHn#x`{jh#xP~kBNRbE&2NA zm%ll}G6U#$%-{cm`^f9e;WWnR+bQT8AuznoJ#p3^+pKok5|`Tb7@0ibW>1HKIwcYn zvgGiO(~?qB#0=%%qHjF-@6N_t|IWJ*>K#j4l0toOUV;kyx%cF{VUYIcSUD+Y|JLT_ zpIM%_?>LTVz;dFryU^6{g4QlBxWMp-UCdH z*8XMz)JkN{ew@E7)Q&(EKFF%ZmUt3?$zH}yuR#wekP zJ61Nn8+sA>kQ{^|>Rp*H2%;`C*NCfhTwGx(Qn4!MvaQyJpFe*t^gRv;r->&+kbl<} zqg0!aKw(h$xkV*0L6zrTP1YF^@jMuq4n=$;*ae0E10_q*K(ixIKc=HTMf!`#!GB|v zp^A?f(T z*58O{<^IFKQ;vQ_G>dZT!S=X-c+s#UrS|xLxa9pTEkTT9{>7&GF(=NLvlPPkU(E#5 zP<$&6Yg(*uymWRNf+k9|Jj$1Xw0|ghJR=wkD-Kmx@n!OsvLA-&3;i)kf0F2-vwZEw zN?(AXpk|Hk2t(MDIStSEsoP8$!2GV3M>qxpD}#`E)s!DwO}FdAga&y>K(Y;VG9a1n+jjA2HBJH z=nF2uJ;=zllOgo(ylagevCM&1W~?OnqG`HsmiRluh}yMm`lB^GckkW7pS7jT5;eWD~ablp~xNO`t(fcw)8_e5EVcU{1y~Qh}F(= zlXDPZ;|Du8EiQbt#oHNR=OA)J4iya%R%rLzZ$`8RP^+CoY>b07Mb5?H@0cOxA3OgF 
z9}-$Q^-lW;c{KH8&j7m9d2${)cxgfpZ3>Y4OD>C@Uki@V6)Tt2VOkFnB0^(VP(sYP?k`FMd4sd!;W7URkrI3kU)81M zrHw~mXLtMwCkdtHYgwYqvkX^6-u7P#<7+AZjB1*o3GFhOPSqFsAwfa*9;(@*LpcqE zI5nVu+5lg31^tksA(kPU<@pjXEk%I)ZzmO35a9f$=Uh>D+4Da`&lROG!d!JEzE~qz zx&9N^FR-oBe~KmQiT|S!u?oDu$>!1RsQ;Y@|5WY29x2p8FcVZC|GNRI1*vOo|7vN5 z=pFaRnJTH{mK609ZB4&L*+aqdWJF$`5Ci&p5P5!&_(-GNo$4wS^$MiK#(<{a&{5eC zt}EQVEp-Dk5$Uami#KU&+Da59kq5LUOxfLEw9Nh$iCvbztsBss)31qBaXgj@+^B@) z19=C)1qe|w$Y0LWpnESqbpKz47}HQSX!z+<{&`EvOvmblHnW;kK!Ju!h+5N2)hSvp>4CpOkTwKxpr#p zNEj{_5pR`*zm`cct{m0^go>xPhk`!iBcNsdl&iV9|hDT8L9AJ4|7 z`FR*1i3C?fhG(@bM#6=Z>Unv!zrAoas<5As|Swm67PFX%V@_o=Vg# zice~Png4jVfB)D_A0wF$JZiuosS0ZsM7#Hmk;F`UUzLxq_Z9F^s!*{12~!7Z878_9 zZ&d9?IFdD6Y1uduKJWfH*Ds(XnH*8s%gbzWua!tGRs9{)%ixA`Hz|;uG0nxcAQ_Jl z#&onT$n;q86LH?iSNctSbTpdB9Bva#z{RMoF5RGjCQpzkqRfe;zM`L`t2NPU0s7Ny zQpAKxbBRM=!)N8;#PN1p(v_||!7@(mruu`Dah_^Ma^#x^Vq*UyRG^}KJD2lHxdLH` z<@Z1P^v57!e?Oboszfp-s^sy;t2<*ag}oXco?tv|;pfd@ zV)p6?(XwiQC3Y8q%gb2XJFr>G&!8_;gE-KcFy|TQn-uO?n*B`mR+K?G zXRIpJxg!exHP6x*!lkd>5Os!%R=BD^&qJ56OM&`!L-|j!+PX1w*Olb}vBZ3`VzeB`3I z#{Z5q?KitdB19F~d7YM)P-I(Yy0@O@SsXpQ7MEB&4QGZ2rzCYWCj2qhxQj&Y?4tB% z>tK8KBA9mjfO5haDYrX9!;Xm&@%t0pqM-W<-(}tllu|Z%Md{D^DFTTfJ@9=Xd>w|>+6OmTf&g%uP0r7 z^$bYQ>1tFZVxn59SGZdmfC&HU&+JfoT@cUO=&GD)Xo>}s9KXpWLG@H6raW@?2R+@w zVhBUgr3S-!{HLcL`>J$)lBSby55GImEbZUk3!E3VH{E7=+IV|%G)4R25I5A3Zig4R z9j9wbhEr!x`A6YYX3LabWgfNfRTYsO`_hen7Fmcz1lO)))P*LPuo0YfKk1T%avl;i z3nEW^^%py|FB43${ViLk=__Ip+X<8elaTs!*?dRiy>Bk3rDDo-G2pz{lmB%NGnL@z zN=Bjlb=MhCTC^y(z(Mj#ob_!+8K+i4XBZ1_x+k$PZ+3tpaKYLf^ce^dMvjO<-8&b9 z_eW?dQ-YNTBJFIXmQ;7QY{&n|MQ}s*Y57{F$Im@*_)AzCv0Za?-U$0}u*ONxQzAFI z0rmVN05g9QAK#eV#&aVydraee_uNTN4<1NJM7e5i+ylGa-fV7r>iV>NN<*!aT#@YZ z+Kp7CqHmGP%RCt~P1m+DA;}}DpKZX^^SpEIlt#bsb=-EBm5chC+yX5&=2TcBUjaQK zq@CrZMVAlf0&`O;Q?2az7dr0b(85Jx*;lu0C83hB)DlN^3-eCR5GvHWvO?+dkjU>g zk6Hagp*M>2E-|gRo?hxaN>kkMCumEdtV{E)j~UHd)OTk_x}HC@)dl*8fU2Iu9ev=2 zz^NkM3?hd;n~TumR#31EMe_ z;6t(ojMInyf z%q!BkeZL@K5R);>r8h}C@lT0gJKeeI4IVPDNN;Z+)RqsienWJ#uj7%+AN&= 
zjQo>3iD>?l-1aF7w!qoRONg15TIHyN`k`=UPFC=}$byr>h<#+uKs+P=@9*V^kH6)q zF_)N6{@4@qSvnk(H0Z~d<+FU(xTm6HiWv{Ao##G3c>q7Lro-5L;(;CrCCKpE9m(asrUtN=ojWDgj>~h z*O&&MeJ3d}VX*v`KK$dGQu2m;@Dr6v98-;T&PCxCCsrNaH*0A`s(x!jErd@|n%K0I zHhE5zv-1HPLp*`ko(oRoIIa`Xd42+W!M$i7LQ!;O>8f&mhhXTTA*mF6qNaqW zaj7Iz($Xt$WKaIpQB zKtpi*x`bWF5^_4Jei$0S>TQW=xK}BlUhf3JUqZ3{dXl4f&rLA>H866Fs9t%2DoOyA zi%}=O>LM*$L6AIgpu8bnmbR-TG#O@|qmqV^CjstGe)}}gu}+oPe{9nEyE-U+;gIKX z;YIL6J`=t&yOscwce6oDD_XP1ql8W;3_n*hNpbl_L%yJ91@rC{jpET~bPu1CM+T!A zIv(;M7blS~z4kRHUc`S!lYV32dkXs2GqwdambQxdn)oLXPks8(&{_8S-uZnl^o$MN}=Ao0P zcFfu)Tox=GPHtrReJy%2#zu^$2xe8@;8iK=?#cPsQQc#eix$Dj^nsJo{cYzN9c#f5 zUMKWw(RMVEHgzqK!T!MTWH#YZuh|hMVZ=*ymoui=nfBy{-#g^ml6iX?N_u=cK&oUV zye}Ooliz$~x~g9tGKuH=NuC%$m%Ji-9Gq>CtN)mO{F}~W`Z8a|dP;C2x0tstmY z;C49qEq8y}b>R>^tIL+XMg(-{@rmRyO7QuCv&c}Cx1Z&MmN*0p7N7OmuXu3=VLlX-YRVP!lo#DJbh9zDl@dZ; z`w8cL@kZ}$buT2n0gEmttrQD@KFgg}Z6616TQ~V*iR?-LU=4fy?krD|s;E4QA69Ch z2-lGUq0-K-WWmgPIDoA7xbf|ILY(E_NahkL z4^5rmGH6AIBuON>`=*^2A0OkRaI#uq)n#`QQk}7;Zx`o3A!m79UTN}aYG8^NNK;weL8NB6H?4p%B3&>Bo7U7plXb!T^$E&SNTV#p3`Lay|j7}Q5e zZ7p;2nXVJu0N+3Jk^RXp(Pne~`_8VC$TBSzEeB(kuG1NQgwADZG+e)m3PNnS0ED1| zL_&EjcU=#K&~aqmSbp0I#D+AO8{XTNs=Ph1t|=9Ch)C(FqgEgI;JzeqaqKFOM~+5Q z0Z`({Qhauvn+aF09{3h+w}%#TqdY1Wvj2hi$d~Q^7pYvv@d7?Fd{Fj@!h8G0!QPYK zwx52KLhDqsAwI)mX|iHDZ?x9jlirEZlJek&q9 z)?8Z3%F@{MFh{;%vJJn_Tgi^M)i;Y+mAYP%~kHwI-NR5)`$ZCRw(Q2zLX7lC7b>YhXntmszme`s4 zqtVglbgor(8wg%vTMfhp#Rcw!559IOzOE>CsVHDX4@+KNNzAV;B4?7ETM^yvWa)wN zB(CMF!1|=)J0c+0S>GM|EFU(#$rcmEio)5l0zw-&lcr}Q?CZ}U(oHlj-CnoKzxEb} zr7@gXo>Gn~UQ&5pUyx7J*CPTF1Fv5@>f3BNQzlR_awb=>WSI9iUjYXWuWmE$^G$bj z7>jpot4hrYUTPoALFOA?I|_zSQM(+K z%fRNJskp}xS6}?VZ4rfWK^c+-kNT-~x00_emu2_e1+>*q$nAdHt_?+|ZEhF2<@K4T ztE~zWnt`VV$~D=GycQrwWfXO&H|(8iw?HtL^ofB^xD$>)qOvZ&DT{MaPkh2^SVssR z9zTSClWap?XcdByPNOSM2iKn7qX>uIwplz{nuNwt+ZwJ(#^)2(0OX)=}dkFgsO^#=@1{^j<~wzw~Y zpqTGb7(0BTvf4p@N5H~7dtajt+H#IBz&0t|^mL0dlG{qzhDuMaR5Jsqo6&SG((bX` z$5}CohW_14#nA>O@BPseCLMSr;AxedYh8-FPJf|c&~!)r6s_1`9!4m4+I}zfwd^hM 
zn)2^AAnx3@2__c5L2zt0*`^O<-~;iGW~kSZzrM2#iweA#%!hd3@fS{1QUu!O9?Y?M z=w<|gE5Ok*SjIHFu>qwLQz~;m3~Q3_Y_$^T?7px%+=A3(wl$x?}_XV!eE2OQgM37@ZAddKI&xe6%dAS<@DwL85v zN2L@DV!V}%GMsALFY*;}=kBpJWU2%2!$xV37s!PK=ev117!tkIKd8fyx-h+|-MhL8L9#j?`t0j%Kg#_m zNc)R0r`-N>|LGmq!=ebdu1TTfF`$RsPdc3SP{M%_OpEiX8HoNalf}6SY zwR&e2Dt>?r)I?q~T(nYFs374%lHvZIz|=kjz@kx6RuURx`4R{)YKc#YN0xU+ zNoCrXdfXHc!zgxKm_8_4AIEoEtcG|L@pYc}ZP=Qd>_}53;xIc_RGKo_j{? zu_$+*fDgxW6ZRLKImx5VsT_^YnM9CX-Y z53$tcarSdHr7J{mAHGzC01-RkpHNfno1o&rYli@Kb}={>FZb+}g~QENWe&dDBFqHe zrelI9pc`J~c7Ue{1Y!l-K{sPT(Yd$p20qqZI;>T-#i~o>6@{T%c_XWyO}Y&5=iH7|=5v4vR;tMbrvk-9*XJYJ;^q>4y5{j*_t3>Dq!*&07kbsr*^* zxs<(%aJoiFLk16&&P%>Is;vw!U&|q?AQBpJCW%`HZsUyy)Jkp(Xs&_}w%trs4`TS_ z`OLuVc@2$xMZyv7XdAkz+huuS^OvncmCeuO4mP}|m>mmViE3W9ebn?tbHQV4l2t^5 zIgK0Ml=`8q2fuM|0%BCIG$b%d5-imE1EVsNc7K5*6=^l6k`mi`RZjQF=t3*=Ll6y) zw7E-F+)9I=7+HnA*xfw-blKltHTNF@mkG2Vtf(jvNg-)|KW6UCvMD4`!i0RG^&^fE5H<4AjV-@$&Yk|eMn3Cn|sZZd?L2R zvWQ<8(f=Tr4b-Asjyrk=DgqM>j6V8yI%fu(M%g&ZAtq(%2ML+u-@`cy(!@%_RF>IQ zyK+Y$!%uM;T?w;Db*8#a2tA$(`i%4LT591yWQ4to+n^vaWd1%oY{NZ%7QRLCTU)ld zHur*_>9lz}cD-XJGCNL_Uvp)1KPCy|RnjMLRN<$;5#n%5BA#N3l=-LOGwKiSIOP!Ap+p>k^WGd3caw*|KIeJ4)xHe2v3L7!CVkg@zZ z5p8%b96lQSV9|p|#8J7Y4v7uteK3I`Bph817qDvCUtTPrw*<)XN4IxAAWQJUKid7l z3?uRvzLV|gg{0|d@SW?ppPRJo)W*tQWq!|wsJ-K^4TRY>i$(co0O5Bpr+Un8t-5+! 
zNqk=uUH0IQ)|`Sxy>|RYf0EyWG97Y*IF;QD7b254vu-t+@VxMu^+uxXVH7)P&){}z z$3}g%cP`&`#o?+}qbkfvFaQLfluvWMR+vv!M&Ed1SfSeJ_8p>dC8oxkJJqGCLlB8tI~5nUf}5Imk!N zqiKUuk5h&4{X@P2`MV%`$0utY6%Wey4cji*NBr#j^2n>q!B?~n;*qzaa{G1%I9Yzg zih46VF{gH2f}@~YBFy;Y_dRg4zo(ah_k!n@PGOB><^{m*ZlbCj)`5pFAj&!0d;uk* zEJn?DTk*?ax<(h$zT*b*;j1w}B?x4eW5DK66m)6@*8Y$`YX58AidMh!&UL%htqug`3J*;t1vkh~IMBrgAE5z%T1_MiTAQ<(#QhKGg!0jhq&Z6pnf3-F zgjY{Z7B7pF{ zqME8G@;I1*xSt68DobvGxAMXC@(PLou_1Xdl^l99CJQtqC#IMKe{poTFLi<#=4!hL z?7XV;J^xUzcaz>+HWaxp7nlXO7Z$8EBB2l7P6`4(hX8!kZ+}Q*0ZLCytT3XTN6b)5 z?E+Fedq7{`FLIshI(AwZ;g1v~d(tY{COHKEX*`PX28ZyH6&>`vVKF)`YqZRWmX6e& z1!Qeav+76x5O}S)8~VG~DsOw~=2jvfx1b{WG^dXWGt>syOF(Lz9@r9`4d#B#D0Q{{ zttn9e&r7qkNBy;dkfmL^5a0WF&QbT# zo$G7?lWQICXX{7mk%EJ6E!>j-^k`niM)y;t0QZrYRKzvSyorHiNFym3LgowgOM;@g zv;5LNnM8kR5D7xqw|WNx16>}2*3`_AbX)2aGq+bcRpA|l=> zZOAN&$CtJj&-^24m0(VHV+PNCvVZrBq}T@SQD7k#9=CCN6kjC6~iyvEG2AF3P#g@Pf#SFoCHaAs4S4;=tw+FufMA9{7w+-l3jTH-Pm`Q@MxqalsP0-; zwq2&hdYrP5e%r`@n$D#a&xcMRsG0#tC_un*eVUfmeKtBV9wSHLM~>k&$}eP{XiP|L z8oKvkX1{6TbCcd547x|xtheucT>Q1Wx%%M3hN)~{6OHE*D7*p}fthUI&5X%0)YYxY zmko2vd>A&OU|c0GElyOVxNSB{Ve*bRE@j)vJ9^Z2`2u1oK1@-$6wK8}{d39matA`i znx-+b+E}>wrtoF(g~<2Ew^x)F3eUj!hRveG*r2oo`w_jr93C-Hg9c85rekf~Up99= znsFiE{P+Si1>v=;Pti`NmU>OVGz8}vQlSl-+?T%>nK}IlnGcAHZW>;e&GK?eskVwib*x~Q zE0ymR_3S3hq)CB)eTc_mcp(Tvnlizq?f)M8fDD~EQ|B>rCfPm13rt2IffIlTR5+mj zRT*W*VCRJm;y}oSHD?2|({r^JWB*|C+*xavST!T0&4=_WDh5bVR|OC3==fmq)vz#w zg{W^ls(^ePL0UZz;ter0Kt8Lm}5p1bm#YEIn;Vd1=o#bp@ zEB=UFnx`Kt8#8@L1U=tiGTOg7?#EY96I;vA1m1>07_aYN2cvOU*g7F{^pr!OZ)ezO zY=IkF`<@hkX!mw5PPCW^2;)LO-{3+s2~^_kCNH?3 zHO1GmLJL$yzYU_OEtM6LyTj%X5-y0OH74B5p1OyXs!2`vwKa43T+s?dZ@Yrklz zfM#g5C@KCWo_6hB*f z*vBXW7bKu>3ZVjDyJ$s|1LPS;CZ%PgmJk=fNW>di&+?wEHcoPyYL>U*%G}O@W{0=a zTC=-&3Nzrroif8R+2@e``U_NB6MiLH);*7d#vgq@Xtl8{HpzbJmcdMSfNkHSm%>5J zE24|Dz=ASekGQ2+jFEVdmD@c*MYkxU6z8q1_3zPdA#~I#43Qp9c|ARK20i~bey;O$ z9PDOXFcin@@q8jxwdA$dGG9;v_dj!h0hpLXtene|fH=ddO+_iSlo>3MAa@Ab>TBR* ztBDE&|2(|`9)DHqEv=EGK6~&ghF>g~)HGf}z8lvIPgWO~cBRFRH%5-#UJiNNE54+< 
zjkVYT39zS0IFcoRMc5M{;(v%7H(D~v>XWR~i!q9BB#7)`?J7a3ZMW^EVJRs=XsN*LB&lXQc%dE#D%*A>XURrbRl|3I>j_lPG9`yhzsPer$z^Z+EYo*LdK^0D3SNa{i zy9IyuH)jgy8=IZqngA^e!sfkp&|a|`th&11QY*c*ZNG45RcPf$7i$)@ z;;nrG>3ou88JF|4W<5waKct5R;VP#KbnEuy((C>m8YQh?Q^`{!2T7IuN9P(PsM}HU z?9Yu{NNJ?XG=Zb${F@o!6r76+k}WJ31m;U)?@PLqsNs`WXLjJCD*g*2c3X!bl{&qX3rU+{n^MWRHwMQ#2*097@=0e|Zp_W#`<)Jy=O(c@b-+(SeP#5^8{}e! zEm@|gy+#z3;--#XE{`lxD!H&0509eR{HsCFs0>B?P8YPu;I#tbBzCWPA-LgkI+gho#eFnssHI%7*j?C5C$ZhxIAZLy^1p#bzY(?F@q*nR2|-R=RrNB zF0h9Z{QQY}i_)X;>p-nEvTMJIAeEtnF^mILm^ofYQPLgC{uzhgwAbtw%G`8wwu8ys z*3MQ0ub$o_o2;G7b7`DxHC0Qz?QwabEVK{YVBtkens&+RBNQsbitf}=>H#FCn}YOp z6aH6CuX;q^#1vXTtxHxx(P}d+9-ROdx4g-_xhBpibwv(#fRpy(=WzQkm~~O29-WT1 zaE9}k_yof|)3SqL#DCX1^#k`CH+M!pP`1q5P3>?X0t?O70RxO(QWZ(pbdb#=Zs2og z!-5HBM}bDgh;DX-_*Q-hVWBX!FWoqNsLd6Nig-W1z8n=f-mD*d2}?Pjk}K;fEr|k@U^oRubFa~tRuJQE-kJ%w7V@Xe->RgO0s_Z{{3rPiLrc$ zO}*0-Pp+Y}=hXdk1^9Mmm)VnM|Y2Q8{$4x`Sqz{SanM*qX+9)m1 ztH11uBAWjSbNCl;Jgg$WvscopYh5)IZA~m8p2uN^J9dh@83w`Hy-1>gAmE zG*vch>@^(-V6I%TDmH3_7B-5QEDnx-qZ{v4v+kp)*BH@w zay=<=eEFBLmpByVU)EbM$+zw;NSf&`inRhx>$8Jmye#8S6qTNaQ5(#jBwoEAJAr8` z_f9{j^-ele#Kpw2H`9pKCK}h<42GxyVRPx)UHe%ckqVvn{nEQ^mrk0ysuRKkr&`5$ zkVNo5g|4WoyDxu!u{*2vqm=89h}~j+(d}NiGdhn>uCrey+I)Oi1L_RI$<*aG>aNSo zrHzI^KJE&ZW_CvsW2W~QZqcW0!J{_o9{gURef>eBfVB>mZ4#I-8r>CN#^p(@LrO2|rT4l9LMgd^L`-Dt;)tG6s2emirYU_n z;cGs$aPOgYwWcS9`w7QROVg~ZFIyVTA|}*Ur?V)5Rn(i35AYNopskI&DIi+j~g73Fc>lEun#^3)E0tx1$M0-Jly)|xNovng52@{Tx@|F!REEl|*h zYe*h9xwv(%@gqbyzVger1x6%K^@bZA?TV#6xK%G+yOX|y@N;%GK0G#(`8iG*d1X5sbmWW)@gtdvbp_fI##P^

    z0>m|p&}PU+?M~b>*(wb;iq6+9LDBCoCxTt0s@ zn?-bQYUEIC7{zgfrql&D6h5O8S{@|%6uNFOSM+#x#lm1+1h4AAYyf|D-yDM$lY9Z+ zW;T`sx6cYGJovYk*S*>$={?Ocj&8pBJzG5Y4qD^ywl+A+iqrHxo5d3*&iIM;pzfg4 z^^Y#rDrrr3l&L?lecs+1k=k6M|Lp~+#=hm|lEzhBa@r~xm+J8BId;iDGO_ZkarXrW z$t-M`#(zMvydIc+8L>WNTWXZh%*xc(*t&6@x{G5T)us_|WhAfkez$#mSbLjhUrP<+ z+7-Mo?J)@3aVXs)5>lKRN^<15zZWj+lqG^0p_!Z+2D*zAI(YP4IF`G!A89v)JoGHd z?wgG;NDxtMa)5YoWcSg`%XhPvecI-aNxH~{I z!@=W$RYhATxXn_0vr)=q4xne=dP{$xT;ZB7g>k_Vg)+3TAs~O9HT`DSY=!fFzlsWg zaC?8v(l?ZM`udnxyr5Cn_EWuDzA|`qL)3Qq8Dyy995o@4wIu#{?OctYe|4vAIqr_@ z?fT?R$KiCq?SpA1-XYE{XQ9LM63L<~DvW^{lt4lr!%(r`HO@%#|irM#PrB5; zKTjGDsc(eQc!R;e=n4D2TZeP3yAz#0p|_>5T%hr&O~w1z7{~oZ#*ka%Q#jgY^v@A3 zV$S7u{2EU*_cE^OHBhSW+W0akp2n|uRZz*{NOw(SUDwz5EORNiZy-moWk{Ra?;sIv z92nQ)R7e5ora(q=cZD@Ik`R@0eSNyu&RKw6ec%=*p|uN-$kCK_;wEco0U@HcuuN$~?of!{W`eSF!FJE9nNnmP*8NCgR8m|_==$q|9 zJ6rK$MyIo53dDfSzSKAIEZwd@6$NQfa>R1uif*$fc^p2Odn{&ChtSB|Dq^H^0(SEe zw6vgqn%K(`hzmMb$Mq?Pa-0^x?4MSQD&dRQBCw5rFV#z+)aSTC zGE%Xi#qfKXkata-M!hO$XEilvnVWu_W^1;@mbEp%qXzluTAA(%eby&FxC^waGNDH3 zT^(oYv&$<11s%>!j~T>0Ob4B>R$#GB$zUjURc=WIkVtFmLqU2G=KOs*XsqM8z(ArQ zRqh|xD@yEAwe`H@NFFM>pL>B*{qYEO0}m2{f(FOm(?b}V(-ZOFPyKZC%eBtUMk!;| zRif!jlz~<=H6i)nATdah@)vxV^wg|++qz%;v#g>XTr?xl>MW!C=QGob0l-ER$H%qM zDJ7T9^AJS-;OTMiN2}KV#%CCmgXBrAQ@5eDQTa+|!-v#@2b@Ex31!5tW@5QMyWy!N5_TXU;OG2QKqFnwTgrgBoPC4V zsR%%oJ60|2X5cc0$W zOTZh*>FMMkiTfuI{>QRgI^;Bu1)y_a&CZ#bXoO8A2_o!&ek5zCPZU?O<}_@aKXxGT zR#_eMnJg7#IV~(34}KCpEPGa?XdOy+asR(dqyC~UB#EE@om*HUAYTUuvS-rZ+?ZQm zDE8A-2UfLa5Bc3{A&#t)0?k~GiHV1vNjx7QeSCR2a5IZ?zNr(`;p%8=)6~4)H)RjE zkW*7|S3a7kyRnFqJh35|s_cPBCKEOjZfuYQa=$qJzh)cd8L)Pfvfj{7o8IC7l34aOzK$dbH^xC#9GbW!L3g|zo_RQ)r1S}1@;jB z2leXYWN@Ta|J{9W3m!A`ubJB2PT8d%yT|0*oFqfz$~2T@lTzl+-FT)+kXnMwwRR>_ ze^}01Zl!aN<%zr_SZF$n8aEi%5^)eR?E}`>1znmy8pCs&%*^73L4sWeSGy1x+()C@ z9-!NA7aXN;%HEEyvg+PPbQ2j(qVBk>Y1Bs;N#G$@FC4|RC3Pvb}_U)g!^mDwvxe;2d zrh9&M%PFsgQEIq2KCWLlDKZR?V!>m_FW!<5p^sk%LzFNJ>0#40_AluR4Pl?LUeiCV zZfKb>6jqm~lUG#xe}RF)f=IRuANeRv$g_NhGOI7DYdE{if7RrqQT_ESZOL1U5@F?d 
z{dnI~91l=sy8-hrwTn!|?dR0U3R;;BSv{n9H+Y~82*${armoZ`jI*;{m|rW{gtZ4> zJnYP?#O*ylCr(ah7Y!z8E$OJa!M%QB)IG`9m~SD5CoPTASJ-?jHlsjAcZo2X!0LdP z%c;SW>p!Ede93#Y5^;J}OfMc4HT#2K7@i&kx>x0B1_haq{c=`wv@8-Xp5g1dR~gtN zlZDJg17jL1wuj_1{uX>s_d9_lue}sTZs+s#O14Rvc^q0q+<3U)90+DZ6fr;vjbk_^{Ht2IBM*%p8*=Ku97KO29O@O7$h0$lR(aI3PNM z$lUq_QJ#!sR%f40e%XJoKtzJlvG$llN!jQRr&PwqFa{17E-@T+n)#`efHpIeQ)pUx zR1>&qv!SCol%EPNm8p=KE9I^}^z6TL{r@;Irm0a}1`o`i3T8IVTGX@0Dn7e=y5~2r z=5@)9ujtmGo~)PkYX6yHbFw~-v`nQ}H%#lyyC19bHnD<4mRuFu#b%|7CckirgbEGT zc9=^u-K56U{}xB9)~BY{XfLS9dqNvKem(2W1rU`g8hJVBf)*NT_G}27Ue3sdlioMEmM_}K63Wb0CQ%2@vk4LQ(4_#NT1Ihnwl6>>VCeB zx6sC&ga_}K#&PU6DHE=_=KO4Nf(RK9L^k>upla1tIaPfjFFB-Kg7<}}v)!p>VNn^T zF-8Lh!sry7(9BEi@(tz4+f$Z$XCXX-aoo5%gKAHm234qujKm*;f19{8G;9RoY9v1# zeu)d&6|a~EHbs$!TcO~4{Cw|_m7Al9m5Wr4Q-dI&IY+9^qo*Pk8ZJd5jX<)Oi& z&WB;5uTArLeRb1cJth+>J8M_g*qT}T1sB8qB^1YniGCSXlSd#d^4~e+dyknsK*i02 zG$%44tabr7AN_*({(*|V^`m~0`yVYU7q1CiA(O9B7+w*lRigM(lh4}sqUWneRC-bq z@BGc|wy`F+Cp}$!`4}7pep_){|sq-sZ{&!vY zIeTo@uNR5Bk?96_-C7(vX5NfdZNNN82!`5~!r~9a^^>wUaKwbxw*Z&PEfpT}`uS&! z5Rt)Hk~LL*T%TRBAhTdCnV);=7e(ose|hd3I_gKWDX{q|=KwV|)jS*|Js5e|zC!sc z70D>xew_UvgF4(FEF|izwmWPZ3M+m<3PO)h=P(XBzJJ1hbNTn4uv{66#AI^d{bp?1 zjDDvhqbjexQ*pij{P^OcXQUM|Uh#mCi|tnh_5_=JBitV=M+jJmfReu8)WTmi(kWJr%FNhcTbpRr@@WnxQ^uzyxI)(aReGp8J~g@_#t{a5KD@`Nd8xYwm%|lAAS#UW_K8eDV4^U zD3ybQ+AmS2i9S~7p5FKzuQfwyDFq&JpHP|4L5Bm51SA`Kb093@AMX? 
zHn8S4S`ZoAGK5Te`Lwmi_Fa@FzKn^VFulxU*u!XAH9gH+{ zaz_bwof^%LEOw=hWiZE+8onvbY(STe`Wxv8ipWR$P54g_;2Cps0A3)N?>_9tNTQ9m zxaPod<{-2wbRsr7Mo@$M7a<_XquulS&qc(GQZYUBY--K|88T83Frj<`hP8qzJE!)% zI>4hAVc3;%L=uscvaelbpa@#)n_C|w|I9(sD*Kv_nXj^fdvgKbd2bS``Pn4jKfACK z*a*3TO?jOPg$oC`HfZH*U|hdF38)I3fMFcBvSs@G0N4l2<|St=g*r1@X}CDPA1_|1 zF;7^ls+2y&arzhmp^?h8gJrj9=}(Q*8a~{8tJgEVt};{Dg5WOBj5;}ymmF<*N2}k5NN+!m=qP=Crm|?p&qRekR$+nK zlQbM6R}vg0)>9M8g;m8xV2D)VGiWS@8e3dmC|m9>#u6c(MpbDt9xRf?R#@?LZR=Ql zl2?sY`2hRef8gb)d-HgW$bKhtDf-8r*cT?pWK9VJLlE{4qFK6%p@~R#!v6#{-)Yv2=8>ZmC)KymKvH|Vz`Y?w6Ni2a}jaZ zGOFnkaen=sJ`Pvhim>uU>*v>HmLIKU4UH)SzT|Y>;F%_-&_XXmQ6cf=#^DO_D$~K~ zatNtpv;T`b(LkUvGBS7|?*_HPo}9WO*A3F%`Z%KZ#UmxaM^FosvND3{besh!fdlsi;<@xX+ zM#3^kL%{zV@%)VCG1~oJLiVyoEoQfJkRj?f^NK#*|MMEtW>E2X;y9bZ;`p26OZEiL zWH%Q9A%;P?G@m%fp_|Q1)&ZDsJoNtTB_cZ)lIthPzfnn5hUk=4oHIIsJ+$z7u|uOD zLDJ^}rn+o_15t=}=9+`hOc)k&Yl=SU;bstUp|K_|G9m&f81p7)j~!u)kti+N8>*2L zu2&lv8_U)ycX;3MCJISO^d{=bE2MYy{cgq+10xHgo;YT(B?r|yqh9Qi zXb4<_q;}6piqm_PDjuj*GzvoSk7rvy909%17*1J(C{^H~G5wxeax@FH^@ee+?o^NZ zK2B-hJtROZm>G|$AC4ak7eEdL^B7aEzaK3IhD~jq$O`XGL3i+Bt8}_?$~NY*#+7-n}=OEJouW`SV}fV}xT3 z3W!hEYj!);(r=kFHb3bi@1})^)Bdn}-wzP2HonqWj_HD>*9$SVtPu8;WX9>b97@jU z04b^ILqJCi0v>AGDOt+mldvSYOn;(dh^vi3O9>=1G|p1%d*#aw_Nn4+ z+LnXN&(KK>TjF)YfYZ6-UdViwS^G9U0c2!7!EbRu@A+E9FdNoQpe721D2 zKh1;6W~`$NZvo{C)5(Gk{x=`7BHq+B{x>So%A)U2&-uVZP^EH^+Cq=^!kZOerHKgG zIwgpyA39euA4Az*xq|O{*K_Gz|K!-gmcI3k01<_VS=ZLd-rm>uo}oF^nw}AB;GY(n zZl)~E*&TJr-(E{dOd_Z`dL9fLrr_41RB2QYJ~V8YCaF^G4N*Hsl9#qJ)_Nkn7%|-q zL2e*%s3{aEMA@Nr!mW3+`)hEjqr)RQjz?{F)7{NAWB{Qe6y;AVBw!~*-ddNBPI?iU z5T8M*G7h5FZ)IMjK+OwZy)($4LeJ~Rg8Kw9Udk)VB9;b5|a zszzjVjB$8yKzwQ7MAyNsu*|5{v9b#4z4hZ;!GT2IKQWu@F{_xHS%_?eCG_a{fVYH* z<0u}RQ!j2R0^i*1+%-3*tW!x?Apd%NcyvzLx=oKAGPG7P0S!}SetCI@#6O_N6LU-& zPzD>plPTg`^2p3bUfVrUq;(V?9b)+gEx)FTV*VF*4Vqw8X#HVR&kAd-L}^ONq+$B$ z;Y)|FSE>5@KQavVx7_hxy&&fX9lQdswUv_?cwY9eweja>AbyShzsZFS^gF@*n{ofo zhOg*9C;vD5{B!VsF8<%U_=X4nfBU8l{|ATv=MeNN2Zisu4mhwHF-cRlHr39>4ZnT0 
z7oFe>nBkmIe$a$lKPbI^yp1;j%PlHG#nwW3x%jCR+*JkV(T@G{CvmGJ696tloU~(k zAb$Y<+%jr(TCZEJdiCj5X{XlaK0V|5xqo}}DF49bWkoLS9^@+9vJ7iu(xUi6vY>N$ zdcB)m%#mIfW-A32H?}l{>D8$XyW|$nRS5g^F;L5Xna&7&1Go#b(#3`I@Kx~N+pKt9 z!T#1Q5-cO_)ZFJaPwHEf!t9r)lxXS-Ftvu$ zpF|@U=aPq&L9*dD>H^30oILW7-97@LUAp z66X>9j{5vgy_3B*pt273sI&Dq-SRsrTjr<9j188vT3mbjQ@eEuxOHpl6Gr-@(%WY5 zrxW;NLsiurhg0eeH1xG&3Zq{|0?5??`R*7*4Tuc0P`4w(>2Sp8_5FCOYN~hK;XQ-Z)36SKlG`bX|C%A(y?(x!!>&|W z#Jp)@b;@nbSqS0tvGjG;6iwMlcb;Ny7rI^R-R|-V%Oc-2Sk` zZ@Hy&N`QKX#YzDbRon7TcKcO%KNp(!G!*YCJm8w zB1n<5HfV@lbBeOGXRgY|UhX<%eBPAHZjGtICsRJPgtonz3)LMq!{NkW(RE^-N|W0p zU(j`85cYe~C@{1&j-B?sK(Lp+7*1n6Zv6F~*UY{Hm(MPD%~k#YBMGNpmo3)0^3pQo zKNXEXZx*GolrI3*P^@2RC`Y&4?(cWW$0X8i3pVEt|PZL(@c;@E%47-h52P_n+5!+_3{AQG@ z^3|ZjMA#%?IZX#ToOUw``Mo~rg^EhrEyceNYmqJKNG~7hoJMHYt#;vkIpvilTKg1b zrtyBwewwFOXI0}pvFMQ>j(ih4Dk_(PX;@xZ2cZ$a29K)J{1M1wfcH;(IB~IiL>5dY zvu9b{<-x4A*}!@N74A9o7?vQ`f`uNGrAIFeFWkIp)2OT`FQJQK>63D(!@W zwl6J6bqyur6}+(rTCGI2_Vv9dmGi!tw`Z` zmCu7cR@UBoskxK;@yFtaims|%eqTd-uK5WqXANFxb61rIs!4=Q&p3cQDZX~1rCxL5 z3#Kh{Y?U5<{q9hDvb(}u3&Z8PLfIXAsII(pq@bPefqMsohv8vf^DuqIs~XzIJNZap zxVmaq36~czWyxN?+ynehS@zumze>5WGm1`VdzOn8WRdVt;c~dOCReoxW8zZg`j{7l zg2%#8{DpBep7=rh{B#OzJ;aMo1I?>E1uc{odzNKBxnFiwcgM(8{=KFtleO%fB_7+9 z*XKA#(?0dv-qMz8V6m;>7c*?%AUCOD$F;rd$ZXhfH6Pc@LP6v~ajW;;JBi|AWAivK z;!oDnPW8ga4j`$gl!%-j`W@qM-mbO`WR65$`VV?gRAE^c0kUp{rD4 z@JSz+TimmmX-m5kTEw^}D)g{`xggHy{t1an+%HFJD&r+!#-*L1%hV92f~e)Oy(+jS zWc`HK&$jC5)2R@NsTPeB{7IpG8|`)9(p2Llj`ogr+Ya@= z@fJ>Ue4H!ie5ghUj&D(4MvFpqp;^E_$>lfq8(eamC&P|ybipD{vSDBa@$UswW83IJ?P)?USz1xtqSzP&1)cN&nOa)`Y*N!b zkwmZG_9ZbQvaYk|IK@Eh$cYyPoMUO1WquH0nMlBjIY8-}lcR!W&zdr9Z(-*aCB!j0 zy2Q%XGtfJAyw{L5k=2U$L-izAo3attZ`#BVdHOlo=@|NOo1NfXj~E11Mu$&{0M;cbs-%~e zI`1sJ2vy`dd6g{p8sVIAiaY7u>9NgauTAUQs!9F(ndu1U?D*8~Jv=HX5Mvv<@&V;Y3(KL>Zwo3Gq0+t%b@Q~_)bXxXY|*yi$kmO_D+qXEwmO+bY1{i17MNM60S~C57k|d3MniYo&6rvtf4SXOP0s^5%(r(7CIp z9lJ&xIELmyZAF6e%3XR_S$dyWDfJ`>@>e`_yWGi^_T+0*(FJHG5*taxL?wsshTEi; zcZ{YUcBV8H!ojJWmHnoM+pB|GB~mAkw4L0SwIeZ1OOvgX<`dV#8xD?Nc;3PV4tb)K 
z1WDOg%Xa+b?wKL+@X5-q$d~cjX~N{-RoQhyiGEKTyr|0a$L;B%8I+uOTDZ=*!1Yy=zZG^qVD^xS*D9&y;P4qR5 z*H3U4r<);izDZEiFfr2SCXmO@VF|49<$t8!EXV7Bku==Hk+tKusx*gJ@PFA857IW2 ze=7=-ejo5g>v#XdH_?lX#h@uJwHdI#x*RxMN4x?4PX!vyd>T2ow9{&OyC91MZRw&Q zp=B9xy)PS<`w!CtAo$!UDCi3^^XCJ^|G~Vl*;Utp8v|?(^%zkb^uXULeflTj@33I)jv{2HM@i-y4Gj$q4UM4&Ltuml1xNMvz~a`nT8V zA8-Pb8LZq?fUzUuFfKFx0!XN#asHu6A`J)ZI#Mnf2bRqn?T|YAbg*>O4WJWsEnTo! zwfd`}p`oEMIGB|<$l!zIm;eL?c%L?iD>H*p!JaiCRnouTyf7PK!Tz6$tOLO;MjQ8* zASpIf$rFD@L{*oy17$78(9}^2YmeokaF^52(9qC0GcdrsuctqvgA$P(I>IHB+v&h8 zf}8i8Kw(*fCW$mI8EBG7!vXvADe))LgTmZ^h=_VCxZ@L)>z9_Hd^7Aj8KHov*JTCH zov!WyCRRipmUc9CFou#oHD2uKwsy-}^|9czQ^U@^kN|F!VAjEN%dT|M2s{a-O3UvJeH-a?ewbrK&RZ>(Lh2@SPPm^x8KXbP;lC{1*Ug+ww zUO?$IG|n$B?-P>7S>x*2nV2&n74gx*XZ%b>U55k})wmqtp%kfZwXvDb=&gyjkFWZ< z_!J{!OA|hL_hlT+-G=to7WnxG;GT!Si|p~!(5>2cIfCu7n7d9X$-ce_2ndwFtyYU% z;~~e`(ufsrK8MPa#dzY!e>(4Pomt-Qt!uIVy*IIN*&UcVYmt&oD@q!DP|He#(TtXM z%V|j>p2(Y?&v-FeXw3L1`RGq&Tc=U3oFVOjq* zx;PTOJ-z+5r@dG#Kfn)}*~75#+A9zoX47RG>zeTDGjFTC&x-7c zBhUNy(Q2_`$A-Ol?)R_ajxXMTsdFZt*YY(k9<-T_w(rwfOd&51+Y1iv9U2;!3N%Ti z;eh1W(6fHV)OB$vNgC%L-Q5WLY^$U<|+!y&)gqx(Z-nsa?yKvoYU%05u z+k1Mt@#YJE#QX2QjHHy|NJvhD(b&{y(Q#7=rj0_G9L|~9X<{U|( z>Mk6UL;k=T2oIlNtM(o&_3oma^ zjLaUc^d;ZZTcv4SlIZC@_i?>gmJAJzGl3?FG#pS?-Hg_DGlnHaz~Ar8riWhiE?-zJ z7EwM^hiA!QpKnpCDoRmuJRflhsmK^H8Uu-z%+cdr-q7)HXJy)c&zUOO?#ejR@{S(f zhYwf0irEX6;m!x1Pzqy^D2Rco=4<#8YL=#0u|JI?qEHF!O>H5JI+`4MW= zSdCC);^X6onyOOl-M(H(p^&ghjLMk=b5|z{^7f*&r5SVYzkU!I&pd{c^Yi@sWJw{4 zj^-g<&6V_FSz?LtNpLpfq;dISVpbwn{^kBN&MKy%aXN54Nl1;u_kZ&>wKw$0l2>-T z98Z4h_u^Le^`Ab1;aO?2G?Q?lrAkCp7`$jn)))7O)<#yfpD>^;$#%~^D>IAPV*6YD z476J1I_B-;B_tG|<>w?7C=0D&x}991Xes0&gFpEmlO+&+P^6u%k)xpi502EDsE6cl8)Q44sRcu&d1N}@^G*8^zu+Ys!k7Eo&1bj z75`0Hc#ph@@b>Kgw^$)Nm2dBt9?I=u``bEztYJTTdwEGe^I6_&RqbV;yR-aZf97sG zp!P8{<}RLrN56NUSQfqf*Eg|d`6iSVm&-j?H3qz|tJ93hv&O?O;8grG*1>UPE_QE6(fq)S=6hc9O+Kl^!%bWFCj3fu<`!R4wG2N@R@vl(OwFbqrMIS&sc4yQK}|>#%Geb)7$Ewiggt 
z*>+D)Z?WK^to9f@9j12aAAaskq_EADnRT6gJ|YU_)sC%C@(2OV(KtF_N}K76PJWN+t=e?fB6|&nj6vHW|Z+XwYOp8n*Sm+A_}9&P3xD7 z?O?kolSRo(_dF76tlt^-KIAxZ92nP#eQ7b9rH{6Jv=SYv9`=1ibR4yRTWlgVZZ!R8 z`Gb*N4s$^cMqV<0zP|o0b3x;?0fWDf1dZ0vIG^zK^+8O06u6%9V7_O|K5^9=H)RZR zCTFQNGNgZ*;$E|NTOJx}8!=XWR$f|-lEP9%$3mGYaDiO+9&OuF@G}3)b+w6 z#V|G*F;?}@xT#}cY;MH?T4Jf^NNUVpI1Q=8lluuJ?kV-vjjG@Fqq@A-W^tqXW8#dl z{bNvH-5~9x56DTm}<_T((tH-$J;V^LCg5{GgN&`{SX{hW{zkIAaNA>kqYdI^an>M)e8 zwCJSBNm3^!J{seuWeXvfw;_fbRRQh_FkG#t=kG@-W6*!NaN zDwQ(xAwt3;lma#p21Ag;LA~rm0oJViH)dXOl>|fHziTtB-4lff}2*m!~8&Pm*FPiFWg{VnM&%~1J?m}dAf)EQN02n*yWa)7%yZL@8k8y+=>uZpe zJy|JQw<0LWW8=C$u)*8F8cfbHA0LCQ)_%@hJHG&~Ah)~{5uY*q?(HAat{ zibc!rP>OwUUmLuHBw6$JYsgpq($U_E#FSyuhVaN(Q4BjxZCJD7Rn%0LW73Ry>i0?H z?b|L^Ial3yuaGAL!MDdpkL*WVYYP@$dk3Oo6NChUyv;PLgG`cAI&*~dsjVuL_D!3! zMD?95EvvV;FY0P4uxaf((iW0iVd0UOyYL1z=9BxCRT4lXdX67Gi0zx+7vhlQS*W^q z?8MoawO|>#I!wsj@sTyB$ppwL)mkSK|?=`cV@_8lNiEeB&XwyGOfOUv99)x$0GGPnR6}X?Zep z!4x57>_LvXW>=l8!9RcZvRJAx_RQ9GyM!S4;y3O_>#TM>``ee*n!QP$;auVGgyc9p z{noD#6&EQMJKI0ngTMabIb@B?RBL7kHm}(MQ+tPy5eN4i!B7A69knj{iY3iEFRsFy zFRVaqWu2ThS>D_w(=lo0I3b&!e(HG?6`YW@x!_{`q{HW>ST;c!_JL+g<$#0kH09S$n$@C4aW;g zg@{`I+6NKyb zktc)^Ev%B$6YxVN!|3rN<(b^=2l31A{aHvV#``*QpipFvzLs>JZXu&b|Wt z`1jwEGBqw;P?w%DHbs6kG%girl1Re=9=6w3LoJ5JMF?7MkW^`JHDc0?1&B{dbMTDf zCiua-uj1W*{{trqk4S)M#{L;Ud5(|(BvU?E{!0JbIT8`uHmpK$NEoiY^+9>Q^5h9D zf8$vpCydPvO2IlLRwQ*bl{mbACsHy-V&e4qLPn5$dF$mrqqU_8A)ygs>C;eKiK}k> zybv$P4)0U-y(-ElNs8mg@?Z!G5(^p<5bwP9H<78mtp$NWp>l2a_VqY=I2R9n{bv%t zh(ycExBrC=AH1WK<06r{Q`LRNqU%MmUc2&je7NE@RF;=W)-;k|&5gC#w{xSquU&lC z_iS6QOu?cau$rUt1yF(t!=AR^drM-T52y?G7Ze)(y!@?y-W@Q4`I?pEyF z{GpI-l!@cOvDvitzgVT(Tvbtm(9lRp;Z0)j*pYp>?@K?z*h#a*3ahcMMveC?GKOuc z4}*hgdDtst&aliJIT)NMJS^^sq2bXOsp_Dm5rbsE|KFFCvYw22i*JxX=gveA4=UxQ z$JM>hqN1!AO-*$;o_|n?$2!#?M-J{r#_-X|o;X7+m^3s#Ed(0;uyFQBWTi(BwI~`F z2qXfI9y%_TCzEE47owrx%2KVXM-ChlYY!3yyq6?N@~}kYOdJJsrx}T9iTJ^I3Xfw7UGA=?030e`g8rzY7uu!ae9{T3z(b8t_#je8#Y zwo>dv@$>K8FNB6!wSlNqZFuF`UkmX>8M1PwNPM8b{_49zXt7+%d+hLjb(m<9zuAY^ 
z-~JHNlnm@s=NLvjFw|6?L`hM;xUF%#Hm_ZdvJ*#T9B#Y!%a}BCfymz0+Kk=X*7aG1 zO_n#PB&eSM%g-fd(U-pYl(>k|LhEn8{h^e_ewjFR9vYi#u>6gGVAb2tBW2h~eBr^b z3kk$=BheKTpDc%*qeu2hA1=H30kIN$>-trgHhYok%SVOiBHUR6ohUjYbAc8c z9c^v6_4AJ_dB)&>zw?0fM^{(pSp^T)xO^aiqlakdBSHf3#67bmow3H{3A%leSeZ6= zqPURR!S$8I4iEE|E{j;W-23G_kUk<6-o7>$rXFjL1da|436yzCH#U+&VG*GS3Jnrc zfR-)AM@z(l==wXCV%hDBh1A)#Iae$+6H?-2J*Fk=q1=46hPGhlg2~94JX)0#B9=t< zSUx0;&=b(_QYsu0s;nXWmsaanIwot35nH0(}#Ppa6-1 z#2?0KYHewkRXJ@$l6+R7>U#O6PulQg=^t9H^ZpK1Pww_SA-`^Y z;Cd{%X})BUlXyV`QrQ#Sk7+?gLW-X?H8kVTPd+EXqsLAejYqzHuUJFbUD9a16dNCf zk=es!Jl21>126pf4RP=K(hnX$X7+IDZ@P)S`^tMFGhGddm9Kw@k5+HR)GH}qrqq*d9=Prs@5+eo>8)VL9SW9hob(ekM%|Afp35@$EweKoGS zWg+5|VzJ@-g8RWh#Glra)Wtu2ibpnB?@ z#bT+$`VQ^eAtVu9!S4Uk50R2SO0u}I99k+J$=fZ~I(%z-aPL;JFd8>yuCysGF{Ljn zm*TH zixrnawP*dRH&wqH5fht)JMVu2)8{Si4`fP0uA#0<2%XH)HjMp8xBUs*FuocIyMU^`0+Fzx29s*#+2u1$gAU_X*j+>|BgXL_#2E(kS@!kxsN+;$A>Y77`M-+%n#Zz&^T}#oa+G7vM%9DxxSq5AlH=v#nsw(xH%ZyOeXgC(U_7I;;tK5B zo+mOt_|?1A9$PMb#Ibqj<<fJ%x=B?vkDn(?3NYa6j<`9>I9+Hmddi!nSa z!{$oZYa^dqTiam>HXuNadqYhF{`1mmahdz-j~^0(s7Lmo?*94E(qxplNZhOFwnn$R zsdLAx`yWPZQjCo4U@p-l;nuF)gvPo?TzBVEeBp_^rM(@dP81zEA#)&SVitUTv=xzt zhQ_B4O%iE1Ks@q$F4u&q8s`cICZ>xINs)}qY-Em}=x{*izt^sQLn*O=xax+_iv@~h zK#=2>mPVyO?Gv{uqp=w+jrC%glAJz5tbq7DiH*HGK2qh6!DHWiN`hMRdfwhGLikLc zu@DJK=|VEmHHtBGN{<)d@4x#o<}JAiS6*|Q#2O-~lob^m6cS|NHFpRBWNU{H*1h+p zkP!Di_5%s*+|y%oS0eF3B8>5Z5|T3{KG3E${}n5sWw$&a#DqO?Gvg1DBw@U!WrY_U zoB#kI07*naRJT2Vj1k#lol{(N2#5Ao5#xGhM zv0pc>U5?#b)=If_6=QJlbk%>fBC0%jT->hMpR|Ueg^w-J^I)0bdd2dIGT;5scMubo zDAq!h^|?R)Ob9c^24cTbPWHq2DRWhuL$K=YKcT6y4oj~49EOh?+aJ7}>p^vSi3IzM zPfQb{h}J-C*Nt~Rf}9C6#KNmzf?iSS#^? 
zm~ksII$UDY@I}d@>*paLz+Ws>_HI1@Ly$p;6nohXbxqj#-d3?LVnAnFnQ-l0c-=f9 znYOLljsLv38bwD+@Xh~yOoBqwLWzNtNrGgK8HTZwM$1bUTD8#nB_Plri>|!_U0tX4 zViF=GshBQ4bwrB1)Lig6 zMzk)XD_-jGB&3f_MPqFf_V3IS;woiWG8*ceW#8i6afz{*Ff9keMx~4O)$-RrM3Cwe zx@$>a4??U+G7M9fWOFmMo3QNmMOb{(0@(}bmiE-Q|0uDMZc}4KYoZVSvqALPF%w3K zB^TWY5AMmAY;|KMj_P}H)h%rq`mi^MN6IGq|%%64D^=iC#NwD;j$kd4!G%gWnl1Re=h5#Rg1qZ-O$0gFRL(<+_B_<6?l1bC%!(a#$cON?#C+67U zT!e&%V^sE}{=+*Jq2osn;f)vmpcKvHLe?-ab4_KLQt(VJEefu;PVfE2LXm4 zRdx%G9?6w5mfmnrfBIZn`t0AeQLKJO=S)%a+*?Q|S~8U!&qukEF*ZI$)iqB2yGY!$ z;`sN-9I+s>x2fu6vFeXpB&1{@Eq#>8&$p+?kLF3+rrdl#f>mGets@DP1ADi^+sj)B z8D*=gb!L1Yy5HGd#uAd!#AR*j>?LB2#6ZnVT|ayNG9i3O%nUwEo)Y7TZJ-<7 z)Y*%~EsmB_Bxv%K45h_Kg(RX27~=&o@G}E-lb~bVAwN~;)LDzG2Gio} z?vf04J$=?_>sI|+WE!Ey#}F7a=s`l`@&(5~K|~9SHE)C_awk^az>)0p)c55h~i_V zh>nTCh|%e?x3P|s#T9BzE)(J;B|TBz*wIpjZdqUY{sWjbe=?r^-K&y8j9Io4(-Oq8 zXyXSv)LPks*^8$OX=Slk(bC*1E^&1KV%-C?N0O9aVC>lor;1yXsZH%qyABKCF>ywY zSWZzUAu(Fogm9sY+?WX?)&6Y|ck_y}Dj}zsS#9#{@p3OMnF{g>B~usU0ny!x?V&{# zt%V-?=G{nAGWYb6M39W9UI+;bLQ`F{#6%*o#CzOjyR?IK(UorVnw?^)#O!m_Pxwf1 zNRTb1J(DWhchn*_E{Q6%JGLQUX^%H3>;0@JjzT00)vFe<26dsP+u*<%Nau{ zYwVP>oW&X%=Lg;DyKZdjf{(W+f(*VxEsn-{1j&(iURkZi)?0!p)7oYrF~E2`NA?|4 z`${shn31dRIa&&>Td`3rO78pe9k~8;SF8Q76&qJ?MSV>ZuDk0h8^J;g&b_kyr#w$ag(CE8ncP9ZRIB_@Z0}+28KX?Tm9i7VmXt4@R(TeBnm-;zV-r5 z^-U5?nz4Im-6Yr5e}zX*NTBA~i>4twDqODf#m?prcS?QZr)483Bv9O@7!%0X-xq!X zeiF#rmL;xN0#%bFVxKUGHiJBqC?TQr@tjC6D$AAh_duZRJ=lhWd)?p9-~)JZEW5=r(5M^Zswt~ZZkv2RFX~Razy*zBY35k5#378$ZQ`~>GKeYE&1!$xx^{Y)JwhzGxIb_BV7vULwGm%{5a}aQBvn1#0r= zsC1h+$siY-5G`?=W-gejbe|%uR{a(j5`?huU{tC(5*87L1EnBFfdt$pYU7Zwq% z+OrKWKJyE4$6{)G#z#uc7==3@{Dx8*QzT&Z$rFVVhiL5NSq||4P8J_UMOl%!UyaP3 zC~s+LU3Bd5J|Rx%u0=x2o}sNUf4}t8qGh)WfkmZo_LbKvxz~v+9WAivN>*D{AH82JyY6RTXhmw-*hE{gF`>ow#||b=XL3UAIE!3Cm$WWskSHzpY#KhGdmXP9G_CI$INQ z%qbfcW4fkA#q`Yua~F=}?LtRKJ7&+nRxHUhL8LJh=;*Xy$Nm!doS{p$7(ae$2LlgS zd%EDs|2(yNcQ)Yix_|c(2@;$!A_XZKi4LDL$S^HDNET&J$`bb@x-H3PzCIEoi0)6! 
zZdFxTrS`&c;DiObUPtaRk4vT0>= z>_{=Ttl1&)gXmU9YmzIkor7EMyAFXtfg*7J{-fe*MpvicFq``=|BHwT$88T@kEeg} zJl=eMIo^L~y;vEI7@dLdJoRPFSTLpEiiYl3k+Bg%GBHEjK+Bi>{l_FD+LYPjL#+ESc^IzRSy@(%%F=3a8Dv^{ zkG}6K1C1}ZW;Ry8wGPkx@_7lK&Clj8nTe}!T_l!VbYEkeXyL;E+l(RgmnZ)Te>LVD zaYa$Y6bSulAI7%$FNSb-} zl}VzAa*Q6A={9h;#?XQ<|DZFgYJ3KuwFr$*3?DBKF~Qfk{J^(xHI=0(K9(os%BY-4 z4ulC6LC?qrzH{9{CxWWa6gp1sjCyRw_ zy7txw#5Kz9UdPWE^M{|&O^h-Vd^=ijc#n`&*%M|+0B^=Hs;#aNx32i4bama9EsP}E z>;=ovr26g6m;Q{kD_$2Rl`)7IABaJ@X%$2=jq=ik&p*KMv4Jc3?K_sgU;2$?*n!=f z;H&yAHZB=A-1!I=U2{94VRz*h-N56(|2zD5D`&v$jX_j z6kl&qxUanWHWcLTQA&3%@^ZIg-r}1btj?r-)z63Xb|XA88snzS?hhKv{-`V~L118z znhW#AWl_gzxO~8A;OTk#`vkohT7J+25)bK2Vd}X*JJj=X%gcn zBGG`*uyFWz_~VMJW=Y(gu!vBx@OkDxzY&6g?pGuLJk0iDh|0V%4kj&uv%JlGTudkBT+!y?s)=s~D>1U!F*ohamW1;H1E0)d@ zIk?_SfNJ%d@qI|D)m7C?+WL^NU?Fs99YxowZA5Iy9*oL&Hnh>lhPppeHL4kDI`1ukA3d}eD2X(P<^si0*J>YM#D$-3tiCq zGnc9P5EUDVpZwul|DV10fRijQ%l+TVv8y^w$H_B0VJB^1^Rn!+B$o^l1cmF>>t9h+ zK6(v^qM%nqE~bls$Q2cYOGX7r3nJ>Sgw4CNlT*)3=TtfN|Mz`==hXD{_VjcPGxg4= zrm8EP^Gm1Bd7pZo=T%wR(7>p<#%R+Db<7>}1QY7MAAO$w&AAM4iT=FrW51`5{VHXf zo&gM_=_CL6e%%i1i8j-|>u=j{p8peND64sS{XDnoA`xt=GlJ@IRfK1v{tZ-*1;(St z#^mf^zJ4;hLW7f`vV%O+58Aj$CQ#Ysj;xnnlTS*gyoO=>cAv*LCPNT@NBa zj>+5C=P`+N_G{bXQO3;Z=%6&Wv@0+*`7<-M$9)|z2>=J+3GW`*Koe8IFSen-%udrM zeXTli5ff&*K3r&vJfoTyuOZVK>OZghZ*J*O*;&o>n~U&CeZO2VN1n;-2InJF$MjO; z<6iU(5A{pASTq;NT^CjMnc8;d4;EQC8q=zBJmUE;LD3H0a3yTAO@1M-P4KP(S?;^&vJh4rY% z;zj+Zrg~fy-+Qu0`i9c-oEx`DA{M&*2TW}4h{|}TB;(n-x!#o+)`I_PDPDVz_OZ+>Y7yX}Z^@>%--? z69|N)qkHqD=PqnRfs{-3V!G{__s;@9xm26naRl&j<5Kg&dtvi*bZ@$_%}Mhx9guUm zb#%FX)=L0inc??)WJ6m-8e*3Rh={%0JLF@(|JuvHag_uzlD38ono#s-&q}V8s+GE4 z^w6BeITk16B2S%qEPd!#pyvYF&7FsZZ+BrESr%}^BekUYQu|wNAj@3Zv)bBTk4wi? 
zmd}Fc&TP)-@kuWNrao{#R{F6uwYR=>akX$4ieSlqWIc(56OQ$ep>czYo9ep zl!yjo|MsLDKbMid;ewRPb4zaPap`dVEZlQm)j8SKuUG1^7Gkm=3wKWOHmN9Jw4zO0 zlJGeHdF+3+%SieSE>OJ^J3Y@pl9Gt{Wovg#I-0^VON*a18OXmn6I-M;)+ObM(vl^d zKCfSn4n8a~O=dRLL8281zVD6?KQ4(-i?l>LSGqaN5FQ0W#|y)9X7sSM#n zKj+VLzI(%quaomhBSnGl1LVth1q*5tazka<$qtTUa(y~XXRFWV5@P~5pek^#fp5PJf{;p=h7rV(bl9?x2zs2@mddjo<`GUIeIoN znS4b?Q$+>i80%2bYZ3x$80piSJqcaGc~(7EdGM7{u2}G=RH^FvEtMx^XsjrOa#e=M zi*kN2FQI@}?z*m3niGLFxnB8t#S#N1`+qo$& zJJ!b}9Q29bw-$T8_`$gY|CF^f8YGY5kVzH-b4wKXT+knupj_T}Wg!vtM@%9Ul4!7D zN%vEai=$FC$9yF(WBDN&&h|=MY=gvtiOYX*jYp<9E;IfjB2YwB`;bp~wIzTv!_JfAfuLy-;ZA`&zS$>;y=oIHATT=IpAv?PL( zEmlmz>6MmbNMa$M^Gk}I2SB8-c`jlS(n!eOuH;|p>#@9%Gixwc1Q01pHeZpvS&INd zfJ1)2^=nG!Dl#}$sDUbgCPDMsaH=HVe|kjLn{^!v`=p~eBn{DkcvjK+RG_PDL7t-N z7yT=kTgvpw=lE*W*ig>(6rdPgC_4ZWhcZANo4 zBER&eXG=%x^7|ppF6zJ1EN5UyK9D{wqxm6CV5tTf^ertAxEdl`O3Ufdr=?KNNGy<) zTQ|H&!Ignbk2tEkRTa5%R;8Y~VoHI8QnjS>l`p4dJU?RQZ$QHSnBHJTAuUQ3N9q94 zGDW3ot|6)gJ)TNtez>nz%Vyrv3R)GC_j%A!~-%GbaCb$R>S->$xs1Y-=K!9C!G`+Pp1m-oK+z4FF4 zzEQ&Ag@3QH{ah|5HE$U;gD^%02hoqs0!s_O-9cKmOxC%1dARQhCE0 z-k@W?b?a97!$15(`O=rZB%k}-=j3_Md)}e|a*GjlkfEBJHwb*=LF@o{BkZ0U+>!g{hVr^$L}OGvuR`>C`wViNJnSVp&bpk#Qwq<{~;W7&LJLIIxwmw=B^ZehPZ zW*znnSM+_WS#!t(5UWfV|!G=lh7O) z8uRX(Io4n}jiyVIH^&FQYNfEoL%+m zHdlE7k{lg+L~`an5o(gg@H#cKjRqRz@ZiH&tkjQwogO_T=f+P+YqU$YH1Csmuu&yv zt07>S04R)P`;-j?yM&nl{D}l&686OufEX)Swot?$mlo5H%}v)TP;_?esO(yIqqN00 z$~TXEe5Jc{MMa@(u2oCpQmz!uag#B}(6}B)#Y)zkS3?T;s2ttQU#d82UQ0>3Fs!V! 
zXdtO;jJXBmYl^IsZO!`??CTjlVxBLX_1G!v8+S^;Y-c<_tbeZsM%!`+RR94ZUnkzkC|N7Ufv~J6mE&6vRlTmgN zWBemO@+0!E_Ps5V zjQcMtv&_Tx=8zd1gf zCb7yY1H^(xj~yS=M41FO9=1t;+$8q#NWdicK50-e$*T#z*JHof0k=t#C;KGTW9rDg}VCuMU-OxH6&Ctyi?Q%Ks(ZT-3lUAwT021bhtfU>^2*M${G0(c5J zYw&XYVVzgCV%x=1Fql99K?Sq^*;hJS(KfqniJJbvaEgTedVH~8yY59K=W?TF| z4ghw#KWC2DyaHrCKcH+uGG@D&FC!`IYzZrn#CRYtJAN)JBjXnMO_*cs>C z4aptKIte(8lL?tvbu6022p4!cIdV|Kftc)QyH z>>Wj&^t!*sFNe72}_#TUV|T8f);x@RM3OGUN}- z-i})p>_b{MlIvF)~pa`5ur0FC1=e0ARo%0t|ZYbDyhzL6ZX@Bb&`Cm;@6D zHc~@FgCvtlWf)BcHrUqK*qGk)8aF(5{QgTr4s08C5&P%8a|KG+A3jIyGaiqt*AZ=l zk%PPr5Qc3g5((WG?*jni@2P%3JWpGA5B@=H4+)sdu5trr5c`E~#rx*mk4X|&iqYe_) z{ zCc8H`$ktL+6XHIT;2%Ak(S#W`2Y?Ybm`Nnl6V9*koUn>!zDa-qlJcdB=~vj<$x}n@NlVc#K?$SIKKIxRAros$8B>U@(=leq%7! z@@AfyZ`KH}0hA=-eshuKljeqi0vfL074z@Pt^imH#gcA!JX=!G1G(6<_s3LXH8@sO z5R>)EnuLABw%xE*f9zw{P*X!tlJTH)wT5L|SF8qN9zK&1uJKs6fHAB!02ct8qIq2< zU?p26ivAVV2g%CB%H~EN*)0h@#|(g1YceDo)>#&i-&v;sMf8*YmCXi<3MQ5Hc;Gqf zEmV^6Oj$u!_D?%l?|i<|jM=uXsH`*F4g^da1701k$-#v+EoEOD+M^1j0USMiWK6nE zzc;i+bu8*}`N-soa_B_Andd8;s$P%P5wI3Uaswvm98(z*=f2K{O(tP&(L@$A+bcQr zSMKat=}-440I0^k840$7-kifKng+k(Uh_K)A| zS8$+IDJaWJSws0jokvWJ0fwT%gwy^B)34JqkU1-j01}Nm)%4xn|JC}93@eB61GM2* zsuXq1%jyZ_csZHJRi3Msb*w5AC1nHg(-CJomr3U{a{+i$cI16X_=Ym)lrs{|s&m8~~wudLrpZ@8e))+NY1MuVnANYWLlG0?7YW!Yc z4Z(WAG`DvsFXzlHU_RGmK7-E@a-hjE;V?03_YNCF!6Q6p%sn6g2|Nk;c&1_jC--u| zN&EpFNRmm2VOEg*HY-rzQxK`{jkGiYumNbo95L^JI9|u7s#g62gL88ad!{)VFzYBN zjWNH@7bj2vA!Up?%rU@1SU!Uz=3>!d=`gPVKY$y1%n3MP{$^jt&Sta?4Q6|LHzyQu znhr2@y|X3<`$JZVg|W|RBR~r4_OYYm3f{1Gkf^ct0ezYqf)2nV5)AnCHP#vcP9*4c z`gC!B_}gXY0j7>+$~smBbF6sy#mV&1m6x%c0Z=)9_zeBx*nnjR0LK0~h5(Ixj)DAa zg0~_2+_SksdWQ=%on=@P-v9SEU?7cjgVNm~-6`GO-KjJKBow5(k%j@IyQRBRq(Qnn z?tOp%>wdaDSX|rrob!&?alcno*!5ps9K^+{7#z#*bU^8y?~*&L@dV;*`X4sKp|g{0 z<;yIuHGc*omd{tpr1Y92(azDy=5T-akzs0Zl6Q=c6m2{Ru<-|O)LM@jbrDr&6eIqY zzWPI~dJc%AJB(s*U|L@)r3|i=+H2^R;HP`}$Hkd&#`U)@5Oi(cBlFL8sStnwNSL$` zFPvV>yN)K)J~#KcYCusxUzx|!x-lQ2zwZ+k5rx*8T9mQ;`!-oa=VixcyuUUU%OZ~a 
z@N17S(tuWnhAuExWiZF5FFolFDv|bCM_s!LSCbPTL#mLeMv0ipDd8Ot6KZS#n$08= z;b@kY;!~U}+<*-H!gBz--$WLh%81w_#I>fS@&;T#$P44&v#1ZI!$Egmkzz;ax-N&x z5x%n$usZ;R09s#8R*E3mu*}cwRtHNpXHipmeXX9TGvMqJ3 zG1H^xzi&@LyDw)!T|)c(lhc!9G^UHs{U*<0Aq?P8d~2?uwO%W$s}5rn0CsTU`nvJW zyI%Nru1C|d6k4W-*Z*c$oV)WRBQO|B;i1=}uY+%@V*6)ruV=eDK{q-@MMcufB95`V zZ`E%%T}f8giJ}jQBlvKiB#QSRk;y~@4P(fK%Po*}`_lkI!Z&jo?z2jKb_T!_T@pkeoCn+g*x6RJj3?}6s4R{Tj_grL&p-}4tgbz}7gnR41>19z6&3TgE2Ms6LEn(S#_jh3t2c=4`!f2PUK5m7fXz`BlX8733y;w31PvSuKNG?VMsI|; zUE-&Nr8aUl?`2ApkNL5RoPTq&rcA~m)L)~GZc}+QbK>GX)%3W$H#Z94BTMq$VKY|q zC?I+U2gB=H>}qODd@=&OLWR2t#c%+)?O|yY&lSXD!tNZFHcHA`oJ<;-aV3+5u&E_i z=V1|A&j<2)b!;0~3QDRCpj^UKL#ZL%c7H^JfoHEzg&`SQ{bSkXXRh^T-{YFWDr5SgF%R{FBCi=b`%OR}+ zi4wu!M|y+)b~+0Rm_+AyGbfNRM7Bq`AQM-vU7i_Y7-Yu z-D~JT9%W2t#)J2USIxsFr36`Eo+Dx*kbDbpho{`&_s=nPQo^uQ){=GR%{of#fx&S^ zcB14yt}za)My0p$9(iyv0Kz7IlEo)dsmOmdZ*z29Q%gjtYJkqL83>D4W0Eous{Otz z$>QCw&#Db{@2`IlEQn?vSlYi}xQ0L9)^Z&yh$c5iEn|*m{4Q?w2zc&3Fc(bkO^eR7 z_;%7}aPr0%D?q&$GY)cmQI~zxBeSKr!<$c&BPM2XPf8ZV7lM`_wsAi|KRAS7SdKK)}lT~VAEW`ugl53TpTP`_@`YjBOPmxwhu1_HO z&c6?rM#(M{_pS#q7!OWETG)K7^SSEx&mU^W)vp0Gg|k_RM&C83iC{x%U@94)K0iUG zvY@zBrXTT!ZUL8whDip<*Y)1xJgUOPr=0`)f$5!FzBD4!H+xtZVyi8L>I8VK0fKHGN;nJmk!^yX46CqfL;k_ zo=QLmF)3DJjjjTh6QDF;6`S!4ff-J$@5!#5PES}{7A@9lHz4SWti|UHak=SbJ=s}V z7@rWzaIPw@EHi^r7?0L8w4hr8Clz=$?~xu&9ff@!ntsJdw6a!jKqRFRobnlzhdx6^ zDDDj6&}mF!DfY~|(Qyq@?65gc6W_C9((I?+LCw6tVEn{kq?u)}^Wd(mr|kWQ@~izj z+WA64ZIsn4jO%=-zGac*RJT`nTd`v9)y_((=5DL`3)5fKV8eO&TDBd}_7Y9jPk7z6 zE23R=x(00kVeuf{7hlsqFko=j$q_kH9K(?ikUN=)26QQN#c%blhcHC`w+*wyU>4nr znp5bXrsVMc@g=G!RAsodG@JF$6uJaNb%>^ZMdKF(4r4*oQn;y-oghS3o-X&q1M{@5D_tPttjsP)gyVKZqwhuw&(wT^*b85 zk4d!=aRDc5EmuUZ{7|RT>o8_?c&8n6Bn;B2#U)xuQP9g`kXG^396+<@o+#0V+GVKP z^Z=j>#Qm@sg2(NZ3owEP8ZMKJWs$hFs}V^fVUlYiV#hwLE!WxXQoL?b;Od6#N?u=I z(_bO0BDzeXNq&p=DhX2=6qoyt)3g9VT*{in-Bnx-=aw4K9Lq8_FicXXd}1v zBpZc7nEfeNO&jF?(cfAhTl~+dKU_E6M$=RFn3Vf6$8I?&svN^s({0Cz{j=suN(JnV zbpu_IDG^j7C*duD{xz|A@bpu%gxsv%4NKc?y1*egQ*}=vubJyqA9|fR*YueS9Iesb 
zV02vGmmb~rN{&)6a=@)$Wlim9PC9x@ce9zCa1!GoVI9Sa8lzU>@)*}8$3U`M9W#lF zlD%9*)h`SG81BRK^!f=3yi33};$YNag>=vyn!xd*S11}}9OBiu`-q%PHl&A{D}x&6 zUX_VTm@uwWd4}~WW-g8|GV!T+!pe)K1)yo5p<1#MQ{a~XCb&J8oGcOjL{p~153jNj?6fGFlu)DTT$HIE3qp`* zfqc-VPw#F19paYU^P99QV;+s1`3oQq&C1WJoBE7hN)rRuxTL$+Q?w62U8fN) zk?dlekqX}N+%GhhyA5=Hpp={PI%j9+vP4gYo|#-$PuzR+R;C%%_}u0T*quMni;7Xi zZNP1R1OQw4Dc_Hg?R?1>lcM1JmN|z*53x9&uDL5moAi~tysP~JnaUAbbt;J<%Wm>b7sdEDCS9a9_(0Ju5|b+jNRKFJN|)IVGp?Uq@$Q=4f8q{ zu|4>`1*Kdy()%o?3lRnlkjb(4gH!{vKl8N%{zg6s#eJCHk3*;e*)1m%vfBfkUSUTp zeIOBlvd}UyT?9Z~?cL9W>nh$EhBPTF`lxoZmcJvIz!Y+We1F-48K^2wNhtXBkC%>8 z@otFJP&@5tHEk0pmZF=y8;4MN=r15U&d~_={AFWPhZ0k>5~7oLw2-4afU`#Qc6M>G zWL4b$_F*!}xbu0J?a0rTDG%oaiCRKJhN97FwOD56^LuYBAT@C8gNH#`v+qO*Ofcj` z`kOsuuAKJ!#?nQb_lwj{qp(uy+Xftb+&;LaN6vfezIok30&Ugyk_T7bb<7OCXJ@H{ zp=tvt!d>5FLs{*J(uky6nERD6n}ymCQT-g7#9m(Lil3>jc3_mDQrT)W1@q@-pN5+2 zJA8Cg$sse64@MuCb)daxUP4a(D=x{_b{5A$r`utkNlfK(pe#snctMZ^1^ zU+OZ+oDqy8#=-nnJsFW1A0}l(KDDqsG8HZpSuLBL7IM-gBf6`+nbjE8i4;S4lM(UD zN2Y+d=8nk?g}b&as-6oFq$OntNKAR4frEU`4TKPF7^23@V+e2&IG?_0bf@_eO$5eN z%vW29Ev&I5(uOUcY0v>(F_`^VEGkYWx-h)l)1D=en~bx^%dQ8{W~V7s|v$W9>(>k{ z@#r2Qmfo_f=o!bxPWdy{?%ESE6v@G!v2buti+^l83toKl^pG_g1{yz3vsHL96oDoC zIj9g#&!6BJi3&pI!*`VJqU$Hxl%Oq36t%^hin(w!0DA8wh>CgEA7d_t&b7vHj~93I zd*qYZ^%%jqXK#jH9{VpU<~S1+&r9&^@FYmc_!}9u5XGRR4~54S2^uw(jz9D06!1+$ zUqa_52-F79F9lx_!9D|>bgaj9J96Vm^F>xWvXzsC6ISB;QPpgKcEVL|i<#`^fwjQ~ zCD2V6v4$N{f|y8XHI|NoH?kKk?`KKq#YY#$CUk(x{pX*1PhMS`@MuWCX7CNFYx{?< zib0xVx~g7PY+m|B5$M)2cMX55t^z#I--Eht>;JfG1vgf(q?NqE{x{{r)q${$o*|Kh zTnT%`mr#$7hzoMi)0TbmhqkUKL6APc?ff|SWg8KUmB8H?oQoA}w=q+0!W(|#6k zmYL!fI){QEKU$pcDFntLmxl(i=OCWV@V-0Tm2Xu+%6fXG;{k<#B*( z%L=Y%uBn!e$yORoq|=zOzl}v2QKV^-Pb=7r9EmuO?)gO6*l)W+y+&?OgMyhBr$NSe z0O0&g<|}S-yNvx#fN};D1P6*^)Up~#V!}TF_y_ZZu9d2C6PWREIWw{S8fKh!CzN+w zO>SzLuWFC^&P6V2e}8NrM(6n5t4Sek^qWL2C=qeWMsZcOAxB~FsvU#=m*x{#R)3sO zhOC83n%)+@a$4tKjLr6uE4U*kBl6f+0MGYSHhnC{2D)jothyn7^UgZg3t zOQDD?CxH@!nUfrEV7V~{nv-}M`!L=sy~e)@oa90_HJ) 
z24jDIro)>WYmV3(K5R=c_xPIlJoy$F%T23|gp)^JAt;vwJ077@aVDH-xrTnLqgimq zUPNaMYgv&P)uVbE&WhIzlp3^HY_%!dVKO4s0r%WMgEzO-w13Frd@MA~8a&VF`Ay8L zx>lwT79FWz>v0psAXv+-7-#me2&KX`!Y6t$xk20e0h=d00KI#A!fV4Hv1MZ=Ft?Y%;9Hy_Zs)fUN z8C@*R3Ae}3XO|_?L^Q9^<=5MVBTq%;pCXkOax}vJ1yxB?6kUv?+cb4)K-J){SjIv@ z*Rny>E&)ddUIAOFT+0*@gY5)gCP$5LIa_v943f&=GY|a!J{c=z?85O7N>ZU)WV!@B zt>Rq+nR{m=60X)*384M}HGp$z?SKfN$W*w8`SBl2j9l@vqdT#9tIs_&lWSd~fKXbx zuqS!P+^q%N{UJDsn!Ms4D>lOtzTXIC6o)T#f08yZ>?#aH;N*IYtv}g}pwNbnTIttd z5-1uR{@=l<^SWAPc$N-nm!-Nzdd1+d^=}IzGTT5Gn3H;E(H{AHta&n^41SvuIbmM6n9H~8L7E49Mlw`zU(q^#DNO5B+F;N>$kIqM2F;4kF zkI+9s2HT*=lAFx31!_4kS=OXNxf6~@up|19e*~uL%NM}j2&6BVJ-R?@I~t}swgJwg zBs&W7rWqDXv*B=X4A7!xn)+d@K58Fe#tdzQda0HvC+ukf3z@s&3>L0at4oZtHHJ?tz0w8E1?Ljc=zdXH6X~~` zOx#t*ax^CdKZ^Ju-vo&=LgSHS0kD%2H;^mArjU1Z&zaxqA8Gc{S1OjI(uZmXnA2 z8j8pTmZ^qMVqxQB8}G=oMxSYqWn&tu{UgqLZsnF|!`T5Y|3nXK8k>f?n69flJ zVV_zX+tTIGY8zQ7beNN$mQ-eRH%6I&uy&mfekS*HI-tLyd>-G58mK&mzU0;(;r`<8 zevd34zPgjKa0HuhEl{v5?CY(?X3rrRN2hN?vPK7}N$UydRW5+&e?e?bG_8z>I@=Gc zakZ|jUZ%r-m@4xK>6q`U1?7Ie8+AfGfpT*BxhxwpfJz2DBo~;5wiCVC^R@0E7ap0; z228#D?xLxc2!cp0OLCnidJ^6Opcjr8f95z%WI`wOE!161Caz<352V$14@8js_jNgO zKYPwsmYOxvCEo}2aba7fE=v{F=8y5u?iG4c^Bz5K$c8=K*W}CMX|Alho4#+4(2j8u zFyf{wsBQ5(A#!fpKnh;w8ko4hqhttG@;IDhxS=uc?7A*uW5@$8^)`@;g`tl;9S0re@d5q6tAix*ZG)s6meBRsZDXI<3DXp+GHnnxi66C1(%nU423>+ z^QXSPJ`~T*&H6Yo{0M^g5a#HPW%709f`S6~y)mL2^~1wM{V5S^9GykITJqGNbeP$x zZ~i+;?GH4I)t(v)nsN0B{^WwWYF`GF+A``7m+apq7%NQiojPj%%?}Flm+Q%OGUBkR zGh|)dx?*Q*_JTP~XDNs`Mf}UD@g*uH4I{W?NZ@$e-_;v4_VV@rG9T~L2JN4C$M0?} zzb9I4Y2rBokKwTdd_Z%qZ9|?e(WoOAu zpAGX((cK)~`brLZ=puIYJ)S?vw`X+x&?cwM@J4j%Ag}m9%l%$vD+I_zGeHtEUR$!Q z4I-_IfhPYhb=LCg27-ZwqG`Zfm2A~X62gy3#7Jl%c=aBCetyQtnkIjCWsWM7RZqj& zN3yioc3xf$%&gVI#|9-&WUGR0byk&Tl9vwK&4Vk-`O8u`LaOwsYzc&b2#n@cM#PI< zX1hCCwdZeMdnUM_LV4ylpmqXVnRB5fX2B<3t00_`EpH6G?Aq(0w#(gm&W@QP4v=mf zJg1JCi6*btY3H|U?7&z&P>m_C9^f<@u9&?ZkQf)ZIr=-z&-&km#ePh}Es@pRb%qxL ziEUzfTR`^L32*=D>U#L49CG2pv8toi(<-3Vb(c-n4;Z=1=BV!aChVKRrjr>WToj36 
zYWwt#)|xf^FV?S$);)^oVtD1@0nghMI_KZ8?*QzCGE_Z}TLfLDro(5ir)TABsOgJ) zG2(yIhkix?Y~=*%KOpu~5>>z|D;vH#GDs761~i9QVf7tGql zEP__Ln%AdDjDxSN_}3ES<1Z5Gc@cMH&ra2Ed+kw`)eRKg*Osx>d9i4k@Yd{N>AN{n z`CXI|#>R~)dkXW-SKo6Xvg`g*_JS-}LTQWi%hYcV$Ek2)WMi%B0Gt~R@EhO{yn#ZD ze(r<XX_ z6Y7O9tCOWg!@pzz`cwX|mLGro|15x0>rBNDDhrE%4-X+ActP|ObTLx2+!q5Jdu8%2 zDZ8g-gr~lt^h0BCQ1ooaViO~Y&462Wvno+EFDES|Y&d!R03KUtWUOy2WEWJiFWppM z;pkS(tCj7v9S@>JWZzLZjQa?!G(y@MNyu=4YX4`iFFL6(vU`{FRU9+i=(wZxU9g(J z{;{y+-$|YY&10=*^ts=A>Q7+eS^9mgA3&@ayaZVvuktm?+dOga+1=>gssiKWe=z99 z&e0gvBM!3Eu(o(yjV8;x(S#jU?)Y{CEV~D40{Nst_zD;sVzp?hU?11ww3!i;Kf@;@ zGrIQM)=k-9RLvO47=$Y+n$F~q5Y1JCs>EX3r~17mF6WlLr){Xl1^LJ=BLGPG#p$VX zWQAYwi>A0_lf6!n{jUBNB>jiNzz^IIojo+gELTeA%4t?3zJPy;m%U^z8E=G?pEKuV zsBQIsD(X2~prqNkS9NRkU(ho$cuVwG7rSOUDxS{N_=aP~D@-yj;3x!yb27dkOPav| zSog4X(+EWX#xg#h)LC1%;LYnAK*In&fanvQ_%N$uig#atvQ;*qS>`t+|0AI9muySI0J=o@bGWJhc~t&a znSbZ@Z_oa+x%h9BB>#km)iPVX9);Aoby#_{Oq?t4N z0gzBq{%JWx3L3F~5%aaQ>AfO3Ap1|-*CUY&>PlM!WK{4(D3?`=@zmg8Z}Mx-FX>m}`6-F@&Zx!S+z7Afz0_c009 z4_@W4Q!jcODIx37I&QP;l0NFqbp8q8Q-t4)dodf`(Q0oD(DUG< z$0hRO0G&GD3J=n zrnPeMPEpfW=H`HCKH9C4r@9&IJ%`CX?>|yLMDUX}iTkqS0+n^4vScPO?t7T&Ebacj_ln zZdN8S#4sMfT}jglp*WkmM^mGnNBE_&LCs7tc1QQxK6Z)|v8Um%X=R{Tgsq-78YDH$ zq>2=2&7QC1>t~EC)w^r^%Pb_ER0|>!h<#wvQK%S;syzbZmu;nKAO7kk&OLS0FF?uJ7*JJxB6Xk0~4>+>SU3Z=&6&ZmrHNu3X`ACx;l%GFdzT z%~3ZxpiZM8!t#@cGB5XO(wFR}euj_1$h>HbVngYL@i%FH%{2f1lpE`#QReDUT3xn= zNh6d3*=tD`>6`_dZE_Rg%a)v){uKX(H@WX6m^(v~!3bzJWboPXgjQHHI!*d5-KuK| zP!X!6scH=Rlod=+m&*$8y}!Tmf65nE!p-$C8kMG5UzDKK0>T*TzrQE>zdXITKdtY9 z(y_Vl?_u{AE%t|zA>Ss_4emPpJzAtDL5AUKkjdC$$6}4RiPAPXklSn15wB>=*hV5# zEZ9ngVi>i(C%T)L+IaHC5IIs`TA#|aFw>!vHHfAkHeEF6RfeW)#l=DXKjH#;=t5DB zM$+=$&m#tahq@^!bz=?O_)lsK4ZPV$rx!rU-_=t zweRi$LGc$EiB1*u$vn9|t|80F@N~(}R5zxUfQRH=2d!Fodh?f_8C1M-h#?vnsd(75 zQTE06QKMw+tEcKh`fD%jo}VR1Cts4-*Un(p74q2|LV#Q!sIg({wWW@~+F`;&w`PYqD!{xrY(q+W}Z4<1I9iA9cL zWDtWHxqwVjWU2OpMdplgo@Hs{yab1$+?l$k7oHYQR*nBluu^tU=R~ic@yUO5uWVqw z%iH;4Smh0MoOo8j_L-Y zhUKkGM>OIr 
zD5db)$H~+{j!S$_gp(La<`1i{y)8#4E165uJx$Ax{aoT#3@AKYf2Y+YXJcj8PLhEdd*`r?a6U; z7=Y-Xc=awET+a7`9k1)BAo=|iMpIX}ELRJs>sb8V-z^YQp>*wAN{g^A>~)2O4gY^# zID{nxYcs{1lV=q5Giqb4MwHhIKanVeSwye=aucil&$~tkCc=C}<}6sG=HoSigx#be z;-S+u#7@&KySXx%>*((?Lt~CyiD{SVgp=?qW7*g7i&_4$+0R! z>O({<&IvrArdvz=2^fh)5`IPRfVFM2hf=m`IWTq{;(*m&P>X_A%Qi9>n{gAvj82hH zJ1eW$?6Q3S3j`ks>B&kL^#38_AvvOGAqrMr^jfD38x0<X}Y5t~E)!Rhkm^kYFG2~D-z zb-$O?8-7|4=Y8kAKvt}j6!AGYFGUCyxmwZ|?h_mWi4a}sdYd_-uY5bMa{0v8AqfI4 z82&R#TrSami>fxf>3TqL__gYfIVcX54{mDK(ZFelqj>n?cOldXQhRM z^~5?TVzRL4&Qv^Lu)cLYnK&o;Dy)3)5;1{dN5N(=mht|lWl@J&L9GSg!-9nAO!VBl z4I~rF*Wwc}nKnMInmiVz+|;5zywslgNA#=6-z2V)lOI@CNxA}7qyjz!=VEk&6e*s= z&P&{^w1*N*F=2MZ?r`UuI`rLgs_%{3`*BHWtu8py_b{rB(uzp3NqT0o+4%Ek{>$Fk zf(S1qNR$^WEVKFx0^f~o=~$$o48#QBERcvzr`J&NJP=>u-tq~!cQ#MV`;T`5kp`nA zIddo$Q0z6uQA-h6Ax~zjnVDah#Oifl)sm9Wlyc`7%>mMK>mP$l>j%CtNjfyTv+a>k zUDs_7T8`Cc{|K(M7SaF3r+5S$(6^cJCH(B7TBY~(B?JB|0Wq}rEa<%S`AL#)_m4|J zj{@I*If_sSdp~Vyj&|(wU~>HKg2nV!*w@pf071b9O$%YijhBmVoIdxH;%|mc!ujC? zI_BL1%j;x($kv+Vq9Uxh58AP7HC@t)6aA(#0O!JalG0L|QI2=TA%u=Z?Zy-7U4yk& zAV$74-nHfewL}wEYY4;jm=pN@w=w20Hucuhbvd~f)5G5q#yp=e(x0TL^J3+<%mM)K ze6oJe@+>zhWUr_#K&&GbqZRAR%x(Icl*C^uCRXUf&wQ`iN~>H22y-OVmw*Q? zZhek{NbWA}+kL5!>OEbeSQBF@9wv>JZ#_-Bj<6kMZKZ;;Pohu)V5f2`3|oQUFaPUBQ?1pS`q$Mt{Jhu)`7U0%k!(`L0hpZSSKvS$?x07h$DJu;OLM8a9?$NEaEt_+Gk z-~7YO*SKr6;k7lmz4zowU?13j#`fXsg0x3rU-V)fuD7{=yR|TL3Lj?ABjM0>sSc4? z8mD(c8})l<>@K>zjX6WOy(y~wu8Pa9%5lHl$5X%S<}ADMY$8#{!3gqHhgD}aSCQ7jcIe*DY4M5N>14L*QXO574R z`ovm1f+|w*O^#-2M?nN%pu)~e>6&?fSW-q~r;{J$+>WiUZNnVQ+3i&DeWQq7PY(-{@U<=m5=?b0 zb35(i1m@n%OcXC$m{zNg);{wce%h`4I#TDt-yYbbQP6C7-1y|5PDo6O!&|5iP=V_B z#Ow+q`ivcg?v#W?a{Y1N&C-NYND|C>=r+X+X=DMSBqU#kX%pl8ghWbOOF#CSg>jxVB!I^k_Wx(|ZXpMcMs46yb|k+6Tz) zMCkbNFVZDm*s19=bCPOfpUTuUTP0hwx**$WGNCHGauPrU z`QCCuptU}t|7SeIup+chH_Uew(g4rU^EwdZSvPR1Ah_@wTtzqVUKk z{o!qI9;ugzOkYUR|9Mc{!EF+W&C7_$uT-*wW5d%0yZwBp_L^L)75_%FqgEyspbFU& zX$W`2I1D$>z)x^Pz(Qae5syVtDj)e1MRL9b;9X#>Z-FWKPj0#cxqAPRg^MKx1dQ%! 
zmjs(e&{LR#C4PH)T=O#D#>}~{+GF@7it)Mc=a2LTZRRt7u*;KE_5*8!Jx+L4JdV@h zTtSrYByB8ZPDTEayhy93M(e5ex4oMod`>7$8CYr+i?-InIOr$*K=%3fgTP&BgoaI> z)tTc-G^6caW*IiY4Byqm_wV3(L9Ma!7{R9Hq7X2^O+RLR41|_%%tZlE0Qif`rXWFo zP-Dh2pzdjTMjN^!FQ6wh_QUOKd$6x-TCHOJ81k)q{U^8$oKkr3?TDMysqZDf`t64w znm^PGr`+4yclB>$IBDV}_+*%-^4(V+X-17Ww!m$-mhH!JuQ=Oaak!OKkpd9`oLd7~ z@f4PEzhu45*&QFBE@soXZhfGtJf2Ll{C15=<1Oj7(7Eq3BdX*|m>Z`%Q|=<1xGW+? zid6G*Rwlm5?RT~bSS6I_LALx$-W5N}j-|r3Ix8;TPJS!y2Bwqes$Wfhoz=j3OWHs9 z4OMO6Pl!mR0Jjw%aDfTN);U1gO;nxU3a@BKZH?``h0epDzPd#r|cIW#CsB#=`tElCLkQfmx zUj=E??p^F5W`Qlb?LQUYGN@@^E4dn+3gCIz|H_7*Q7Qc5)-ymkB&@Es_7=3=ewlBF9_EGeE_ z@gu4;;1b}e`bFdzj0k?`z2zF~C`&{XbHO>xJIu1`&JK2#;I zr19xSjJ5C-ebKe_LDE!6o;&xmTuV^`5gtRpkn zP@&Xwnt&anagH9&|0Ug0RM#z7mJYOLxBxQXT;a9N9bd*N&`RHMIJ+uto*ZjJq~g4c zD;Kh#_DG;CrrlY@J_^{I+AKz~*R<9NzGs}Z;ijW;L&YePBXNWiH;2yyN#fgyuoCBP z*F{0i?kqgYr@1AX9Adt@MkPYrsO7+JY-fO-TxT3}4>r&hP$=N?8vYNSUV{G)%4WVo z65qHa6VXHDN$q5ORI!aNXu*s!l}#*MibnYS4W|0Ij?B^B1Z?T)&O;vVT7GrPlx>x2 zbHc}Ym!FAS)gkzsvGh$CZdf5ihUGW$68EH@GU?BgITEFEP_ZlwLr^u-UAQdjE;A5*OK-|M`Cw5gJpY#G zN;9<%6&ELOJjxE^#uoeMXVY2U{ZYNc8)`Q#UNZ{H%eOk6SNVdlJNxJI9O06w+|P$# zt4uYPKLVeH9=A}J*`DFa)b&Q^rtPw71QxpFM;vA;Zk3%Z!ozK~(@9kDNGXt*c1QZ| z*B>8ImS1y~CWSYkAYdWv|EZ<-n4M){61nDd&tYE%A_s>?!VTx9N(FXDpIoFNpz+^zUafdH&Fnb$(e|LNC> ztqbyJSq-^W#izZ#jUGd+IC!E|10S?0_HC&33UO%dBK0cROb#02!EN|uFb5&g&BA1R zIpJi|TV6Pm5XsH2I*d zvRPX@FOIiPsr#ox6ZgUWOw0xvb}_QSv~I9)Q?=G0)*DPN)97=K;d%CZ3?H}&0LXoi z9gu0L(;YhVe!cfJy3fApxv`#dS)7VSE1Sp1@Rk$()Y!~He)Rdh)ekdAjJ&IE9I8j+ zBT}&x6yN9v!&EL8B%wcDEcwjc7W7*KuQEaQ&Nww*xtKQboo8iz4k{=4px9>TJU5?c?SBzhs#fd`uiGmp3b~H z#uZxI0Cbyew|*Ny<)eB@AuN^IinF@EOVUufsoqv~+5GnUw2S$=foW-KDi-{q6O(?m z(_xYI0oEDd|1bEmfM8m)rpB7;)xL*XeBr}b-eS6Yf8YF^`*szygOhvzt$my9ERL^z z6Fq8qjpqA*mS5D?LAqJu&b|_-t+K%8vO~?HJ!N+Ahj`xMLwssQT8=hsOQI?|ENs@B z(P1521UQ5;0ql>)+k>u)CWuq{%hZWvmG1*8f9(oXKEJ!gOf4;%`Q|&`9dds+7?HeN zTf|h3MgWe!wUuLHYKi`syQq=yKn;Z)E27bO z!au#HSJr0CKPOPlmmC}!L$?Jy!5m`%RGM}d%=9f!N@_9vXrVbaijxB-scWe$fW@cd 
zO!WDd%AFb=JIK2Jjg@`(2nF$rCe1qWX1X$_&ilFIgBMtV<2i5^o1u z;MrSY*%tg`Q@Y%^IjhFcpw0gvyE>t94^LU_&}|uj2E--ilCLz7 zXlIzAZCiD&(u&ZxJtsqn%rj=+jn+0Y7iC9=dEX&#%HO{-zLVI>u(eg=#Tvuu(%!h7 z2c$f}VbsHeBcyUV;k>)g0a;}NfY=+v+L==OhC_ff%^#OIMjQQ;u`CtIaYmCq18e%P z#-g*!P(2`-rQir!Mp)PT+!8XmXH+4G+es5d>*SC^MDyrjwe-;5s=xoMvunRx&!$CE;mFU@OqfC{7TeNi zNSu7)?SQs~P={>){QBc3G7^$RD%lv@{Pm?Y2u^1yquhr6J+uRhaJ#?*dyr`fan8f8g=w@Wn^ml=!g zGO%LXjZZi2p6P6{(#VcJs+#DJBR4hw%~!ge2&BmozrVLswKWB9MVWP$z`XA2+ZGAE z&v9peQ|7+;S1xk^c}-2T7Je9~^uWV^OMGuCuk=dOh%317kL_NiX9hV(k}sokvEtC% zUxz_5E>{YTM?0zLxR4#Vap(8xodUNE^>?L`;3HmCuMqUZJ7+}*@z=09)EcE)u;+J( zZzmt}+qDT%AKUfiQB9F2Dd84Sjv30uH`#FC-OoWgKA0z-kJFYFizYJVz?8I1nHgy- zgw5sO&77jnR=j&aF)Do2QZtV5-slR!_0+4w%@7imyb_h7BGsyc)-XE+FgpR)0f&wM ze^7z}UE0)}TJg?BU5C_9;T3i3*N}n%Y7Nz+EmXBdRlnW~+1en3aDt;uUamoiO}FrZ zFjEz$8Ik@i*Dd)*WMCL6zvXAn4LQPqiWqsxHp9K7M~Mb~vG}uc5iS$qP#@5h;qvCd zZDfY&WVrgIpzb2%-&H7xXUFE`Y=uxK{IC8GU21wkzJ@AKu0sw<`U=VbeP4}|aW_46 zc?_AJ|HRFQ^f6p88R5U)Rv0|zb7w*gIXC0k1;e@E>dODm0#wKV?Gtwflxm|qnQ@`W z)dp&gGHe+{A%(4ZPby52aZJVa!s#<V{LTH`YkXIa}oS zQYwrm90*09A#~Od&By6e87eL|YVfKf756h$&n6MAIzppN<_R6d+lVqkLgi}PyC>y# z*ts5>ZOzt&sx@+(I4CWW;hZfa7H>u4)N*$VulO}&YN!yP{mWi-;a$`j-5>Wf`+M64 zqC7A<^Gy{Ae7+dmWpVL|5_&u+@NK&oAj#!8OIDB${#iEZ5zv6eKmp+Apa{p9;qMA^ zcJM3)pY+|Xxzd>B!hG)k6|_A)J?F%JmA&Ltv9#NGdTPF2tGsFE_#QI&+}7n9!GAZe zNqdGpRR8>2<@q;__(#7Jfg1Zl@PGUlg8n6MWkf`c$BNfveAc{>-0^rWyF}8kxvAtr z?}q;0-;!xt#&XSvX*=Z9&yk6V*=%RfB;^u)az|a$Rl`xlRJHVo@IaJuVrI z9p3{Hmob?sEzO|#a(QNifX*=lk#0p>9(g{;2-(;e1_NL&xs`ckH&4i@eu(gIbY4Vv zhVr@W!_hKK3{LK@$ru~@$elqtqvCN05b$dW4N@>dT}FP#UkTAp)+{dM6zW_G75Dov z=Dqc&@6{9qs4Tr_r|5{@G3vhMuMK=p_^EB79}hQ^K~wzz>Y=&7lcm!g%oFCx`o6&w zex~}Hg>wh-FGNmb&-Y-JWO^})?r-U&jVlIPu`V5a9*JyP_F8wQbh}X?{$+<)s>K~r zF~l!1#10Y1t&0|Y+hxFSBKx{}$`56yi}AB+TJa=CqHT>!FKv^9A3}lCIcs zes+lgjSQ8ld$#_Z0Q_{lO?n1|)GefAJ;@L}9)zS}xDJGnZ| zjll|U2(HxPz!nv83BAXQNbE`)`?=@Ytw56UB`T%}oI$TUqJ*Thk83L!k zl7+nNWrONQ*n@_3LV7h!t@bp`uppZuK2dMkTx;aX*VK1s_;O{jQ=6EW*!~F=y#nJFti#|+uhJ7a!WEo+x0pwJ 
z6(%p`&#?Jb7s3SQWD9y2-Gxy=wEmKR%vK)V7rEnqcwFqNg3uSb3w}9{GkIz-NwnlF zbDG?Bzx)rDAo8?4sbFG~%gx6(3VSg+HdxKu9y~T(AiLXf7Es-OY{iy{PQp7>B67oE zrCZZM@peIx>UY{AyK8ox9RS3?xc~cl^4AhZ2m4$)d2#p!s5Fh`ZELZTfSp)J9bH zQ&>s-vL-e_aW!q&(i1`i|0qU*yc69-FA^iN*v9O9btlS;X*@D-+M*@>* zo!AY}h4R&jIPuZ4bx=G3lo}SMiE{AJnqpMVeNKxbWP_kW^-gBvtA0{sP$9#q4kaD0 zJa$p1RM8I)m(jj`lm$MzY!8XyGXKUufY0FtcuJLXqSq|yE1Iga-$ zbz#c~TRqcRgd>p82PZ^b2@xH7&3dC@Z?Q&NxQ23%Q{onEIGF^6+K&V#Q;E&4iklrE zZ+n`+G=~ho7(t;l1)LGuqj{eyfn&C5H}kA?6CJpr3yYug9Vn%U+1ob!g--QUt+4d} zc@$&xcFg|+2SND0j1dVLku+-=X(Z!-@6e^>imdhBo8q?G+HhV$PP~d>+pQ9;aMcD( zrsv+aLw0SM9q@4_2Z?4*y-bF+{5gNaz}VEdTN;8*3JhF|fnmL9to+D&)KpcH#P$Ga zNSpx_05||(03Dj5?FtMb`SCd>(!Obuco(#)m99r}X0~Sms0n=zX;{Veo>5s?)>Z>E z?F~Duoq$%TGW+$et4X|kj`T_I)zI315s<*XhjZuEycqe|Y=ZV6%Y#irTalFku7#$l z2nA5XI8og!Qy5e53?K_2p(WNSVPDh%W#%r&EV^S>De5)>gaFT|n8tI(YC)M)0fz;J zR1JWk+1sHkoUD3Ig=9u+nXI%gyeM+V`9wt}ZnjK4bD1iRDTo7T)s@()?Q&UX3*MKRzqHv8AR=ByT-{^F z&bV_-&Gl-Z>z=+e%mrSEKemLBujx^?ShaBxt85WounBR>1vzx~^_{eASKAC=v^ zckAB(AOJJ~FaSFr`N&5UaAG?!ZU8<0<3IjG-t(UKD8NITzVVH3$UEQpPHAjxl=bV^ z%h1q}%E8zM!F4Pk5&#eY7+?*r0~!G;!QSCL{r&yAKfo6H1vrKm6QCGv2B6xrXOBK+ zA4G3&uQH$LD`3v|zyEy&a{yxipAH{BtbJzNj2j>uKob4rbBqZf6x*Y{$k6`!um4&> z9Ty~G|9BM9cR)7)HsmdR*~A0(hh+trC^p8ynONcBWt3mnuj;ks*~G;6XiV9LWrxHDl&BJXX+lMkCgw zS^ZZ0J_o>gjC9Mb}%Q0&SYdX zRg!Jpv3av+<{Bi;Zh3f&motTmf;lAg-Rq)q)9yxVpzW|jDk`(9NY6k{E0!VuN}8k^ z-yx-&mMl{!Qz%Q-@esl@2IkGC_Ha$EG+9cvSi!ykhfeiowX#@iV^B+)$HTs7fJ3|& zFmZ6Ss9+)zK-W(Sw$P^bh^`Nnl=){i2EP(R{fqE<5(~0SjjFQs+$lJm3w+UiwnH7O=M!Q)f52hs_J*iaH^p-&FR1<6`lQ&5hb&B&w2##A;+Rmad8^eS4eXii4uH~;%{6l}g) z1Mq;VU;~+RCK=CalHL^Ql#Yf?3XDwlBLUPXiDwz}L9|66N*tFgqw$o(MN- zQXf-Bkd**w$###g1~^&&AIrX|R?9V`;i7KK`u>y@6tb)r9@7SE93PU1*?u?>SKt7q z4yMnrfNIqpopRw9%D&HSuH7m^vk90Om<7Y|=xW$1ZSnQWxVcEeU~T{~lvy9k538R~ zzLHb3>9*Jg^^oy7UOBV*rg6s)zz49&vXSySeiIXx*#eMLHUIFAf|V3Ex2PGja3ESM z~&&L|TmFatOS7kSHK&DFmKRu|w(-poJyODj;8)^VZ2STfY2 zAjpQ~4qZQ9$9HNvKymqE!phw6Ju>YEzyj0)*xS-{tpk1r7R}2?WiR;wt;B{W7~5ap 
zxI@7w4A18ypF_&nmpUm!W)5LFu~z+yWb9s#r4PUy7%(t|kW=v()(?yx0zd`k3;@L^ zKlw=oF#tUPYXB!;%iM6o4YlJP00Z#n_kaKQW&8H+`X0a`JdR%Xy4NXNiGZE+aYGvK_Gw3IZ5&`O!5aFl-})_m9+nSGD40114jfQW z29W22AN-)a^{sD}SHJqz3MK)*03snJ13;r+fK9jEcANa?fBvTeLF^kqi?ODEfBeUP ztYZYY21qpZhgrt=)1UsdeB~=&(S7~KZ~TS=U4TwM_G3RLAO7%%mC5zI=RHr~&ug@e zx%}R`m4%*0?b=XHc=fUs>c->7LcjPiSrpPT@K)K$KDnNRu*+XGQ~T^ zd90Eb*eCglf7`xi5i?p(7zu1to9j*KZ_6Q2~Z8-2!I3Y2=LVJy}&?n{is|?+iXEy-WO1* zsr_C9sNH1NE$usLzWYx1=9D#+j9Ez`GBef);FSv?)0Vwk6Vhz@aPVYG`vkztcjjsY z70>pM%h&(&u)O;D`%Hgl$JAKNU`!fTV81IJ07J!cK@-lw%z0%1kwi;HcjC++1j#iMy5>F>Boe!!72&}xofJZ0HCdq#+qVo zm3e;q! zo3dyCUXWS=)L?cENaO~937`p<7Wi3sjaO|C0~F*07-nt^>P9n z;&sAhK1~kTK}hWYyngq0f43&nYierJzB0bByXYICKT4W0`q`iTS(WV3)_gv%?+0*V zdv%bg9!nN_Qm(GuFIbh56fL9Ta8FtjY$`PI3;BY!D{l2A$1BJ&$c1eHP=OZ;3BL6M zvOok0y<1isup%1v%Q};wxn|Pp{R7J(=v9A(NXS-bnl3RzKDAb61%|^N`(_ zvg!%6xicaS5&yh^7WDvfyk$s!-O}vJlyc0V@zvR6l3KU?yL!+62G^- zLB~-|noXPW*cow{QZUg1PL(x4&(2s_O^K&t%G6_V13-~mI2hDpd2+a(XUIYow1kP| z9RCD9SV#v;j-;*XICT3|DrY^R0C`+vZNJ0(@;Zz)HK3n`i8?O>2w}!wc4Vupdu~W> zPxa;$&}Cm7lkA%{&39x|JSa!crsd$NlpaglyJM@)9dizAewadE1JBL|0R| zNoTq+E`!;#>gxgsfr&1jJAf20gfPIRv^?(zb`AKf=ZoK!gn4}`gM#IOJgefAe+P8J zgU2#~Y>98S2Y`XmvCpYG=5-h=NQd0*!i>NG+A?T@H6!TNETPpZUZsqdq3=zfR6t{E z^Bx5Y7!Q?9AwNPIWX6T}BU=jxV#d|< zuCBy31*8^oOwgC}V@>0amSXM^I^RjJ2^NQ>r8P=QL z^d<#WkYge5aseU&kOT$`fxnGwcnx@jbP9Rc(bDpq8Wb=Q~GNoPi-LFdO$$obT9>1fz$63>_>q*N7y1p_<5W$1J?Y|#YX zGC-;-d#X6>7i20V`>;Tee<3*w_(Nj8!PL(Lb__<>P0Z%cYxDU)xCtjW71Wzd%+ zNL)v919EKW5xvmgvF=9o5yB6N65@bmgP9&>{a_~C9^YhMZ&2`ywLal^DIr$^L}N@d z#Zhyg)$wGU0ptKqU;?$p)+>mHTn=!>R>blxiqFk#18<s+VE*10(kn8BsqbdvCY1S*p>S_VxW(q6!TwT}9 zobSK*%_rqEUw=%#`r)5i_Wd&^_~0N>!uN)Iq-8LJkY!DNAIz5*yx;{jZyUaU08#kd z*+zgyL8^5+3JU?d@J127@jFVZvDC>uE(zD86V!}uCG8*(%lH~>ZN zc3}=tvflk$i<3m7`uyZ@{g>u?4FO{Zvuiw>*zQd?-K5paU`@?8RQl^wNt-&B?)dZi z)UjKxsDniHxF}w7_qB5Cb(^K7F}Bi8nRRen0V162&8jC3avRt~mcDLZx9_u13yG4- zMqIW}#WH^Kt_;j1a{vwe?S7HK@CiAdBW)&`YBotzSFvPI(!|-w$5w1)RSq$I^61}rSdC3>53iM#u+}aqDo#t~S%z$?=gLo~Kv0hFvaWL{F 
zfktZ9V%~q#H2HNUU?jzuE^8&QvSqSV zDkWvWP+psI=P*Ay;#)LX2K=~M0&oa$4EfKQv7@pfd5ub=U|yX|oz&;Y^Mfjt+K}9( z9yQ3!3J$Ai)M2C`rOT^H_>i(w8bZzb8Qz}^?NMN8wUiv8&r}jSG4g~;1^0E_si1{c z*D4RG7Y_^-fU*AcDQz`=IWTQ@x81y~K-OHt{lN~_Th8%B`v2Lx4*005?ER(pUI`>5 z0YVZ&@4Z(+K}E3lF1q%z?pjv;>*}tuYgZH%R8T<>ktV$*v=AVKkc1?p_uk9@erGaa zW)dK{Kmgyjzgf)8E9c#N-@WI2_Z0SDPI;P;eoWd9QhGdOC{}DltCFVS$XA+Z2Gn|gc(!?I_O-d_yatE{S16J!%HV{a*8V63>L z&pku}kNqa4hv^;GRLY&kuB)T7c=lZA1l_?(9+Hssz{tp}lpHhqFhd&pUar>ith&VL zrwAjx(NFS`;XE9R3?Jh&Iy+h-Hn~g=lCU{nI}8kU6=Pn*m4kB`njAMIdRUQ;A0jN^ zl;T6Lql%h(U09o%$0WyAm5w+S^0m1{dz0BnZ$~Sgw~@{>$wCMulABDDKOodi2=n}s zS{c(P%4>zlmHaz)mWW9#5zn^J04E8%{4+c8GJsKLY*H8$xUVW zTqGa89Q_dJ7AEA1;WJ$dRy8f55}TJizMMP2^WS@aqS>MK|+TfK6)_M&%S;8Na#G{G#Vwe`a zYT~_H1$-8LC>KM$C=35)$$S1cNh`Jm$s&gBF-bLv zDScQQ@6FIV%0{AxIgkv=A4BlG(W6HT(ZnR)>=z!(YqD*4oxXkhw(!ilSkOo!4d)Hw zPIwWt6zagt_ravpNg3r$-M<0C6{DP$fy{oQCl5U)bRjkn4+_&TLVb>xivgAx5Ak=DaZy>*fI?-Q z)5y-qtHQzK#X|NlX}6E7jr3Wb`h9^%mZ`}LLgge}1*CTYl$^ylmS2zcK*xc*Cbp?7&+L`gH4~?#SCSYwNeBiA-xeWGfC$b&%YbgHK0BfwdDqP`E$J2XF{QBdm7sG zoMB>UG}cSm_?*%dA%_@7r_0Y|h0>Zrn3vNFRiA&!qB*A%`;+EwL1nJ)!s@!0a_cf5 z(H-me001BWNklu?D56fghu`;(6aYAxVN6&}Qig;R z6FQ&n-g=Pi;XN6?_0U5Pi443>TwI)xC-1)duH;Q(co{v2NVYI15c2_%G$Ef8} z4s;F>Qgz9>sUK;ek~B)mK7Wh-g@E48pJUR((oDjGNP_A6N7kW*B7 znfKvWj z{7R(dRsPTO5XXWf$M$oOaME+iuhq^kjXt4gTfuxQ$Md3?yedU3nd!@?QCh5~i{{xaF=JjG++F3bE z$Q{2sHH?i1v@Q{KQ_ zc&;d_%13c^zEpR+5ILqU6!c+a`jYVk^WCtj7m1ZuUU@|bnQN}OM!ws;d9!#Cv8oo! 
zxRa0}K|}Jw5F}nmBCS}lLOg8vJIN)6vKfM&MZCAZVwfHzct|p_k{V^8ClEo;B<4h- z2N1n#7>*Vd6@{OF{#o8jhG#8ayjVPN^dV{_yGRVNtUU=99#67|p>_J?+PoIq!O$Cu zplnQJ&HL%whHdf8Gtc0wufCGXZX|O^^pMCQAw}YhrRaIguwlc5JY&8jdIGT>SdyN{ z&{J#W%9WD8i20v*FXntQ4CP~+vvM1+%f#FaQRDqcc#)W+2bErap3Cd=cRkpydatXt zCg5%>%o_RIS-#N zi^4N^jfQ7CVm0NfhJ&+%GUWbPe>Kn(%}hr$UP#be=|bP*NM%>ASJ>3nYZGp zo1L94p=cyZ&h);s+8B?cr_brayxIcFLYY{Bjl>MY$OtB|W;htHudhhPd-8ZaSwoQQ zq0Ic7GPh;UE-ES#Zz+cUnUZSlf%ca`!zG0s2UC%mSBi;)yTHYGg)^Pct zt;)`&${|yJE-IO6Cy<$*0(*N$bnp*`tD8rAi*`X_*l)Vzvn~5as*)6Qp(%!^mmjRS zn4ZUeqzom+g{Z2mfRl@>S|lCXJWhBncRvgAb6~3$R~J`LaqGX3kTzp=pmwSbq2bJ6 zp0SfT6>xX9mRw|KLze&#yzm7f0aI{x* z<@rgNOm7dn^OSr!;pho``^hpSC#0&mvljLac6j5HzrxeUyF2bK&UJ zwzW=jGpTX6?CVvC+7^v@FW!wV>KcCp=4VKNt@mg&GgoVJVJVIsI*!g={o&!|hI36E zrx8SLM|N2%I=Xa`u)}i!iK4o?%SR8Pw@X`+S2JIaK8MdA1xX>2OlN|rC)4R*=rQ%b zfbW*x3&{{9!VJUdPIpg|LiA8N7tqVe)YGXwp^-!yjIe&kG3-8=ivFQ~Z6=Ab3#+e4 zW#vi*ley(&rk+)CW}iAZy1?1>(kgpurWTXD>nyjqu}P{}%V=B;@6*?*$0 z_R8~UV_#KORiY?ASHelyb5_gZbqpCjwasmAX=#N`YnEZ*hyTE+iL>zFQ*WTUs`9+ariTpe zMl)1bSK-*veP~d|Lqwk;u&}&ztLFS!!M1Xg673PF@cQk+QSFqc7cyiC2%gWONxC{uwD*nE%SBIK2ND#!MNa z)?z0$r`5vR_R=Z?e?CEvo2T!5MaVIx<&zL}3o~@@*%kLac`G9Nhk@S~<`!bt<~=yB zjtdIutk$^mYkbq7-@SP+qP8ADXwPo2v9>;^&5)AfGA#LG1zg-*U}bY*`YR>zB(|>E zg{W-@kaR3rt)(^i=*NG;)7Mkm6uE#P@h@ZH9C$cZ8@=(WzL+vH^uh^tk-%sFUp#a) zTt2vbG%XDm1uPBEWY6c~d2l%~<9~M0{V{6$TKwmYC)FgcQ(V~7HPyP^R(_`^k7||S zKmGbUas93HloEY@N~@j^RFswA+t1&@!F}5?dg4rU3+pXjGhq?^uUN&1#MsVFtMT!_ zo|hdL*->q%hn<5X1`M5m8}E4n9-h8(Y<^BAj>hakSkHm#JD>9!iO1dB_@hc8jXw-m zcQ1tX8i@M3wj01CvvzO&O~zqRxA61cUu>JKj8y#k)0fJ4IEb{A1R2>xS;+$na=X?A?@IFTm#v}1qiiCId861JC%4#IUCSm{1L--7f@bnvxAh25i zQWDb;vnLi^Lj&RG@B2T?XZnowYP|gX?HVEGNKjc?x7}Q-tF1#^%yAUv6=CR@0r2u| zyN5H^FcJ#&^$llyCf?)wPnWB8E=fY#N{Y%bXjDJAd()%g@|a66SNze|^>VX^r|YGo zOaD=@9R0W7ek)!^ZT0TaaCt%_i8L6&toi@i4sAKQd_exk*RM0~e)Q!g@1RDUI39~% zeprBrK7%o8(kvm0qzY3*9YQ1e!`jByr@C^}XC^t$MY%rRu~Zrh z^K-C2a-)RUU3J}^@bvLR!qJ1+zVR1q`1N~b^bEmh)pu;4!_ko_Eh)scH$5OEk*SR5 
z(l(|tnVVZk`)6mSV*Iol;N;@gW-lc^2gllpgjo1=@RzYwcMb#-$EAg(rI0?Slb!8D zVkq(0VYx?Y(lHqyA>n=7)L$AjoDJ?u$$$3lG4OV~`1z1Dv<;jeTs>TH!(G?tDzRCZ zBRS!untx8BrluD6JaG$p^beEyg5Z>BYik2bYb(k7V`*U_MtWs=rG%haSX#i+$`WR3 z+9@xqkfn~^NA-2540#&%);2nC6B_V5hYLqlWfiQ|wMkMLDoa*Yx@vLdrRA`&vXJmw zhLiDp8+CknULk&7{F@LWqo)poi<`3$U(M$?5k(BmvUjkThv4~!;4ysmn(A65zXj@8 z;ZWqUmfz^L!{5wV)m1%Hb!D}D&u8b~d}dy=rm9*YSC{tZ+y=+XFg3re`U|u8knZ1+l8TsFtC$n&P-%)f7 z?0}FSU3AZ*+KB`m`;j?MS|hgECX{dMnw>Hp!uxiY5J9#H+sa&xE#{bNO&-dA^x$zJ zzXpx!gCN!S^=I&qnsMV~xyR}sHmG$k0~2SAgsZ#rCF2dJp(QS}VxFyn38DM546ml(}@eDdy#@-GRZ>DS$9w9_a`ptY?S^Hk2L;FOgX(TznZ zvi3^RwUJ%gii$F&>=wh>O&6C-g|O)gu(7dK%9yoM+A3ui-rUkkj-~P5&`@tgGFVtx z%JJo8B|-+cxO>UH>8)aEWu+8lS8Ut(i<~!n>{aOK-xY4alOjyZlGGS$PO;uARC^M0jVP4EHl9!t$ zIgsQT3_N0VinFcUv!tj%^7b@JEcz)0p3lzv(OXOB8Ptf<;sR-FGn0gU?6cggbnM*x z8~pr(B&?4Ark_j_5+v@(zGfwv`G*MqxoNVq>DD4M@`IE5I~Ak7`r4YinSr*S_7Gk)3f8 z5&ee3-P2d(;TWQ9REDdoDq2-=`Q6V;KEtjpzu|>9K9lD=-EkzJo_hIx96NFV2covZ z-OEQjo-~5!%7K2|wFeyStTA%a5}Lx|k*)N2pH(b-W#=@$OyJM7ze5XA*0aP@S> zwCl!W`kYB>O{o+4NJ{#Bxa%)BVcFNeV(YqHxc{l! 
z;>#0J;K z0VDe0wuk4aXRw#`<)pgq;*WliHD%0{Vc7nAB#s_9hS_&rjqx)^x9BtG)j72HsE|b@ zf9&n;;Oy#%^wcaN$a?hc4*x(uWM}58_KlQ0QY2yYa{=2oX3r7)u<#ePHl~TE7>~dC zf$PN^sG(`i;CjYW)-B(N-+$SLjFZ{&y_ag^8FMFNw32ntwrC-mLc+Vi$InapDD`+6 z7R+BPMAAdg-J!;ZZ>vO6UUmWE4kf_D+a0|J_0UxvQqT1HzZPNNt{Bz6*Q4LC-Yt^z zl6c&(a*KqyjhZ;BJLgF3Xe-v&Wt_bZDq9ggvb8aRL8cfkzVrdOm9yF3j zgN8pDNNDIujmq*;Y+k!uDMuaAIk>wK;Xy(uX3uskSvX$^2T!jKh}ynR2%Nhf`8zzk zeDUkDuWpsMIo#&W2x?ASW{ob?Q5M#ys-uTL|qr5L?%;5Ra#%_(QUbxA4Q) zv0~}xxc!0Wg&4?6PrdA7j*{ znW!i$#s0|65>9v5!+#SJ@DB=6O8z=zrk{Ybi#u+->q&Iz)EVo4`3}*0 zx1+c)PulF3d!IpI*Ki>rSh_eP<%BxsTO5hrjWX3%ZXVv4b>saQJYuqh6|jvzeET_^ zIDQm^MobX7j~?2K9=!+Q)(4&wqRNl}sj8}w-$}rP_ZkRS4;^tvkDd%An}1L@^y)uC z=c#k-uu{z95z%L;diG%HOP&{ZWIs0j_M>?5R92M3&fXsVhm6CF*>|fx^K3S{*^m1p zw}^*QW?HiJ3;STu@QJwo_IXG>aSU-Wd*S8dC+)+qjuiFXitj%aZ>PyKZ_zzxD})S5 zuwzH|iE`U_;3)N+b|TBlr3++Sk#w4N-5qdnaB4vSv8^Mwu91X+?8DPRmw?t;NU(VI z%oS>UPnmhE?9$iOYI$L<1VqPW;2$4v!22)GL+N!xb70bU_jot&p zF=%vuxz53;!`QuPuXvl}Wac6#Lr4Db*ri{t#F%L#;Nk5io0{+*CA^bYd&^HOtT znGjYE&W(ce&`7~|z zM47{n#T*xM&B?_PvFdqrI)V$_z1&;%oMl^`JdrLW6B9ib6_((~uYM6SYryc{xaOv* zuu$s;rdCSMjLgIPvoJl!WKeQHO*)965Q2E=+&Jej$0xRiEDW&<&?g9Bu|7 zZHfv?&}%@L(hUMc-p~KNNIa9KUOxdL5nWpt+59^@JqL+LPe`I{FEuV2&JvEz-;c*~ zvnXANEy{krb2DkspyAI8jU>`wggUk8FvpPQta{~x3QqK%ZOQ=8#ekvXg-|h6O6b+H zXU7Kd966B~i%zT_=I*7Gz#j1J6o`ZB+_dE52ao7eOIZBcW0&a`XN6r2QmA$W7pQ+=+S4eQqU@~bMtCRh}@}jR}3042{koU zLgKi2=t8FK>>b1dsk*8P-9me#OZT4QrQ@b#B8l?l`>!d*EC)UM4nc%x2c_g5#lgMX zB{b@vpZp@E1U+p?B+>gOKQ|LzKAnUl=^oJ!&TgJqvtp5uUfEeE;jiRy_w-TM_ZC9t zhp+!Dc?oK(Yv9*05a}riIJAG4x=)x5&edtT*)7~Z4G|?X(0xW zoFXA>`MFs_c#)9F%SlrTv#)ByFog8zCtfwC?)+uynMr(+tm&ZIJv}v1Ld}+a`;qk7 zosaxY5?IrdXZ^3=qeit)m+rlk^6MeqTdRKh603juO8#bBGp`Wk{&m?xrHngc_HB=f zrxv}HzWV45MDN`u*X=!EggVw51$kNU4-SEaGEfrYVpQK{WAx-};pXNg$A16Czm;K8 zgsX43N6u-DAmVeRrzR~|U;P*Rc5lSMVG|@=j(yyk;+>;pV>>_i z)c-YmX`K$dS8jGDl8zq{!f4E-Yqg!#D+m>pwTL^BtBhBzPc^g+d0F}59pva}kFee$ z;_cHM#-pYZdh^8W)saXf4CdT*jgSu{b+{%DA3q4QZkZ;YA=w!@nE%RWh(DaD)-9da 
z4oMu6IwWrF9qrI9JQ%k>G6#+>j+i&+dGSisg}qr|@Ahb{TCz?&U>^P3y>d;4Cy|g~ z$eN>z1EyR%78`%vCgjSqZ_Y!*XPJ-Y)a_7@IT)pxO->Z`_ygd;O~u%tG46amp)d zchV6!kw;`sXJ2}E@DMMfAHVzs%f4AH&ws<+v*6+5j#~BobN9c7xR?Y9Hze_NJoY#~ z`R7+^tv-b5H%!E=Td#s|M=uGJV<@4YzmGcR1X2>yrAk;>uWnerXf>98u|m9xZoKbW z^yt@J`qK0nI2K4Y9XW7J#?3e-GYO+l-u@bysoA*Yq1l))W0ZJ<5&AGcg5|Zht&3H9 zMJ6WA8q*ZI*dX3Sr(3@`Ug?d-zUkRN3{K7tXADo%(0<@pIg*%-<0*NVH9n%f#nGUl zP0&ap4Mqs|_dsQJJ#5cYDOe4cF1TosFkyI8x9~o2c0E-lH$3&%O&m zU3y^t>yKm0`sGSlw~_lXJZ!;#UY5KwcRlhqb)Nx3s7UCNc)6Gh0YUE-hI!4p=>d$L za-E!;a7-!A|9%lCQxb6RW3OQHH8)8(RLY5Xy#DN6NJ~x-LWe{SiJ7A@QKGbmg!jX> zH$RBrZarXSV=EQknCzHdI-UHxVD6nypl81!*t6qzMDN+EK3g#J#s_fKwYQ^lmry(~ zca#uK78X_#Xr6fN5Wal>RZ;35c>+<=VJ2JH)GO_8__ASoBT$vnO$3dlYV4d zlW zPp6jQa#l*gKZ@gVG3cuLJg8fkcr0nSLSTlbFd;1$f`+z%9x6GR`4Vas*v;Q4=`(ZW zY+1Vtbv3n^dGk~}`23x)V-A@HU4A_p@}s8=ZBBljm7a})yh0&xygPWpg30pJbEO)W zvx@^Bd+C0Rojy{nZC1$3DL_?KwRjvc=Mf31q+=;4Eh(2|!xiOfZS{7Qie=E3cjsCX zpDO3|91tc%2-i;%7)w51E+oR+U%rHaBl|T!Lr8cqZ0xL2uCB|RN+eJtH}4fP>Bf6! z;`T@8N|?3Yi+Inr1E?siz#a4EVD5d_!r9FkIT<(zaZVBA$BaNpCnNthNrh4_r_QaT<&uw|%ro<3)? 
zde#+K`P~|foi;+k^t}B%o5zMJNb<3N662EL=I$cI&zsNwM>b}jQ*Ff%x`w7|tbE?| zlq}RLeS;pke2y(^c1U<4k7ojJu5l!S{DV5Sn!~9=jfrjRcA-NjZ*&O_Ja6xdhVu^& zmkm3QHIhifWdn^Q(qM#{<9cB7@a~d@U&ED+?2J?iKO*VVM=C#^GP8mg^D?OygT?SMYjx@CzFLhpx2I#GT`Z1L?WWDt_Ft>>=IIe z-YVDJaF2K*F@F&gEAzj5_8TVGqf0;Y_t@GyNT`#evzvr8NxmNF6k&R?(1XU+%@duw zgh|L5??K{(Bn%0Y@mI}JN1IFj99tVZ$#2B-8A3o$o6T#Mp{%q7~!=RFal5fZ9aVbI-|O>LVdqa$gA5QdJ)sEy-moMjI{ui>DsKliT3 z)%Ow7&JvPk?<9GI#B)h0ybQV9yX$xL%>KCJ;lIJZbBM@FkhBr+oUC+Z;2f6gL`QB# z%>Es!AG%ApU$?M6XY|%#?j?FJ>2olVkSfT_#)D74iP4j0${5reFc~M45PvjUjWG)y zv8KikLn3YM9Ar%LnW;R}%Zs6ky2RzDj6RZ@@v#S#{8bn-YKk)cYECPc1`X$mimF;f z9ZrLXiyeA)^KEZ&G@J#T8<@L?qzl7;eErm%Z{RUY@@3VMweacajcKzdsQJZKLUtqv zojQ)8RxB|dADe{gib^#P7U1NG3{09i2ENKT=Nz4UEJetl;o}ElKFVhs001BWNklx9pdkPRs{nYh#1m>Nt7`k@WiX zoo{sEO{xtF^NZve=VY`n=*5(5Lx>5fX_;TkV(uL zmUkj9Mf%cP{r9X}rivGrKH0ba944`5SXfO>osd=xxtly|4DNgCHX+l_6avP+U?nz^ zOdLO-yz`ZKt^MWY`z2o!`-sq0aXXIB|MiV{LGka}x;nWRxO||ERt-kD63R1bXd4(-Rgj;fl#tE{ z>=tg62$n{8%mA~CFv#K@=#GbQA$T0K`uNg=jvTq%j7nJ8OGC#8r*Ze6RMuMqu* zjFmh^iSbH--@jABvr-ac@$Kg)gxnyhMlY8BgU5E@j!q3IU4c{-EFUhUxaTOLZ zKp87F@}#mGo{=io?iSV?CB+32CRb5bg1L7-j({f5q~@`p`kVJZaxfC5rA6||%19CK zq(@))H++0MH7VGqS5Qlfbz^}$rt5zBR<3u;ea~S0v^nB#uUEX;7OY&ie%1F9-bRps z%+Jdba&YzXujCp$Zs3SX2=3k!Hn3~H4+$+2b-V`0VvoLq#p_LjhVzB|q6++T!6ppq z>93JQmpHiI6y+BoP8sB$-frkc&l7sH)ad6MT~3#b)NG8LG(-{+H#QmCoF_M~+J>cH ztQ6y}gTD_t26ja9@ie(^ul`|h_i|NpTd7)uV#QTp%+wL$jiaw{w(|S6@bqyP;)CQk z36&vZ2jEe4kCorA!Qp*zk{icTtz|QBy-J7|hHIrHoD{NW_=LgD8wi`%>;UIjdcauQ zS~cg9Bk>Tm^?-yY_2?Tavd0}dE;(^{EXff)0m6TohbS$XUNRHm?Bdi^Evy#Nkvd|k z@6evjmDx!0Fp;#6pEtsKg-BT4u1$MkYiBDVaUR~MypLIwg1Mf0^b3`>dDq50a=i%E zPWJY;k_7s6;Bh2xm@Da;8>dLl9#&v;bh5{u?a|1~DHL)pyiX_&>^h{@-U{>?(o;N! 
z=tV@?*>@5BLz{IJ%D_b3%(Fxig@oCT-=m}~{jhNZ(L-IU&cyT8OvtWcX-|4Em6eoB z0&jYa**1}UryXnNX3dGW`8-{F1j&Dg_UbC_q$h~Z}U$KfxNYb$wy#MyI;`!5W&}cE#S!Ii1U!A*#qeI6a@!C0b zV3!#8!^TX5Plo_q5@ahY9EpiUVL=YYPQ6a5k&%326|#q)ep9@1)~#HOgt&wFcELO7 z6wpN}=Rp!mMUNg5K>Y@f7Vj9|gZHFIRG-0P5Z-ej%F9c1aw^3-Hab!~gQi@4tGd1m zP8>fXiI?e-6CBdBnVjJ+YkEqOdagr~|7hT_@ly3mM|{O1D=irlr{Aa)YFByw+S)4d zykQ7jul~cu!-WJ0^YSoMYr?cS;wfaiOc19YI=3 zB78b_7Ltkyw8MK1!ni53@%j9hl=7S@Buimoo`hG?pkO{Hy09}x6uoSi5Ss0-Q=Bc4 zlX()mx2_SgY=C-JdPJqD{$u#t#OZTem4$7@XSP?*%6wG37n73LsJ`R#nJU{x=bBq6 z!(=-O^0QUnjgkCTBz~A{iE`B%R70~cw?xl=!fnK=UV99^21Xz^D_@9* z;E(`>tK&JhWoP7J`?^SVoF_tAL8~cuPtvhu2|F4zvM&OoM6flURHDddk|vO$g>2`u&%!k^_g= z8$7nZjjeD}p)qPTHOJJ?OSQg710 zzQB-uvVfGT)o3HLr42#Iyv{Zb??c(TC=LGFdT^n`AYJ@GvRTnO#clzn=6pml)!8kc z6GhxK5pIE8C>6RfxI!DHvoLq?M9SSW2t}$#s2($_g)~#B&euLSJwoxhi4kigJ`jqX zw`7!4Ei#(f-)Xc=s0`>v(UXrD&iq1~nvmLpAJOXrpUnL73t$Vj_s=Q!q5E?2GmY=7 zY;7I;F5-3%-*V7cgZ7nOF#?1=Pn7$VsL*8)-3$1kTYr?6RfRSyO0lyqa(RofC_nx| zq8rc&Usx>td%4jmgD&HKv8e;nI~#%Ys+^9Z=Alub2{4ikxKG?Fr=_E?NAFB*le*WO zGTd^x*aPur$mFJz5A}|I!?PR_rH6)c9a^SY0`powZt*xQu|q2!FK|)mb7b0R%{bDS zeJ6jsMaXXLK=Oqge}lh3vN!OzG}kaRVRb;a@q-YN)nGF%rjjGhnGT0I-X;mp@8u^m zmVS??Hqxn!!z=z{a#zDm*eORF?FxTF)2}U}Uks$BaukOQCb8;kbAUkr4v?gDDD;#w zk$67Z-k)-PAFjy7_We~(iPFFp)4a9xPQ8@lx#ye4d&kN0?HGQ5U*X)k&-@a77BWcc zJ1Ei$Oh_g}Eg>42hLd&^aN7XDWMMRNc3{TUm_N_q^Z5PoZbRr3@mDFSC1)Q#cHL7_ zPm2vx@jq0P?1ws{VKA?T8*2CqmcNP z>Qwj^UQRj3Ne&e_Yx zfGE!NH(IA;h>34IQMrzWPezFND^gVheT7U~b+I)JiCVeR(#a-xltWn($p`B4d0}VU zrLTduT%V^2xV%4q#Yb=XKQ<6Lw=L!HR4r5T{O~+xD=i5g+_>9ks8zeDdp}c`7D(9K zZGG5z3uz4jxnyCjzX!b+5}jrGv7HNn9L~K#CY9qU+39rqL~q}ll?x`={`y;2L!z13`i~gVCMwF^k+3W!7sKqKbd| zuLnLdAfLO=sqiQGzHDh$Kwy2T|pcma|fyHhJgR5d)h=X*CkQ?+4}QpSRqgm?j4N3;f_T69tM;i25o# zyFcD*_?5c1>A;H|w-2IG;W{+H)Mn2Q(dosScLkdAy#ePiU>@U<3LIyR4rPuCB9UqM z8twv$4eTM@sgMi5WNhwC`4IA2v`q5}-k`2bQqT)XOFiym61d*H{?v#nJ>dzoP?|q` zO&wxpTD(8IoIPkQx5cafD_B&5H>9ATk)gXbvx0Sbxj9QYP9{Zl@;wgKX=_32GSTm_D539Z6|#6vxQ>y_`P~1XZ-Umvciub 
z9OW_^l#Wj079U1FtY7Ig%iYg|Kf`c}BvlHGRvV&!eU_s4aIe#iA{LV}K@VeF?bN&uLr#&LSE-jSy)HB~njAd0HDRmVlavCT`w%TP@D>J)O!gWq`DdRq+~&MT`~!uet}(>Z z8P33Z`@Pd>fir=LP5_pqh>Ntf2R7~>a45%dn*{{ni+@-qz|mfs3U$+DYIM+uqw* zE_Qa|D__lb`5|21s`jl;EG}hd?>yo(-;o8eVp4DyQLYyg&-i<;ma2Z&?d=`WG*X1a4z#J+LMd zCp z(|uR^=>C$M`Q(R$G5zN-#Lcm>G&ls$U?^X4OXaCNnTr9~QRCq>BNow~kf5{JJ2RqQ zJ~7iUvM{OIJG*OAaI-LEZ@8C1lB}#ueQ+Km=C%C@s~^uVagM`EPG5PdlLzI_;Nv^(c!G!Xox|Wo+;{@|{j073aO}0hukE1ctfb0K zCYbJO@8|EQ3A%%TtCT&`D8-j(dNEoshwa9@j+8r;tuCuyQDc5e%gYYc^}&lC-=M!< zbA@uK{0wEaf|>~lYie!sSjeVZ&$=*86blL+gZ>$_8lB(s#s6bU`#f4<7pq3}Tra00}%g@t}J*q)1NPZUHBOtLyz`X_Sq7xjDFAtgN0+|UbK z+;}-jcf5}nkQX;}Kzs^_pw$%B-@nfH58!F46=5?QojKjGrR2&e77$ryw19qtobLO^ z&%*`Q#GGmRh7~@OQ!|dL$~Lm~dZP?BTH9+z<|m@^tT(+){S!cvqAGIG^G{V1m?L;! zzgcg#PV`t}ZblZX<%MxcuD>NS>J0e~NE0(3c*+fbnrNX}l7UO7UO2=)Z4T0UUGN#P zWH)-%y?M{;?K0b7B@AB&@Fa)EWHSPB*DgyWNMuKz+ey5Cj8q^PkS1W^G~ihK|DfO0 z0NSN-(R3^sf}3bny3R`}TeT*8v}6%pG(3|~`fgAjl+QG6(cd-r!X&9uWhQy8q{}8ayZ}-rBlJ` z%gyUE%xEwVjx~F5bR`&?ehT}@Fa+82%~Z(uPVk&aZ{G2VtX9qf0Xq5PE{ONA|3>S3 z>mm02F`HJG$@4ao5TkJYt6zU`1&hx)BFi{p&vj?AoRAn;mUo%+^rRs}e0k40Z+9>| zHP9Y47$c|_`!PVM`XmHqFG67~Tb2zy7smN@WAs&91=K*pe+6{knn&Z|>1WqqZz&|QH%3i|Aa5L^n zJP=G-E3j7)XFi!NR9Pp|tqU(fW)lu3z?H)>+aw)U$E zeq*+Y802v$ca?SBpfQVGGV1~~)zy!(QfyolvA0)8hVS^6A*!Mq&3~xJL*JU4&9&S4=*V01;BM49Z5q)<# z()L$oN85h<^wMMbr`&0jnFZmMj+95`?-E&T^rpqt%DU9U+(0>1vXpoqQ_!$(KvHf# z4 z@`F@=OQKpn&k2vo6!yT`*&#DGHwynyUvuBGt(usj8^$|P&=_W{l$6&iNZyQL(Xt3_%}UsJOa;B%9xU%XHBt`Y?p@>K{_h43UCI@U#UXT0CR;Nu z9Z&Zz9&I^15FP*byy3loHEH;s2iU z_YO!zPY-hOf;VeMw*4vL|GxsUVee@ZxDQ&#rMc^2rSBb8AR?Cezv-Rnh6*OT zt~MdN>=_ho5z_ztT?d+9#MN}1LjbyVxE+zrT5EGqcnE&|?O42y)@HTS|GiK-45XMC z6|}oO62Q)>4XnHQ#G88G0VKov_aL?4r1ssmhJ(>9T= z`2sB7-$_Q{GJJ^jn4e#4Zr1wW%gW1E-kJ{IBFDISEg@LIPlyY%BaG}p%U;!?2d^3Y z&zKnPLp&sNNi$cXS!cQFo<20r#gy7wX8R{S-T!+_U_II>S4IBK%oE?uvn{z}Ut!J# zg9=-@^;zAX@VCuRFN9Iq3N-xln68~{W?OMM-nTaV5St|~_TTm88qu(G4|Cs%EUZl^ zph*!$X3NUk9%Ku2!!G7|D$z?|n_9~=w0UZpI94Tz>`ly!?uib-dz^!_Q4N{~r|PD07PQ*8sMms@^$ 
zF){~7NrIf-7JD4lf&rN1wz?pM)j7k~UuZhTqg7P>NOs~`U2B9}IJ)6*Wq1>3O3jWb zlJVxq2XhBVNL6DC+TPjnjB}4xHlQ;XU*oE3`?Ydi4s@#(?AB#>*N2CEu?))AeFN;n zwmM@+MyIkHIe{dC^rVtMf3UGrQmC0~)ILhjyb)V(OFEC02LEZndeZiXH^M#a->4>|0%A zr?!S)r37fj#2}p(TvmLVk$~;>>F^jb{H&Aw?5ju=N6h%Us5K?j&-S7|jt%W?*)`xc zT9&%+29MG!ZVpN(S;S3Lb8F#5t*;8?=bl-UD=PEb7LE+LczBm_N7Rcq6G#?`da2~x zdxqwq6={d9dmv)`0-RV*tFg`0<6Z(T*Cn9K{__HopuMY&WZlKod`Bp3=C!l-$?n1u#L zDveF|F`IdBczIR;x-{p5P77pY6evXp>J0n4y_Ypfud$nhPFSU2`C7u7anGTF+G#ti z7fq`IO->S>{7AANek*PwA^yIZi$7!65Xf0sd$CP7z|Zdbfqj#QiKa%_Dk>M~ zhVt;W3>W>xA`@tLZROt}nqJr{2r>5OW`U&`C@N)~QZ<&iFT>qs zQJSt1KTJnEbd&V1fw>^r!n;D#+7W~8iLU@(_QJ}jqY8ZIYspW1Bh(hgpQEmFG?Cs__fbAbwiSaK8}qM8eThy}}8ay7hg&-K?|} zJ5`;0$214mwQ2+_f71X*N~gfO=yqe2@vDJUpYy#Fw5t;rD`#2hK-p8FTFltu{^@>& zHIrB?w+6a8f#yfVnyU9VPB2#QK)u!|l{uz2rDK6#f=zZDAxlWy`k%K(| ziCF8iopSZ4#%sb7vc^?JW@4Q#)q?kcd&R)JMC&@?0nwj2P8Pqwwg0CDu&IGbzl!9R z$`~GCp>Qr7c{X07%#vipC(u*(PLF+Kr%=t%XJm73&9STG7;Pt*pV~j`}jyDc6o)-frxXT8aV{inPiS^6nN!neZ zV1ixQ+dj*y9G{(jBx!4E$5w=25?!BX7wjCkmFH8BWTJF$#WeB7MnKrXP1-k4;fJ3M zJ~888!BZhzADR;^c&l5nZ*psTweam0!BUmF&-7heS1$Wt8Zd3q1*ei(xFDaN#Nk|# zP@NSJA6QlUJyled`+{PwW?Uu_o?`ma0(vC3?mB%R`Z%Z=X|eMM$7l6d1EwZ<-WoKk zgA#i@j$FI`U67_!^DO5d!yqlpK(-J2BHDh(clLj~;w4mdPNk6syqj!dB5F*TP~Qd= z;CVb0ATi`*JGIjJ9?xl3hZSVA;63z!N1FZ2JCwUInofNQxC-eG!KH@+%q!B-dC6y! 
z_6d53)q7?W_t90{_x|fm2ua@#a$YQZ#$_lr&!w0MB6kkKEl;mdZ32Z8H2&SH5FIqE z?AhHF9+dBdn@CT3Lb#W*<}(G-ReXw_?f55F6Ra0Q_#BTp=0eDDcEnHorkP$;oB!Nk zY3E-pWVs!QJ}P*rrnft>Tf0_6J7HBbq*?V4(#m2@{wWe{K)UPB;b6ybEH_!>xLvMC zY*%mGY>W~OkB){WXCtgbhH)Ciy$*KhNE)91BAr3Gcd}J?@@;c*mlIGQk?`Ff;gcj2 zp6Clr`?tYpvz1vonBll}cMjht=5XZ-DKRw@^xBt+@MnAYW*qxM!gwbkGt5M7>+j*^ zu8__n#a;smKv_itKJDJu0*9o-;R@FS++{wJiR`M`e3?*&XBfunI!csu8R{Si!;dMr z&qD8La6s5ibkL(z-|B!x9`jyNs6U3yW(0-PnTa+LUY&ugFs#*)Xk?9Jd20 zb~CT4*f*uiguL%vKO)4SPJ(pJ6?gMPrbA}uCA|6%_Lhe1k30E4qWyq<`d!O?B~ zY)2p&fnAQsmzrjNQqd(&n~mE_18n1Kfql7 z0|qYysg#yK;I{WW2XUogUJ|qB)t8RPew*(EOH`b|>iF|!>WA#XpUZ?pGyggD?U8^8&4uz>2ihd7{RWp%^Pi2l1Y z&nHxFU|wL&pgDN5i{-6c@d``nBu}`h=hTObFPe|)*NOCol5JB5Ir7*hHS+K)LU%|N z;ky2GXur{ioC0)}5|_&M`8W*~eQrX(RI|2Qw>`8`PSRT7c`A55BttGXkj4enl&1cC z(lK*5m$R`PuXy@(;aYMk)#sR-ra^j=4#kMK_3qGH~Y}tMzEGoN_mVbzt< zzC=n_(-^|CeQ**S@-NL+yvv#YJ^$6QM^Tv}8o!H9GX{Q$AWnhvb#rJ#L@*UVzd$(6 z*N>ye+iFseA2c(>dSE~ISa8F^+)L7v&95ELqHs#Y#Xk6OyuTNn@DY>|jj`wa{-GN) z$k4;00LT$Ct+;<=kB|#@!I{S zO@V{ib|g-H#25ad_T;VuLgsw?FN;3kL@|xXTXxj3KR=ZM6jIo6!MIHrH+Ro>Ny68`sdHV?uDhL zC1rT4N?4E9-Qsf8A;qrus`HlTLu@WGl#_)6}h|%CWHjM+-^c4=ZH%`>*^bm zZj#c|L-yd|!v$)wNJ_4R?c8|%-fWS;CjBsc-e&PAa8j-&{>!{?f7fs4+xt0IrK;46 zoo?FmIw6(HTx8NZ;)v#b7PKGwnHS;?tvVLDGCJbQKSaNFstMsY<7`a{S}8oLnG7~| z9t%)H)r>sn>-NpRir)5IOWBRTls!!jif@^%IqcPmG_YYi1o zNPX1AI5EIJOuAdF_A@XrNH{sMqGMpxIepj>@_2~uSCml;_s&2LkI@Ok3JD<+PUEos zq~4fZYkwhL|Md;w(Gp*~cnx2c5wa6cUWL1qlZr-1OT`S}SWqeC^|iXXTEW!#-_^P^ zDr2qaKR;RjrYD@Hg{{xm`6eqqr{Kl_dxdyIjE;IqsB>I3b!MQ4yO*}V{Fe?)xIR9ZY4}M7V3zL z(6vh7$={EZ8?|*cx>kodhbv;n8?ewwt*uNYd)d)&)ojK)PFKw;gcy zu0%LN<9lMwmvQp=@XPT#j8ACO)&-1i|AsK3zDrmxc)p&isWn>?(fYAscz(Q7e#J}5 z#(5qt#Nu+S^uo={o8M86ocToAOuEqT+%uq`BtecsMe+H$>~bT{tiJ1k z+-dJs4!seNn%Dle=u4%fA}P`POxQLYr-ab3bIF^K@gsORZ5}&{e@GK{fhuWi*hC5s zPB~0r*Hl9}%3Fz%7Edi7X)Q|MydAmtcV$Q00M9)DuME=z1;<3}TFZ$TdBFTn8{LL7 zUO4+EVuS+7moRSSrHJ9O$Jr~?1vO;G!Kdz3rO*qztHDO>u*Bo{L7En$I=W&r9c9D{z5|_N_DW1?dXwk82g|6v* 
z#P{*>ne=n)d)r_9;bCy5efuUUeDnnh1}5MAYO@^0XR1PXaEOC5CW6;yxfa5Du~NByks2=$b+xm7 zRw(RmbKeUc;qM6seD>r2I95jMq$EtE6VR0z>~C{L4YP*8xEfYWBEs$6-LKYXDeY?b zt1yhCt*ty--F6J%L?VHO;@EoYqmu(9CMHG8t@U;O6)t*rR+^om2jV zh!a{J7HH)a726jV6oZ@s3cY=OX!!W$o0d#?2-w&R0;Eiw>=YkUu;uwLR{FBCvb_-) zRJH#9{P3dwb0q8yK{)uhTCO9vYuSPMan{tjeG41&xw~upf{BGi<68}dhleLEFOS9A zySRuA0RfR8<-W)pO_wB9IOKm3|9o>Sq5`%3v*hlG# z$d%*N*5^vP-)W<&v^3sdOjLCH>-6((?|RB0|LTc)Mt3F`p!) zH|Wxp&ZAK+hHLe{-WI{b5RW4kVD@=-@HB67TXbL%Hc38nS5r&Ett6}<^xqqd!Po`C zoHR@c(wQoQ`@rCPQFY7b@Z+VVXb%RDAfHrk+C=Q2}oSy#sCxvCdL|i)sLthqqBMjpr z*t)Eh+7=uriJ5P)_Qy8AQ5imuwKuzm#bj{!5;?hNB9m*(9ZVdHii-#1=Ba&Yce|kd zCgtp;U4_EPYvG^_PYxkA5HF540{N)?bh#lx)2;RzBH5#3-$?;B^@C@xueObGJ@Lm6_&L>V1F)`bje2M4F_N#s!lipx>R^;P45tLXtGeV0NR^g^Z zlgsCuz2}doEwhi|@$vozO@o*p4L^HUR$6}bt11eFSk4v0^EA3$X#3m@;G_w7|M3}$ zl*MNhsEvEc>rpfg^nw(6&kTlsfIqc18H&a74YZE^7tp8g+auO(OfZ6gbQwuvDLMSmo%A9o z=|B=;r@_w!}4C=vn6 z77I}2b}ZZo5~JWcTHaUD+d#%K_E2fNX#|Ao&YzsEg`hH5rzV7imJ4p?@vO^5eTw*^bm%6aoSxgc|n4 z6~bHC6~tR||JS!jzL<-XMR`2%?vA98OBwcPIn1yQ?w@wUm_qc@_Iq40zttCfzwW#3 z5_sMYVvq)ahrHXopX}ftcKBs$zIT-n!mc+R5tiW{;D5rkd^+v@8l>*@d)eveMS=gN zTp*eKs0$L;)N@Tn{nmlJlo==ZD$VKrpX=kl49c-Zhnv4IA$QPR#Lqik`A;hQUZMiR zTSD{Q$7Yo%juhP8&ei!iE8v47gdM0X?evSlhd=l3AJ`U;*7=|7lHOCvy~}(v+XJ)T z^5p&Bd_TFb^4%vRe-6V4oDjzV5JS!{pUSWWPhkDFzjH&ZHGdq%jawb3;qp<3&_VGN zPhTDd&JWin8!h<^cA3El@S3cm{O8yh34=p+sqeT1ibBHy&=LTF7Vq(>yRpzRh<;Li zY2qNk^}y;Xeo0l~n4WAwd{Bi(6*_A;F|iGopQ1#Z5u_Sw*>qyC$uUWc+$FYh*~}H= ztkUn^8mi!DKCBc*y|(cDzyA)q)pVCpfV3V&Uac0sPM;zYAX(rR9(olE3(G7MBn)Ew zh7|3C57jCHA(znSb3t@frd^xN{wo_kg6%*cGy*CCK@^U@M_%jJq&WIX&y$9|j|2U+ zz`^pi0(<9I8B{hk5poBPEF?lM_Mbed4umtpX3OE56-k9XD~8hOCnEI`nOGf{9iDgn zsEdoiQIHu(K$=p`nV_^YfhFfqxEbpZ0-^h5%DV9;T@pZ{BCj1BNGz-1kIf2zKpq%+ zpTh_&fwbtp)_*4w`!mfz94m>Ug?#B;SMfI{wLURs%{COnP-ZNNl3J&mQfA7rxl0^Tn z@pbk2eIT@YJPL;iyXaumx+eKlSGO?@Cm;&@zsO9W(0yd(=9~1Ij1ctY`*^O9fhbIA zhjK*8O=*p!$UwqHhu4GE3Qr*do0wK9vkJt%Bk&lvUa}}`NCnSWry)13Ha@aj&>dNZ zpBD2R;>{I^czvK90+L~TC_gScUbVZ=qj<10TA6%=^dPvfSDh@ 
zL*o<2fg1$FBR00|9vbSQBMlHc7K33V3~3 zG8o&66L?J9@16DLZN#cd5Qkd&hAwfR@^)tMj~1#JJJQQYxIQq`?$w6__{NRGhMBe_ zbI)1$1UtgpDK(bf`y#{YC>F63^c_X`>bDe$Z=-|<-4O}8c-EAba8O%MGF+_LAEpOl z-};|_Sk_XlnIbr2?d_tO3Dd~liZ90RJQ*X%ykJzBOqN%UX9^BJ@=8!2mZGwMiYU^m zFrIqJr7~jGpFOVG;}B(vr%GO1O*@?YcTByPiPQj6<_gKiH<&uT86#jBTM#?9?Reo) z&{5a(b}o^yut3;A+LmM1^8;P1Ejnl$0xvhEnLYnG3-jk+dG$kj41Xx}Mp~o93zGS? zT&RBX)g!HR@#lKdO4H7i8j{45<|FWx&|Wgn7#?txA|??24($2dwg;9jyYq(uVY-j5 zitU!QW0Rd>-PzKEA|t8ZB7BG455Vr{D9mH!#s=F6xV!Pr8bG+naCCLynqOZ7q0l4( zsrj#IC_9{xG&@VvS|oiUAqKvD1*w7Hs6`ON!}qC4sd*5Y5feS$ntzAR8U5tVn)7Wd z`MkThy3eY8(r!{&@K;~Ruk?~w9w?*EmJH5%zYY_sJcw@=_Lt=^h$o0cJiGFHYWLNB zgq1^ofLU3Cp!D8T`f4;7=4i7%pqrM~Y^cwvc*k!7r>%-t$_cfPvP;A|-)iIrR2C-2 z5XVHJ$P_F%x!_L8L(1ht_O)8@)s6Pv58?QmhJ#)xF!9mbGyVbD*`3&A)2rvP6Vgg@0N*HPh)kwd8FB1k3W;F`<@qRI^g0^7*v}I}kmLXPa`0c47&!P>P_@%ya29_7hjCQNH%XWKL_&>~(azLn;gw(_X z!GAAPsqWz9dJbS|cVa!YfN8h|IIVB-FZ9Z!b43DPyTkqj+;2j7qPE0j(1xS(K;iALn(HVBlAM+LCNH?FT2Zq+9w7oW+d*z$$}&h!@M@#Kxk! zk;$U?W0Kcm>GhiyJf6K06*HM?tcXH4ci#eW|Y)^j-XF!BT>|JR=T* ztJ!@pUK+nduEF7RPUxTM<;j7E>+kt5t%??d?2GY9NYF&}q+?*R>GJ*%pWHG&j`KG< zv(hK~1W!~BZoe-n<5R@XL0n(KopUh=Dwg4jSvb@+=y1d zH-I^RoOM1LFSLLYC}q1mvMzGV1y?hBnRN8I>dOzQ>^!e5rQYhjp!p`&E|b3*{w^UZ z7Xpz^GLag-jH?(?7a?sEXCc*M;B``DHbmrJ36uBdV&02W2}EruuwB>7w{T@_Vy-U+ zWRlFexeO1YumNd3RT??<##g+w>Rm{g=flFY@$N;lowl923T)!c%9@rqO6W%V!80vrfwZ78h}RiBWM?DTmOFjA5^@;B?qPLD zc_3l2k=Ko9Kb+_3T0ohAt0)bkPQZog-*?72jI}*NlIeNK;Q9tl4bqkZ*^H*^xk@HN z8^C1}x&M;_*LLxw(I&n`V&@XQcTy`bqHmwIKzL-1DzQNN%2pyIOF$FmL1Tar;O?N*P zR!}BDsSPE=J|^I|MZ-JP148SC5ZVN5H3Mua^_+o$S}pjkYlQVPeRL5yY`FG7)9%(R z^9-b>85jW=yg(?BJpxVP)*ORqHD_UxxlyC`V!9b4bL!lfF7v9{cOd$O2uM7lA5|F7_=|Gzd5!~BaS|Y^1ws`Y6(hi5 zCmeuSeW(ZSfth3Nvjm1{o4$@FO2OmpMk z#iOM=F-D#DG47V<2YW|6!EQ`o71Dz{-!d$rgRo9#RAc%dQCZR`w++`N+3&ILY5rO7 z&ZlK1lTF<*m@@O;Z|Ro`oe&-SZIBa5@bV9FsY9lLn7hEeh=DuF5>8O&~ban+S5U@XpvtBj;7? 
zkE{;j^qSTZQ;4Q?gEJr(-c>H{?eGyRZ{U>M(v|r4QPTm7m1j<$q+!P>rhJ&KlChLI zbHilL_1<%26ETh;J1$f)Vf$ofbO_uNuuJGQ=7ldn50HRM^1t0qxr+&#Lp2O{xiAa~ z6~Igv3H`Oyy)-%R$UTV{2q^(IV@d`iOH46xi6}ZCdM@Z3b#SFYx`-+W%2fFqZ7yq9YAVSlVq0LCD4MRB4?{zNszdE?>Fm#4A;wuYwLbF=|Uelqxejq4^E%rXZ z(}>=Me7ugoJ6h4X^3Mz-zhfoeVZ{vTeW)lp^gS1l*2F)I$gT9@#B|x3%FwsY4S*eA zm0+6To)c3JXD!OCg<#A4L$&b-)fydo${2iDxY#>6M$BH1;L`eU1t~&D@dal1KZdM-Sn%_Bsgtw?h)EyYb;Ko8AOm*<*wfm6MDfdI&LACoz5?RtHa(o4EQP#& zdAmn()eD9Ji_I+!#8Z%%^VSpGD|JRQq=CFazzFmRPBQeC)s+G;yK=|1OFH}|5;o*- zwZ6Sp(nn6df+N}<(a@C2Ab^F;2_m4sC3bIb2%Y)@ljR#@$dVn^RnDjBEJ-h02!u&V zoVCEpn}7<^$()z2jsFyt`IGRzk{ylcYM4zYs~U~y?HZya8RG5mM-YD@>EMa=YGS^$ zOibZqLRKbvq;%mvtPyK_2dfE@2=?*Os_?Dah0Lck0IEV)vXA8ukZ>45m7|s0H@#52 z2W@H*>u?*lPKVx)Qdkp9DKj5Qs)1)Jt!}c?!V((;0aJ668WF~a@G-nBwh9QZ)I3za zV=b7*CA?5N-ia6gZLsoprwE;A?66VN1+o%H8e?Z}aRkouPcfW*eH|GTJWpXvB{_LA zEX>Fl!SuU{K*8L3$K9msC9xIW{F3fZJCJ)x_I}Fk>DnT`wW#$o_sVJ`+UmgBHx|s1 zN3mitnkZKTI|=>i;Zl#d$wR!}t(XcsfX+Ddy}bUG3!op!Gp&%}ae{Zjb)EkDmM?z->O3u_-50zrSZH?^bT?Bh2 zAV8lU3H|d_Uj&vDe$N5Lv-A!ilfK-R3*##rpIfYQ6wi;B5u#;CChXBp85T){n$6+D zc20y_3Ch2o^W5OumVYKmO0xEfw|g;?e<|Dlb}JX2GtGN2?LL@TBGDKiNIb$Mmab{P z4_o9#9X|KS*s>jXinpU5Jy921x&)3O(tit{9^-kq({MqM!3@XniH3)X{B!gY&?t<_ zN_doHLc`)hdKN+hIIAQJC)-RO4pZ>XLxkz0i1@3W=l^jmB~JnnvbC3~Z^jG`#y#hFW4D>q5XYfqX+uW%C2a(-N7B|FeMP!vaZf7ma8A0qLeM zF^PX<)^`MjMOrGd;Ao*|P&`-;=}BDmBWh{lhk3TT4AXw(jVaL`y}xz49sdQIOq{qx zFY=IvPmwOTDp5b#SMN_Wto;VxN~+i5Odhr0ZG=d= z>!GcLN)zR*DxAQM={6FVfQ?AAj|K?R#iPyBZ8P9=hiD?1MKHpWY5b6Xxi7y`+NJK0 zCU;e#gn}sIa-$?KJJh=GWu`~ zeA4;9x(*6ejHck=J=G;gRY%usFRgEa@1j#ru|zV)4c4 zUe3w4xSzMm#GCFN!Icw?Q|Eu)L8j7ntLo34wf4MX`*ot|6fC*Y!2e1BNr)tI1a^u9 zr@rTAX>!7IV(rQV0!U>G+`l`VM2=UE;|#OGAklPZ|0p4@2U4Ck=usLecxa~eB{Xxf z333dn`CDdyjyQ^0QXm0Us8autq`2}axjlq&Y$~K#d`EH&|40h@sb98IWv>n`D5rf` z&T}5ka^Kl3nA=Fg`%d(>+{f|sTaXF;IUEe0aaMob+I|hATaca`?@`JZ0b!O!l>HzF z1DS@4**Fdhu#*zrD;!|YlNlEQhz3<=L@f56LDh+GUv(j*G18SFn#FB~(c@!byPb~C zqv=tMIFDf4IIRGJ16VQ#Qr=d&0Bm$f7R^nF>KVz(#y6_r5+wa#@Cpg_rPk*9+f2rJ 
zp#J`hVkN^op{xpXLWJG#8M|BC`gEg+JQn%F1cn+=} zeYlZ!wS%m^*ToLESZ+%QZEf9Z;~-Dx!Rl-h%M?A7*u#WFA6x>BG= zOp^Tv{xQ+ojnS<3T0A204&c*`AW}Uv8IuEOiH1=%fmX%SEmUFzJi;#pzit=v=3BbE zrF3YJ3gV+$rb%-Y`33hW@nV<0@Q^|ef9>g`8}@dK3(*qqT@Tod3dr-Nj_%NH%zQ%i zazplXQpQi<$i8(u&gSO3HBx?^S6_*zOBnk)tl^28u)2X2{KKQpmCiVe^ynP>j2w={ zsbrf~KbR7XDkq+z4Bqp-syom0uF7+uOyCt>_#T%{tZ}gi{Azt|D&xhgz?FaqrL})y zsh`m}USIT*xFV@YB~7%AWJBwSmeDXEbTE=jqg@6tU4lE#e_IKyU~L6!Ss8cxOPq38 zt~58px~itGZd=q!e3D@53VU}m4S;hO{UE+Vp&6UVyH-X;>KDs@J6sPg1D9Nh$7c>b zOetT;jE7|mn*I#D4yaQ14h_co4GYEss5y5k z^Y(FeidN@!r(+5=!0<>9D3c`Zf_KiW!hE4dMoOKYgu>U2?MkMA{n*2Jnj|Ef?#+e| z*kOYXtdqCgrj$SV7AR)1XaQ7j6dTqFbQ-M678x713NpWb3C3uG6Og($PmcD_$<;EbS3)koaijqjj17tG?Q4G|0;5h z_0YgNX)yKk1J_&>yonFtFKQSjj8^M)`mFh&@4P*d0HOPm5x{peN{C*<~GBq5S8S93%P~D2w@n8 z%_Za>1Ym|TseNpu^ zRi8Uc<#8M}LR4?doz0u>J>vAzO|vGxYRittb>aRhA%R&PsRDqlBzSOTst@s$9!Mg> zEfC=u?X3=%%Xwh$I&YFdahz*~!1gxNl*|Ef{USX+V(b#$v$od<^o*?Ib*gsz0ZSzE zjQ75aR=18c>YkQ2j1q1lhxF9>LAA+y5diaPUA_BS*Q6>5jEVA5_h+>>fk@p|wK!Ul zKZ)xyJ@Bt@B3rMpWOJiI7YHKBA-Xy5zQv1a@ae(X??OWU+PFYtVU6Dowd)!71hwgC zCCYjkx3O1t%Bwy7?5&;!VSj4=;nOtH)-$}dt2!slmbHKCC|GSj2AKQY!RfZMY-46H z&+ahGl|Q%>p7((mC337c(EE4%OYS6NN4a0%?nlew+F|OO&%^jRWggVKM~kgw8M+S& zeOy1j&++AtR|Hjo(lXT2r`d#`aWv0E0qfYr5MPTYp zCPYzFl_{-tv3ac2xdFd|hMEYQBVu-H0f%eeDJ$T$rut8pNSBWM1dh2U58ywK65&qt+BX(S!ZI+puvf2*-N*HJfrN1Pw1f@Sf2BlEf0_uDy3OuB zs}qX@d6p}+eK%m%m5<~0ba6m*<-k18X(9}C>+YyV-_AzKT;GX+#!kCu8j+Etq`(3d zey4OXvEPRO=6co65gI((+y$3pHLI^KM?L;iK}z`@us8m3ShI>+3PH8&?HicNsabt? 
zcdCY&HF!ciSFWtl_9{$7{U?QmOMgEar;{flaenIF!6!2}I4nll){B_kF93eL*Xf~p zyrlopx~`c?X<)C84gLHR^Fd0*&#_{66Q4Ft@B)XJ*0+NcVX*Byn^#Ay()#0t^{G$A zw_m^}VJuzqms0usKXDHsYzIgTeM(F#f1Fk}^!#g`9FX{3i2AJiT#j}%2-B|7%6=&y ztFj%9BU!R1ACnI4NUOJwhVG#GMGu;+d+fS>W2{rRcaukc7Iy+Ox_z8{<3 z2Y`zh>1nk0FvL1hmr*9H<&<44^S7IQ+`A>#TlqY{msp>?17={~$Gy7hoh)jxtW{ao z!I9%0DtKj#txY?e^#zH3j=kDup|6gL%c$?eZqq`eMTQWot7h?pzfW&0aS|jGbBcnX zvIEi6#r4IlmzfvHQwryuTCm=BGGB~-`!Yzbnt@}l^;`=K=X$lBHfg4B$XR$LZk}#- z^S(Cs!^=OtY}<$F17%=Ouw)+8e)Lls!#zSmO8ay!MVP`gEQ*?{0o}dT;$y9R{>r z1hhc^fxQBqAG8fhX@Q^(+hvk_9NV=YFLnfdLeg)a7gk%>slMiM?eoH zh;$(oo+zWVnE2h>7xhcjuHaX2Zn|$~2C5yk`W-EFag^!W7bwe}N}6TzW-d`?o}Ecu z@8yLlP1~yON%%FqB9Ol2KF0R)wb}5++|NcfUw*wo>!8VFm*ddKwz* zt0%=FGCf7e%R~vZ7uRgAtJ_GA?Xv9-`s@%;vm@dmxb&p3`^62?R?~S(T3OPL8j~8= z(_apm41&gN)_Hul^18g|}GB$<0t zH@Z^Um}{14Oful|t%d&kE@a55$8t(_riC37*B#m@zp#7(^B#^nypkvU@aM$%mPn+{ zi~ssftlC!0Os}XXF-W8qF1!jEQLxRf>OW2a+tOd-Q(O8Ak7Bn8JFmt5o=olD5tqYU z2a5x!dl&HA4};t7ZffH)P%Yr!OoK{%vx_HKAmytOWFMFQ zN5CQWAodYrs}a^DU~cz5NQ(!cb?Kdc2H4KCwI%GEwH5M&pQMeqZ(foE^7O>CtTn|a z-k*MuNz)|UcG9YP;_>kN-el0}vrXI^DG#cG zF);dsC!ay9gvWEn-X8Z;7-OBlU7ecW=Bk$_xIUBOKl!p~R!up1o^HEJ!PEpSOf;pw zF?dw-C~e_W&+^AtLrHGqj!VoV8mEKCgt25ZPo0~p2oWa7Y+EY&L<;}+VY<~7m~5|9 z|9K$bu?Mu_kq2KJiZoZov~IQXTGl;3TH8;&tyS&7|FG$ba8oRu5tC9gVjaVHf7XX8 zl@%SuVshAa`Hd%pz4a{jYKFUXK{}$`ZX#FX=%-$a>(={8u~aX0WL#lTvHtvJMMx3_ zY4NTxKe!Z@MQDcfs)~@v(zLc*qjyLaw)C>gss;*@lAwn`b?G5*phrML)m!e-Gx_f& z5qAsy{d3aY!>?J#tDk0aMuX9*)F$K_=Xkpk|9l>Fe6O2bYZVn+2#8aEEBykj0Uw3T zHUURdKL>rXb`t>q4m-kMM9~PGcW1#s6)4_w$D(qnhmH%uujLr`M*(<#V{sV&F z%IJI_bI`x$3Z@)N1aXm`kjP00g?ud`urYJls-xa$@SACDJyKT;SWucQ#0Xyql*f8% zGpXCDQrg3#y3zpEh@)6d%wBf=M-_ksr{TdzNz_VE)m{S6?dA}TN>AjCvFlz*JE-P@&bw4ORhA7+V5aMz}DtN26Mxtz&Ya97( z>vJcy#&=>iKQK0domkrxck};QI5Xw_Ze&t?hkb>HFNGprLGHwBc09D} z#9=~8y0Z=j^4qDAGII;++mls;&Np=4)vFmk^HulF;1eCG} zU`@mQ@~_-rzSi!pY?W6Idz2odaV8|bux*2KmXIX@8Wkg(Dv&xiSoxx$}Jgt_~F{M%2D{7 zLJCTthDL`)kYyW+Fv$}!6k2;Ebr*EQdrHL2Y(=RM5U~f~PxtJB?(uvHyMHbJ{{3_8 
zy6tkpVM!q_a$8iZ<(}+6NVC}&?YB1~(=X5ZQBx_#G$bbR>LUHxc)wOJ7;{n$zaid; z_0H^tFL?=VIodRca0o@bwiw7AB=ZWwbdcD<&U`?q=gL1PeZXhO5+3x?1r|a0$r>&k z2h(C>{zkacY2Oqp0)AW_moI7^bSgr;j>tOskG zL(HV*2_<=OCNw#HU)K@W&@b|e2?f(x=*lHUC@_2Q_Yz;?^0UggX+5~|C-m6g+Lm8! z(ts3!D%{RXH-2c8tKW(l@j+j@BvIuCiy=oD=k4F0gb4?vegdtkm-yHe&n`t<&0Neq zrbA5APR%7bBv#b_*!+ZL# z)j5-W3Ne*>t1tT;3=&JV%lr)Aayxp)*mi>l^3}cr}pOyBT6SFjl32{@A zDjvu7M~T-gzAJhqlUaW83wD^xHq=aH1{bq<=NSE2YMxp3XtRjS|9BOvB>wisOnIgE zch93U62uaibvU-57>~WB^R6*?p{Z5nZ`pEP_Bi^fneLnB{npW$BRGy&fJm0POIq76bTGQKf#@kOAiZ!%u)7Ag2C>_K*RCl=@=4U=(;N#>WE|WCK_T_1x zQO*gHYTNLh`{>e|E{&T6RA=p(P=?xh&FyAw3WSh=dtTuc+$cr`!50E`hLA(=SkYMT zE8xf8tVtauwJlBFu>@#wtsCoX*6Gp0!$P(X-#`DkFU^sCj{gtlE1CJd3-I2)yFmBD z67P-&|5)__7;tlxO*`smpg09qi`EgX7~2B!F|$c))k-1>IR|lpf@VKIX849jK3BBX${n=E9jp^CJrX?^}W+kR|Iz_UjW_f;U0j@edto|m@##QF#d>54>{Iz*7O zLR_SI##>JbIJiou`L-g6hQt@2_KH}8yKnJgay>T1Jq#qBrY64I zOTWl78wH7vb|nft9SYnLb;fpGvbMkN0n!+nD}xA~Io|()L6)y!n0lx)|ls+)O3?%jHy7qgmUuU(hunR>bZ3ap3;ZtrhHoScOYTk&{uNv8T7EcG)o{F~=)mLq zCs-M4udhe1&UGI|%|2RP0c7#@}I5 z&`n7jfzit|IsCjx2z z%o}-Ygt{InPqZUHE70#UJAQkfhWDSE?5t9bnpXT>h1(eS&DcNpoALI(P!-Ul?V1P; zyz@cwrLu51;>(tcJXL-A2yP|!=9hDZsHrAy-w7GjU{c7N}xfU82ysGw(F=k41Q9vZ1kqhJdYQulhRKpdQj}F7EF{7?}#q95w_v?M!=CKL; zB=BDA9p5OqV2>y)kUxroNv_X{OiS5bPWB+`v36Ej!)dMO)UdAw0(5h@B)l=&93Nz& zYAN^cQtfOIqg*M-ei|IM$|Ce4uLylR)<}*HR5Z_O z=R}6hU)?sr(|W;-{j%JK?)@A>1?H70oHH4){AbN|NB{P`_li;azepGFR9@ed79Ddt zk*OChJosSQ9Mc5$G&TbNnca5zJ@UXQXoaJx15O^*GhJbx5mdD(zd7D?P~5c$L+CC+ z=J$hLt_e`9Q0`1Xg-Hou@-N!U>7`DyNLWVWxuR~^es`O46x_pIF-#;#i6^2QX%I-0 zuqXqIHJeM$pbsP&yKmL@;imHVitl^g>rsX2qOOZCC5JU9Arjffn@w}~o-(P? 
z_6v8OA9{AmTm17clB$A=pHX8L*Wf|{0DC5DQy)kgD5zD&{V4o}Tp}1`3jB9=Ezczy z=@p7^65%5IK$Y6+u@=Ds?k);tCvvvfF3La+v5A=u=qu!I$3MiF@X3@5_q8)s^^os6 zzQs59!Y*iY&SnQ-uE}?4lT!j&vwnzu`K~8p6WkiPc?p?)WWhpNj`kdrtXb1-WpDu^ zXCk(GnL_z>-c9kruW+w)eOnOy^XnNBn=S>ue$_gI6eZQ%4_I1O7HFcH!iqw5(L1&b z2?T^^aY5;c9giBc<7<=Gh4Udl0n*g}^>I}KQcb++GMbeTv*kP%f6@Kn)UR3F%v-AX zE2-t}^uImN5-m27KJN@vBT#0NUMw}UlNbT2VRqw!G0`46V1~#8`Cbr zghsNSODr79`m1}E!Fv;qw-tcTkboj75$L3Au$J~4^UqiQg zOl_dE<^n{m%9@C>4*!;4viY0!i*N9(JU2+ycPusiAluZN$o#lV4gd^b+P5b9v`emE zizgqgz@i=Xx1}!Njrppf+MPkdS+6|J<(QOrewql!?S^~k*8^-?a%By%@x({#gO@Gj z2+>qO*Y?NLA7>TygttW&c2}7i#$HtPX-mVL7^4bO=lu};btV%5J( zs$nH*A$s*al2MytCA+{qLc43r^Q#kVXprwee3`j3iDM?|u98l@s0Ji{wt3P1uCxd_ zB}c~HV^Z2MzV1+RmcNWBK>hKl(ZO3BFb)~0^Q=^e5@lr*gZwPT z*yb%V(=j$>0n{UXXsHCKD>(WJIHeJ~2~XF5X5o1NmrjRE%}A9L=xeae0d+*lmR~bI z?v@T~YZs@(yewaphqV2t^CLWM4)wC({KF|*(Kq8#;*(G-fe^p_$s}a?O4RtFOn$g*cE=1Hl(~BlFibVbzdt?V7QAVj6Za4-|Z(32|er zb}iX929!iA+HDnR?iQn`T1dMQ=9fAN@Z+b?4*l`UW;1hXTM?R5>b9xAp$6CHV<~am z6%zT8sq4c-*kW|Cv2OsB8zKud%?m%&OhcXu?pYo$8xNW`wmIX{-8wv^N7SP5gp*5H zFb@r`nati_;=yev5idu4S)OoWGv+NlF!*)HC?^i~(O;xOCd{RFGJTX+arS=wZ=_QoD?-ojJ+Z`8{ zE_2pQUY|9SmV%J`tlGmeo&-r;{-(H%fYBrEzKl^fsZyB0n}v2}_514*8%*yf-6VTN zl)8!j_*;Mrq&JF^-RQm+aWCO!U3l^|4?|5-hqxRf#G@I4BQj)Y3AkV5anPP{`}DWM zv^%u3Tj9Nu-XRAwotBz;5Y-MEki5*JIlEC;`f5&W(5S0-(d&YOj&Jnc!F-SwnI`;c zdi)2~U16CHKg`ADgkGkD_i&EcI5p&==AvD?RelzQwReLb7XyY{G>ekeIICyb`J7+Q z|M=nmp^6@rZ^bj9I?JW;1$GlM3v@>!Fdj@o5)ZjzyYL}(KetJXj;bv;?sK$wO+kDN3)l9hb^4uYm7fPoa#$YPkg^>Fc5^!L?;b)vAH+_YGfp4}u4meaWSEw~0bs_HU98-b-y?|c}wo+T3(D<@6+it4t z_YAlaisIY8E9|w}$xz&O${#^!U@wtkt|`}chuC`1Du<*Pv$9}(_`Wp%GnB}SsC>b^ z;k@8RGC;v5G60@lJWXBcGlLK$ZIKr_lnXz=7w6s83jfz6V_Fj5EB5n;Gb*&nIO$>%G zBrg3bPMn15iE7-sj}#pyY~I4AdD9eTbK!)vD?7q+#62!BH>sOwwZ~+dy$hIuSX(nq zXuw6{GFCTT*q!N6V&%&5_rEb{tfoyKJ>AviWCUpuRO!n2ZeZ@;u^8zKSP=R+5&3b3 zRXq8S|2F$C6rC+o<7*cygC$-PkF7L?OR9i<80SX!Vc3nSRSm-zNDwCF1a7d33h*?Q zr^SAXh0BkanS?cNu^^wW)gP`8c94C1LmYW$l;YF*5jfr!7JcTm4{q?Ln!;F`#xWFZ 
zo8ba71N`jZCao0aR=-?Zhz~ISCVb9pO%GIjuROs_c}QYr=1(Yd$sB9ohk*2RAC46j z9(9(IeeINP(xg&2Ts=3)|*e&tCmfZ z)1`mU)r+e`6ZcSoWa8C5Sh z21bWGUwVqHzrOn?^@BtP)t%l|wnZFrG~2m$OR^PRfmz+a#RdOq<~@1-+d6`~NQ!rj zJ*RG&hhZ#W{`7$73-@_975-#BjZ`)W}Exg`hTl~42eRA_aVIlQKmhA0Fs zh%FgbC2=@WvwbZ5!TcianRkIkl+S+|aI%@eZAiF|!ugc3}x2 zCD_WN;SjmRUjVUp1Ea2gd77{xJ%Ggirf&CiNmd=B2pYz(HGpWV7uCL4@{jLn5jV6C z$_4*iw5m!bvi}`Pfs=!6c!k@g;wxTO&E02!KG##;9pH!${%$?HqDo=iLOE-bAEm3b z@ZS;9fkcc*=-p-f;vp|^KgbHh9s%I*8FHICeg{0^rMoeGp|Jfm+r;~KBZ+lNp@JA$*6R><<2dWP(MKZ^I)<*I_{* z6BH#DWF+wgo50of8g2%rjz_6*>OeOV?=;y?yh9a2?Vi$4l)+I1A#@P!9&y7XlE`gy;gg7j(IQddX%gQg;gC=$k`O7-sOB9ThuEFN8fX_C4puX zC`Xcv6{V7_Bm*vu@%2qY4#IINpXw$*Ggzal1{Lm(s-!$*rJn!H!&`VRC{5U5!d1%m z*oM!y)SlL;?6>I;VXB)Z6~l%S)^`f!gS#NR?T-CbkZ&H8j7qx}zDi=mIdu+Af2O>9 z%EomBvYxOfrVcZONE$OfT_l+q6HqBGkSc;fYC1>*6=Ci(Ucqsw{(*4G^XACtp06%D zC;7Z}*?c2_VfAV0yP>?kUY{->A~$*rs*2-{Z0XV_MIR)D4fn#Z6{+>%w3SSo9DZxg zmZoi=stFCtt5wtqt`}>g5j=j5g7+7~7y04)TzXcx+T<8UK0CPGju-8dS~;E027pq_ zSK_nrH=PqB*&B-zY-Z2Z&09eTcx9&qNmDK^lu9KRh5kc}q^tPrtSvI#^10W`fR~h3 z2`LHc;Nj=i2gpbNsWKBc{a4Gh9}clgWEg0%V}Q=#&^T5`DVovk*#}l|zI(7Zeas&4 zeaW(rPc)Yz5kihT-?BKb{^OtB817AQI7qEh62{)z9?io`&yn-*XpUU>W8UP>9ZQY-5ZXVWtz!6muEzAiWo73 zaMLfyH=t{PNrQ%`h%-J>#b{l!g?~!fory0U-i?W`J6?0&c>C(nNMji8C7;GLvPC}9 zfZI1%$^{Ik4iWEWZYy=Cyie5MgUQq5$r?BIWUS%#vz@D7B(;1E zDFgk+J6^{T##}a72dhr*z~RWhgHI_GzW3=vLzl4zcPZ*c6v(ZJ6n!Se#cbUh-u$BJ z)b3;ZKceKp&@StKQTJwOx-m^ydT=E2&yU^`{7uHgvwWt{N%hy!9}ej+M^>}~Rx4$` z_!|7!YG?JEpo4=zif|A{roaIbNh2!&xsG%5%)`jQ$LY7iU)vV6NG;`F>`1B9;t>5f z?(J!Xhebigf!!hw`IqhK#?O)hSvQH!-uc>6Zh8uHi8b3@YULV}g>VC)X{IC4jZTzu zu@H5eNL2|8U*1AtCIA=I8-^y2vOblYJD*(T(LT``dkYdy*z!V7 z+Q@9zFc5|f5z1|5zSN4~{ao|S8~f3n;Pq$$Va^_FxKI0xIQH|Vh>_(Q8NZ->W!KwD z`xq~r{r8Iam=f!Ak^Hm@&gEQvU4%PvIRcQu?K9@_tM%7*<=4PXjHX+S1G$~J{`)|V zg%KR9cEv)@@MmnhaDI=r>Ot>)VGI5Nrm2_$nS~V@J6q*x`!wnyM^61uds=U_4d!!- zAh0pnj}2(^v8^|&UCOmDz!D`{Ogj{sv&+&L_^q=m{X_ul+t=@QmB`q?1>(=cO?~Z= zj?Gr6^K=>h5nOYl)}P3w8gHzRVs5v6U+kN}eL{iUBG?tkUaf5gz*;C^)WpJdlX|Uv 
zRtbV?YskFYO-|5Psph%Up?rdMUYBdxSrt`D2Mu3qb)&w8yFH`~bH)WCeJy`GAF@fRSSe zK#TY6`?uUXt*9|)9!b@({G>`I=E^BN@%{p8qLUNb8hzj|JG}XMmixUjtn_V-w&96_ z^(`IP%f1a?vA`M5$%25WVNgf`x1CPsC<l-tdijUU6Q0g*%yD!*iKRcbeK#bFl6*cER~BhiJeTNM zxwhzx0L}HOe4K{ZLPD8 zL((QdQo(pD5$|YD zI1Q$+>xxE3ScoLk42D~a$Cd*v4dVa7B*79+z+!Ezen<^0!&$TqnCV8stv`0H-4zRh zka21}ydy!MNyTQ50kCT9PDZ1j19aP047&4o+-Gumf55GC>4l7~dIOX#)0k{{{95XR zO7rE9>_9vS%fp8uPC-@=5)HWcjA$-i)%HKEJL#5m_(wV5cjxm~k?TI@-pAu#zyo)p z$zezppl?8UI)+#3@A2dUK&7GA4gMnAlvs8^6CdHiK0#KWIhQI}hTf?|b>F~9BTU)X zMqqlp8jKNDcm;mXoQg~Q_gJ>RBS#j+=4k`E;kUWmKjJG4WHIklw7p5m`@N0yQm2f? zFP^{lo_g=JZF4Ig>AWKGYUhBFmPHNRVrm2IJHr2~;IIExut2+) z@Ovp9G>v5Z%%YivmUj#N%6iYDjTu86!SSsF>2PAN{(jFA2*7qCuM=YvoBao!GX~Vg zu+ZQm@WxtQ>FceV*6><-SonQ`S0Oq4Wj=CYv`9#TBEJBndDhCovW}hT@$BbV?A<-i zOnG`9Un|+q5zh?xWPJHEe5WHTCaXuBpxu!IjI?D<^8MjBQ=Z4T$9%e!z!%r)uU7MO zqVvP#nB3(sw!X%flU_SM^f}^g?bK#j+I&o@>VR0~k#t;1QDxD9>0qHw%^)V}oAAw= zRLC+jcs(zs%g8fsX#=Y`@!Bp9dW(m;K_PGH68V;w%}$u!pno0;w!3h?pqp|c4^$hE zSY~)Hu~j}>VSgi46CE3_n2c5UYk_itzVys!0tsWy?ruU$?c=A~kA@2kb%8Nf&& z#MW~XEp8a&!-;@+h+@jq5LQ>HHVN@R=to!#L&bJjImjDO)>bJ@YA*miR-dI)=n!;B zRNUu_wapuT!?nw9b(22)1Io*Bk{UdP4Gp5JtUh#!m|s9!I%JSS#Uh~N7QIBdDgAb}rKh0WLPTa&+ z-^5A?yvDQu7!*8+rKKzG*vq?DwVrjtsw4V`G&F^EU;%N!Sovw}bs8R-%qtNbK-Sr( zCH+AdsY{cIU%UMHPOsQp-Mal#mZm9}#E>u)Cx5xDfBXGHAk_`~Xz>}bihq~k$CR47 z;%nXB5&rs!nmBvVhIWeX99xwQ;x@-7Msa8j@izC zC?U)5J~E!O$82K_EomJwLxfO|jjEv2!$~oM9oNd<$YZJ#rouw&E{u9d?)_kaQr-6k zWi6|MSg1TBhVO2d(?RJWG!n#x$nA7O=?ygL?(p)x23u4T^a&uPAb+hloSqDZ zR*wIqFXlxl1Q;IUKi}rv5wo?urQbmvpNyn>nAG!^vcd$D4LV zb|6V{h@-dDT%dI#>U+l@j!RZZ3>QRe8J15ty6Roa|02c%ClbGz2yS-#rhTk&=19e?}Ie zv%wA&p9dBrD!%qa=JKNub!jwl#hwUx$s~889^lv|ufMxH2?H}|hDO+Z< z^fNrwSMqndV(`w&gB{NJLNY&_bl2GH2Y&;7ukVi@qjixyymm4YgeewV(!0R6l{-v) zFXPNim!tk57x_Md(x!e(*B)q@YKxS-QXUz3Y;0+Vt;{r)pMOrB^3rPBy%{Ujrg zzkKFckM~|{5~_YFhwcYnqlR+$n;{0~@w$#R=~CzEkjC{a@t@ka^;76RNw5{C66 z^q0i2FytlXYXU`O>J}&Zw*e5XTQjM5K@Of$`wa>?dt5Bq`77>-8Q+Dib&4tSqPs}Y z;Y}7A1q|d_L~qZPj#v4o2{+M-*LX}=F-CHNB|^ep?Wqo#CJUysv^{Xzi@$dw$iv}K 
z4NUtLAuioI{H=Lk%G}MN*egABjJ6$4w#(0dSw~dd1U?LiVb5`$eWx7Z->^w~@U%K< zY?8QWb?*49k+sWzutViP*b#aAwNx-v9rC+TI~I!fO*WWa8!?JPUn=K#57gEzKe|eE z@{N|9t`%+EHUL>%MMy))Kr4Z$QyColN8nAZ=a3a5nEbl8lQV>iz0$LPGtdgs)m^~p zWJcYdF!ygJPGTcq!(R;C^8Ar^CgZ`mFCgnCtJt4{7M#n?SY8*{V>-4bjb9AScTV^% zL_%c`lr&i`hGc{FLe&09JTWlI*{&!5_qnz{#D0#OIw72U%$CzO@NS(m<=3WeH|2jzTQ%OCDH<1Q^5grElYPQTS0vCa_mtEqj*1-Hht=}bhzlF>N5npdL z7Elv5gac^|yDeGZ4%oJ{n-q%yPhnue(WbICn_Vh(#{ zW?=YZ%lN@Lia=8uo(&F~&dUvk9x%jlNn6WrPe6c*_gMHCeT}O@4tBY*BU!zUz;Ad4 z((8H^gKxYu2?X40FMT8-5Sd9z8ir0yBf4Lv2|o$K64mD~+g%K~;FQN(m|p``Y+4D0 zs^gW`88^oF^mveQwQW}$tisbFNg?WbiLjom=b8#;NG&V;lD>^rgR_SZkXI4ii%hw1W9#VAsbl*IJE+GPp_|9ET-mwJcp-{kt--|bb91hnB| zY}*Q`Ep2*P^Fw}#6(kCvCB60A&a^Us#z9gk;gT5kcH#wc&*ZFDAP!HsppnuW#l^9z z^RyjD``quEEe8S6zOlpK0&&MzpEtqWeQ}rgldYvktyh%V^Q4}H@f9-nL}&O+RMRu$ zjn(I$%zpojq*YvUr{)}2y!4kD+&Lm&parFH>>EVP4m}c}5t!|{B7_l+J-I6cd@=K# z2(n{FgdRcGmVb@(bcvF6=@j&Ng0`);Ri>K{Q8X0kHz7FI`DU@we(U0S1%+X}#PWM! zlSE8fqhBE~Q2=I?cjVJ>IIideo!z~O)Wq|E>B7o29ib+$^rUpO9eeZ|CQ41I|Z9DS0wFe>q!-X@#>Dqj{pLNPUBm<+>x8TsKCkm4#a@c`>)=`AVzy0f) zbwKBFX4)i~gSqD%D9W}s9=Xgrr@gwAKN5Td&T5L8pXUzS>}{=(VlPnLfbB{0jE#m# z_D&GvAX|DvHrN^xB7Stp40%wZ$QOb}F0D+Ne3(2iot&#~;@559xdHD;;nOb_pFvK_ zJ_X$d2<&+;nPy&{ke))nzl+VA3YSW4z+1&D^RwXCTS`>km1L^QG3P1}kPjZyvZHHM z`w+Z|J#DX^rQ_?wn$)^(E=Sr8EbNHcjiKVZ)22=tVUaogA%C?r>h0s4fGRXvkLgYaOhZ%sb` zmj$r1|5O!^%#bY6_+ca@0Pbs_98n9B`LM&cv44YneXTuIstH3@J#oK#`3-c}Mf#7r z&;yVZc4w#x97GQLJ;#~Gt%Izb*Rl&k4=i9A^F-rs=$Rv-h4~BnW7`9*vKFzXa4f~EqTwDLKJn@Z*QdykJc*L^gw#@Wmvg()*v_qnpxZDwUAz_hq0 zS}Ge?F{z3&Cl$E|MRGlOHwGHm6?=#DkDMWz)yKU1)5=Q|9idJFgabbW71@!tZX0a8e>s@g;Tj6%08Wz?05FUkj-W^-6*%G*7 zI$4A%=ifL?lYnhic_RP`6;cH^%M(<_E+5OEU1Dq6|GUJ(tdL3sZNSBu{_dN(5oO{F z&3Z~}53Zat2@2T5LvlKlE;2M0@X6TG<$@U+A3Knxhni=?dKcd&ahYOK8*SCOq zzw`HJlt3RYh^>c}RC6ayQXbvBj}z9$Um%D7R%gf9bI=eyK`6Kxq#$91$4dhA7Z zV^4&SquDfl3l=$-+oJDOCu;uQktn6vDq8j~P(`Rewi+<^r&IrfAo>mp<56HH2%n3} z@ZY9(v*NX@7S}yjp!w(6uL@s?pQFyEZ24Vhmr!WQIYIp;m7EmBInmn>I_%sG;K~74 
zC&L|x@fyWYwiX4%DE!Yk%!Ks|(lXV2Gu2aE%A*wO2Nbb%+vPS~(nbPdQ@DVcZh4K$ z-|3FY4o}a@w}$iomoGnp09nQI^JscU)%AdmsSC%YxyVfEJ9(XgmJ3{TTKt^V9TG!baN?=t)bH$|rEl&j0(SNog zx+=|E9YA5>*On|HYp?UO?JsGQV5vVQ4~)`%$q70q(8%90rpJ$K{k>=P5H5>GMB3d$ z06KlxYZ^}|bIg!*^JR-ilQo%gPxnqwES1|jziEAPQpMCr#hsh$609OfZ77{xS1DEh zMcq+2a7qb3Ff!sI1tA~5DQpK0%bXfnN{A+LkZ-~fkZjL?$aluc0v+DtXtJatn6u3G z6*2j7ly?>iF?24rC+1p94Y=Meycu6SA&IdSH(D{d@tt!zTHe_tVn)P9IT;_bO2U zD$0jBna|&70mBfM{o4Hc_%Y+je8RyKlHFN< z>oxV;?WD8A%RDgn;=IK6o}suyw+rjIgV(JcI?WeN7nEaYeA1+BpQer*c%EMLJDp#0 z2r@c2z1J{T#Q5)f$HUBE||k5?8AEc!x~SqAPV(j9J$)TT0OLHTgqNW%GjsHies#L6e@M312RMGt*byPWzp- zvR->)qOzUZGw}Q}9|uK~xTq;nf;M>c1U;v+COhicRaV;h!OUy9`IzkNX6w8;w>%oL zAXF;C7NQ2}Kl7m|pY!UCGOs6}&PnVBrn%NQnYiuGbu?^L=XbgQC-%7f90N)Rqo0yG z7d)%W-eg%KcbDgbW!mcxH!LfEy^1EOr5ikze|p?V|8_&!Fvj14>dE=`*~WG0UcY(d zQ8D-^>uUVahS{x%K-JnZP14tdVa!|8dG-zA7N*ijs+U##>C1A_z3L$eiO0^x%TDD3 zhIdr`$Bty|F7`wLhW&-W2gel|Z8pcdmXDQ1ZjZWb>oQ*U?yP$A@oK2Kkrm1-uiF2% zSsUi!{Dur$Cl#eun)TDm%^#-KW^Kh1^N+T>gWgbgh?e;|Gvp$_^Enus2JBM)+3F`o ziyxTpk3KX`a;==GVPPY~dnW$;Ms^hE=xdl7J(a0ke%t3$oHmo(mi{w2o5!$vq@c8`-t21@zk?i4{=F=zQHLGl5``thx!>Na%3;7q4ymTq2Wips zA0{7hm-H$x;p7;_j zcJPe4KE7?{|K4P>3S!|g-q60ZU^(tE^%YWFwk&PmAV>UkqyCo{X*xGfU$o(1e0T0` z+QyO(c+#$Uuw%V6z$0#NJ!m$P5^5wNk??l1Qv6MLuIJirO-%cNvvRUC=|aF@*QvO0 zme)g7(m}s{vFsZ$_fe;rFTY?tYe)A2pFL4nlN;`=G%KB0y=Z@E{AqNiPDlUl&B=q` z8y&X#@IeQ#&qjcEpb2`Ss3$|s_V2gpn%cAlv++{H>M2FBWVyO^LHntTSw`3odTAzl z@+H*Y`5jmX*EIOG*ml|QCP@3$2O%vN+7MkiHmP^;WJ^~aM5 ziMhUpliYOwrI=%p0hzSJQWI}-l`rT-F}EPonJ0sJDc8n#<=WaeMB> znPAU$v~hAHjHv8c8% zDbSn>%wGE1nsr>|IY|?@`^jIl;;7dBG@jBV`=$u3==%sYLY)2b@lDdOzr9Jo&6ls% zMeL%sbUP$W{*GAZerc!N*}=E5H`LW1?T*wQh}fW-i{;Eey43y{vF{DbTCv=N|MR^$ z9NOjVJSxWaW9M?|?`)U-cv-)(l~TE*+Vp_lmve2R{eA6XmxDeoZn(f!8jZU@F2o;S;XVm_$48CX=F6e&+M%n$i@NQi%< z4uv!%Kt}zP=;BQb(Hxg3y^!dFkJDQ~A{*CBT zvm`WVx;z!ZbG?7T^xM(eP@8?~)nO5d@y)QgE$s~nBc9s-N7Y-$HQBa*z_$sCK`9{J zAtBOI0;4;mLxs^HAYCfbIT&41(jX-{5Tv9gF_2bbqbCic-gEc!{GRuH|Kjt}ZC9Pw zaUSuFc8P;u2Ii2)aB^m#a^ 
z^?STGdyb#a$Z&l~4C$3$uxZ-`!AO_c$oqFgJC%RIcMe@k^vaofWG%8ur;xeITqEu4TMEnv7&RE1r z%wu|=E@0HR_K)x5N&Z!e&jzttf23yLVitzzPPeEXln(*dfX)S%4> zPRX2+GZW9X2q>0Tgtx5n=QA&9vp~NSrC1yIJ{?M3I4*=nbf**t%-63n5hc-nb?!gN!%RFo$4#4HAGh10U%Puf zhC7t7Qarvq8Zv>ivPoe&aC-SRX5;X+JOyjX$%>*3+_1%jUq@sNZ@p`r-^iqfbI;kF zdd`~U0?&B4WJ}wjJap+Gd!VXdds5ha8JV%L|JCZu>yP!t`OgcJqtvL+!Yj`%;~L-# zLM!&PaXWhyg3aUaTU@4>%*fSK2dxc|N&n~$kVq^-60G%+XLyv=P^K43yO zb=3cGSW=jW&wj)OwSyL?<6e+n(F{N{8NS{L8R+nHHcB?CvH7(L_7UBSU*#T%fx>JP z!M&y7q4a4#-R0U|Q>|?OQkSOLC8rp=rtNge`}2(@BZYm9M*{mPsC2Pi9`UxiFy0%| zF8jmnX7tS+{UBuz5t6XlKBEY4f(-{HOuM zeYMgkltAd9!1W%|C_WwYDQ>0;7T8c>knrvPrj+?{8`s?DPK_P*&?*HI^VWUAca^Pw zELP5>XJ;voBq>BUmY8BbjApR-F8;7v@rt?XL4U0!C zSu>{reN_Q^fh}gF#m4f<*GGipdMnMJI1R8NlgtJtb|q3QBf2rWr!5M9AZq*Ovnl4U zVY{JD6IdD2D@)6>3k%~mf3i+ChYy`+ggsoem9iNZw|UzyH>7dnAv)D=d;Q=fWgUTCzpoK5V$u@tQ5+9!umlvg|YjNPM7TIqL>jM<%kb9!y1^|QJ! zc5uqaB(Nz0I>R$zXN|@z??Uqbxwvrgg3ml+E%j>h_H>$tJ1kLD>X5bEjv~SBO)*Ow zR5s#b-IfB#GHixZ3$G-eKXc%<@N|rRr=PI8mWCuI#R%=L$CT8YMx_|IH0#G5(Py0L zHSJe5q=-{rD;8PHIps0P zx8)n7RIi$cG-Yob^&gzJ&lx#yg?+43#Vt})0!@QAF-CJ`37ljP`G~mdJ&pF)lKKmZ zic(56rS(4ZO4P%!JS(|DbA}Hs{AcKLDR74#4RaQ!1}?<|qXTN(Rd(=k{THV@>_y4a zg6A8}8JBZKreqW*#jUOUuSLw;i34R90Nt;RSw6nui1KsUUhLFx& zQNp21OsW++Y`hJgM%IOQ`)v-Vs~y|^qzlc46%k|}g_+oANX%3Is(3v`+nv6YB$qj1hEBJ!h=ym(6AT=+niu-Acq)N4CNVerb!e{pGP0 z*qUkzbYixZ%3^H5NmGM&(lo$eZv?)ZkkVRCbC`>;_fx!=`D5r8ZfpyaR94wo0X9oT zKX}ahTDvh#-$Y0vonNWx-(Hgpu~wToxrgE(o;r!8 z3u|>5oWywRVUK-GW_bsQI;ZocrbDR^^ZHKQvx+piquK3lp{C|pxmFEL$))ldoPrCN zmy~F&soRPfa(*>!yXw;MS2??+a-NOwjlk2***WUR6^&Lt80QQ)Yri`StS!L3)*5y3 z0#{j-m{D^G&cn&!ug8$_xL=!z<6>tLB@Yh^>{^+a0p*Pa> z?Go)#iaW1U&yp;H5Ow0XUNI8JqEi`lxf{3Z;%lXOw?JZUkex6hMukYZ!1TW4eDn$R zX|zx3=pYZh_;Ee7I@zO4;`~mvRkopbZrffLykra)51IHzlyM zyO)qzu@vDIm-)^kA%3J94VN2#YOsnL+++_kwGQW2per6G zl_s|)FC@0Wj21KkUH_^41`?QlH&o0eZ;t=u*A<_#f>!|hI2~-r)cd`1$-7h)-)JDc z)}3`EVa$uzJ#_1?_X4BeQ)CUgbO0{mSS#am{&<3OfLp9Jk)JGLCmbNQEgy-@oxD8R z^y1@~oPVo1b@+mhJ8bK;bX@@t>c+Qxz#j%~?O$n64zW3|K26)7gHoJ?En56vE>!f} 
z^SsIgpHa^1qP$@+N`2vKaZ**4W5OnhvpV@HZVp5=`aK3ZiPTf){_y_yw*ZcXt~WSl z>n}q0LQmsz2(^-k^)ahwe7esK^S*~~lF+cATF<6i`VE5I?9?6oer9J(F4v#4E~@_T zd?NH`kN%P!JddF$|LSN|v+}kLoM%x6YYzjR|68|{ZO)+5q9FgHaLJ7y90*MV!1nyV zgI|2z@ndQ~9xls&IJ^1g3>!^tar7esc z4p#bOE*sl3cV;#eT0{X+iiymCz32LN!0L`0HaIS&Uf<-iB|9mP)J|2yqv z#(syk6Aa%TQo5$?PUpbaskaqwXl&(Uo6I2fDbKz70hIv_nvth0q;ntei{L87+1iBL zFsle$?r=KK@w^7paDFjXr2bg9NP}lA-JxfH4QdUp?7s)-3co5Q_^R_`&CgDvl#qwU z#e?nNt}-8PCZ(9QI@Vp3uFUqJkH0Tqxmys{!6bg3Oe5+5V!+S#1}w;=-t1HzeG}Io zU*}UMf9mRfhO=jT=aE8~5f1;mewOSjRTscXDCtKcdx|`TSsnM+<3&)0P|(0^_`a@w zfNrEnR%O0HTMXQ7m@}|;tPQ*?r^*a5wFbGB0Y^b=Hs#T!0#1Q#)IFE4WJ{65M^WI6DDreI^@N2pjxF#gEM) zTTWAI2a?9`ZcU2E!Tjg8q+HeRIt}V1RUI#w$m${mLHPo?o#DAp1%wHhmmI*aoYMQZ zs--=xDF3p#2+G1x&DC)UNP?bbM)Gx_PV%3%HPZd5N;6=+ce&H^sKr0TxjW}j3GH?y)Hv>ibF@6JMOU^qV}emDGMM|MmRGP}yk{@*#kD5X}n1r%3}bc=>KK-_~p z^-=NVWkst4F#W2plCh*EdTeDap-ba`Us$EN@Bt-adz-Ik6AtaC(82-U1qMwK)wh!7 zh?8k(;e%w3$2<*PwiH16?7#0&w55a?)VK+a_UkX`EsFNkCXLX>>(N#ACi5BPJ032g z20oCD?%-d#9#R_O-=u8uuiB7|R-->oziZ`;tnC7_1*E~UE%O@L-jjbX_o3m-3-qHc z9dLlIzE4SpY1GZAcwp9bGBB7XFaVI9W^C%|ddbH2B{iM911YzNctxk>Hz;x9{I!~= zf(vQ<0lsMxK5j-|VBJhn+@ptX-sola?Bd*MK#sGm4ZP=kC2Jmz1sYVKhWct~WSbi@ zZH^CY;ELvAz&Z&aW+%9#UyTPyb^YBLhjT)WEsK#c|HyW!Io>&O&W|XkJ(_jbJVA@J z8}Xigr|-7}Ycko~cWrkiKbDIRXoo1ac+-C0w;StE)XgSRfBuuZ`gpOPr%2MzaZi?r z*Kqh5wsm(4C~dZ2+x(`2+Rvv|_8Cj*0C~afFJl<|fq`MJ|L<$`91a!r6Yqkg3pp?G zIxZT=Q^@|7bid8!jmo6W{{(4%EPh$hOYC%!y%;fl9Wi(X9|)Nv!sr7V{g3~CAE;=U z6W&0q=Zp^2hW}|pKXQQ6)A>0Xy~zz}?#U-~^|-utAC3$>tX&bIFx3*CdplO+qdSbR z{nau>U1vl2hcfx|{n35Q1Kla6Ix`9xJWpQ98o3{!2JkSE8lZoqzq{E%K+lcI^mA_7 zZr4tO z{z9tX7Yz*h((Kx%FvBQm!SYNE>7(ix%B-Qh9T3F3&m1AX?Ncy&`IihDjH%LM<4v!` zf<5ExYR8wJM-F;R^RuTv)+qG;`BZ=~{MntA zlfTH8?b5BcdqW~%>Nr)H010Zxt-H;iU<@=%flpUMdBeQ#3Lu^q<3c2|V`~PKS&bQOs!#H|_WAu3Y-d8DV_};*7<>8t0?8_Vh611Q(sow$JV*1sWqnGZ-!F)KC*sT|++5~G4Pf5APW_9V@o?g4qi zQCT8B)4kt>0g+-00?XYIVnM)z4*DLAP#!v>0QNAx3lmu0+1c-xKAB8G=%W_2cpuNv zP9{B_mnmzLCm>MP3{N{4BrH!|O$o`^ZPYwJug&JtHO|hZ=`YWmU|OgU^JLSk%odf> 
zy$L-qTge(NHz5*19tIS9IQQpoPCUUjgsL?)_(_~>O8)5gy0%@y%gDa4(aDBJZ$ z2Zw6V(XX9%oklslK9|$W}|F8fxyZuo6J^z|wq(kAp#lUP zWfp}SAdB+Nd?x3bbFo<|SlB33g0>}*k5fxG-EPCc6pXeks@3sd{g~9q=)@$BK z65_MFxgRP5pStl`^|I1Y6w_UB+)Y`OQ0VLFRzXjh<9ul6bYXyX)3gse6 zEExBDiEV7w4!kfhdl8W%|Ioa#_4NSH0NoJQyZmh3pq;Bo1WKY3^8DE$fiwpu+t2xA zRZ&Xk#{)*2_Gf#8ZbS8x6JH*$0M7AVkH9~-TqlfdlBCa1kkFRNwy(2u5_|oRtGIz( zo$pCI;1Zt%=(6Wv56~i$Pnm?an@_iEwx4al1CFMOlJ|hF&Fj5l*jHxCu83pUh8WOS&(zo4{^=@wu(xe2G3OerlDKuO#=q)!8Z9JiGb_TL!!k zc&>ffrF|-Sn(5vz4UC0UF1Xt}Z$mLzo?IC&)!p2$xx!nC05`)oCkW{J985hHT$l@N z*|MkYJ3JKM=HQ!t8N8cE(cU;=)WW#^&~aT+p{(+&agK!C_W57A`!7D&H9=E&Tx_!? z_l(@OfuoEOYXe>yspdf6B&ONR6>KVRAF1-RDNXGpZNz%=gS`>pWcEZqFt|;z7QFlF z`@pB_;BDfF);*9zCfz{iR23M5dH~JJKorj;p#~;lwd+%4*{tZ-T}V7IBXFn8BB|ZN z<3(0G)uDLn#lb7{{kYOZX#-Q@(;(^I~C;(T$~(4MrA85GE~_a5tS6TmCG)Ut4D-c)lki0R|VR!`tN zJp;@p?Yq!D#3{DDJ%Ob!b-~>G)j^=|-}eOlF(UJZh{Wy2rC62p)>cmGP9nRvrhBJ& zPG54+Yi6WsRa^7f_F@!pm8!Bp)b-&g+K=VWvzvEUWwu+tocNb0#Gb<*Zx756@rS-? z#-pN%7v!Sk>QGvPzk)Zw>U z$QNK|NE8zfvR@2W_1~;2O z?y>32%D&6DNb(kKWP`GydHeglP&AQGCz;KN&G#PfO;ccTch^ z#02T)%e5N0PG=rwCwZwQ&@*u$PUS9ZNsmIs@UVDiDZ1T3-%hW1l4$-+UGEUB+U!uWBFMH{IC zV&?hzHi#Z7UpTP0h6IRpy%qwzd_4AAuFYQ@PP0iwUHG4@pib=?6uz!o1$ZR| z56TV1Gt^FY4cd*fo%{W}ilkncpQlgixs^yK^m+Bsc6=n3y&Z_I&8dKTZ z{Xn;+<#YU-3GV>E5A>@#QV97{Gtrh_ap3)3MajNAi1$tSNx_31QX)1AjJyU=Nw$M|2WEJk`Lb!`LCo z0I~kUqe5%gGFI&K#f1RyojUXZjP&%STAr`7HNJbr%yH}vgWyh^Z+zLHUNDTfo2z}n z*QH81walKwEx!Fl?xYhVZQ+jlC<6QP<*4;^4#jM@^VyNY;;nYTFb>d@E^V+Hzqhi= z&fiO;#XidZD3{^#P^D5S6f`Ti!^#O8{KzpW5jrT>r=}PBQ6;ob8a_Ky0nckzC+E@= zq`bhKL(hGuv-ZTt=&}(%R@~ZB>WTtDh2*73>Z+ z{I)p@el6I0L>FX$TvZ*#7j#gzUdZR8EY@45SUGZ{ICE2K8`}@VK)U6W-|iHqJbaNX z{5+k}n@N%w77{%sJS>0U{=0*K!p*Kq@`$^{vC;>OH#x7z)AnDSrL@DpCS9-%Y2Y%` z6;==vuf=p>lirevqM=+Y~fp)flW8oAEuF1SO|ED$H{Q<%s4J5StE4vTD2f zPN8OxPFk79auBO|d8xVE)dLw0Z#8bS_S4w2VB{m`B;2s%VugoDS)vy?&v2`5+xXH0 zw@ji47rG0~vaKFhwYq6@nEC%;U2(?~rc@#=5T86A*X1smd7i9d~a@{!8GE*qAXZYOa_r5jrqD1adgJxSr=g- z5|P)0&P4e3oVh;`mIg2VOe> 
zAQQ)xT3oIhKy7$tzW1gVSi_9P)~TB4mtPpQC|tw>R+3Zg!Gys27DKc1>0tv=2YNnU zew~^zXTl=rJTLjyi|BSf2{(E@?R6pLCRvDb614-xm*?aUQx1!M?yWN;JG@HTIEu(q zlF?!1uu3oF7^f}L+<#}+(M;S;4M#i5J?w?a@_IO`aK+8ecKAKVZi{98wzsw9#azJA zgAW4&GBxnIGJfPvJ1-hP$1afIG4Hc=01*+pbRCITEneS1FJ;hYc4(mCXqEP<8+heeomZ*2;6#^0XvPCsdnQpk2~bn3N?`G zC2Hr^>Ue=Zf=CAJC3>>GGH`c^K1FuGp|ZLs+P(DFezoLb@qAOqO%xg%6r{~qVj_@X ztN@CB_)|}LzY~ztO1dojG9?-1T5t3|9pE*AKYN!7>Oks99Tk-sVC-u)tlnW2_GC@x zw&>*wOtBwB+gooOW$HlgsBfVxZhOC_ow1H-EAm4>qRn^NaQz13^8)Br^8jziKX&RB zp7%<;!f7Cby8}ET+wN|X~ll1c^xa>2}9#Hd?)&v&deiUBBg`0Nv@#a#-tz*oU9uR?8$#C;X|oRHy*YG?k46%?)ukt)A?;1d4}sg_nQ-G_Zj>a zdt?@+C)zu2*)UfsfRs8dG27#9DJO<&yK{m28+t=iVrRLp_4J7Jx-v^B$=uN~nWUnb zJh@74n!FlJX@_bd0_#P@2goX@nMvakW83#lEuO>pgy)9nmgil$W`;Kt#+ETfkt&lH z8Yhe^QPUX749BOI->*nlflZl+vl>_05@vCL1+21x}+0j`JsCC%`MFlA%xr@K23rG$o zbyU82?A8J&Jd=QPG5hQyRHXHl0)qC4wQ#`0j)1GMal#Lv=5X^z?H2Y=dD^;2S0pEk z8ii_<)3fzz*Vi0;euI=%j96h5;2MkHF;zU`mwmrC(a=`E*(N;yOmR+*|07M!wM=pz z4s=LtRI}2@ zH>}?LI$kFg>@NHFN?0E0j7Q{g7)m}oiy<-A6dPc22LCk@m}=sr%9o`&^5=npaf#>} zzaq1Y%Ck{$r920-82u{0xee>_L;;bqTi5+I>kUc{EX5CV<^#MK&4{mfP8u(#Ag9|8N*^`486Orr#i?U9Wu{BaLImB1;5OR<`FBOiha0G8Ig7}lIz6NR_N6K5 zth{XrNk+7L*x5hTw{_U&#LYn$Tbc%x!BeZFM;qIdd>jNn?6;F#Lp4@@=6wXdHZ*8F{uHnZ^0nPQUMmqG@)B~#{|6{{PT_RnclNL;l4kC=cN*qE+hW|d zmo~_MD!@rl%N^`Eb0Gz?*(a688W}EaKH4(pR?99HEr-*3WcY3~6F($%<;VVE*gcqa zkGFNB`#(J!H}AN~KcQ;OOfg5X+XF`A{TRw$OAlD_{TuU7QuJ}3r!%J(h8ze#21O6L zBh4MFWnW8(J;f}zV-YBPPj`KBaFf!*jqT-7yfJiMvGKatxUae;r`ox?@wnW5DZ|@B zsw?*Ao3o>ru=A*V6>;+5J~)oOl;ne(qS%h5aDA!I$y5ZS28j!{QMxQ&wa~Y zLCidhC3CZ>_jJ(v{0Rud&9~e~CH964(Cr#a1YVbw(^d_8WtK;#RlZ~Ek3G-)BOqh( zYu0k16TXn}yokRabJ=!jNoJfsX9Zvs+e@}D4(Llo5cVPVup71S06d@4F zDsSRRqI()zW5v`Vvj@hK*?Pe=pPg1@WbsW#hUUsT%2EwzLX_Dpw8A|D-`4ePvzeqTqXE2*Rs+mb$CpMx^`bm=X{I4 zi41=tEF;9Z8l*t%Cn#gr)j#ESi3!Wdl0Bp3M4P*jmG#%5Hy|YZ=O=9MHQTjc9=?|b zxiseUdZmTL&!jz8QgzTXl6yCyC%|$)&$jfD~@l$JWP^_UV`sM>DA=2m9%!E7J7S+q0C#6!kj zkaSnmSZ4d6IUz9R8NV!*KfW^#7Frc2k_t)npv6yEth)|NFR_!^AM@ 
zW$6Kj+l`g}AyYOPtOL3%p%1|mzp~E76jC%J6D=B>^22w+Nc^#q$=h6?fwHVOb9;m0 zE(aLtbIu3<8*)PA7>AiINdv+tUCtFo0Xm?|_DM>J4g;nh@;ZE{EHg5fEG^u5=?B%NDog17f3Kp% z+I&m4pi+5my@3B{b%Czz;v%lM>;#jRpYHVaFmge3${i{0B6QlfC`WZb=>gB085LC* zq-72`(gdv$$;0C%6cf@P9X)r`Cb}2aE>OL{Ec>uQMh4H9b9!G>b7k-)iRSlW^pSW;GU)mjzbjyOHKH zqk^z{B3PR+W=uTfhW5C#&`3)AN#gnh_OfQ$HE~i8Y2x4Um5y44aRb)f_d#z+K=bBZ z(xKiyL!c-VWR7nTf|V+vmE}FEj1tH>8C(Z!Uzs0M7Tz5<$`nCUS=M`4@@N+24+4%k z{$;uTMVZe3Z^!x{*d(a4Oez`|BT+zF>hgtR+@PKG@%&LKe&PeNJR#oz(Co@ne3^0d zRQwOs9`XgkH3nmVG_I4(K(mmZhXOHw8X02kGc_-R+7SlzD6&VKHd(1Z0hqw0%)tfc zTHifmA-r34my&mPQw%CmQ(ZQ#hk zle%berDctn6%fUK+1`DariI0@OwhRcE75jD&nQNhGKmMcwSq(U*8S3&3d8tl zc8gwEG+DFUyak%WS{{SWE*l3>7E(#@qF;+<2$$U%8`i7+XZMFIr)y>z+2Cb)^4<$wJPFa(ySVDtbre`1X<7au-I0Ig)y3)EF2ZY>_f{wNBVF)mV>3@IQg8RAB% zz%uoK;KWFAFh#l&eZyBF2~4len?duwzqJM+JYV2@XD1TSC17BAxDHh7?0o25tpsir zjEG?}bz>W*tTQB&-1J{aDvMCvguG-JP2wDuYzKxNRk$ISKwTw9nHvT1Hj8401QHlf zuD8bfY?Rk(cmUNCxCaaVO%Dc2hd0SdhKTZt$++@F+bji^JHt0xu1-Wi=zjwyqJ{^! zFp8C*ye*qLnY>FHEJw6hZ$W5ApcTY?U^F<!e{=|+|0_<8(=??2XWyBb zd3~qmwa+0Ec@nreY@h*Lk&yK|X)np2F1z^1{wAfXoUm2;aownB_vKiz&SycZxnaqA z`dzVN&?>|s*TNL&dz6Ae;-|1~|Ie+h5K~zWhFlV9yl$Y_Hue19&;-xVY$Oa!nFIdl zM+L*?LFWf3JnxYFvG^uvPMZ9pi3s{|{=mKJ>)vK-ytH1?xHP;V`^t@!uK`02=DK>O zF8)i@14xV{)@Q|tG5+8346_ZmZd?oX_ZUJY9wT2Dfk99KjN#+E5O+Ueu?76Tf5~j{ z>x#TX$2$A7##(J9_{NR$F(nXw2$oipO`JIkVr_XB!)k@-;w8!DFgaknyhMN52*FTY zD&@uqPZ#_599wN#_-)mr0H8*X=0=X(e%%NAKK_L>v7O^0Sy7!43CeoY z)i47ZnPsvE@8H6*?Cej7?AS>WVf+>PKk=bOC+Cjqo1-}|3Z0(ggCmrI1%iWehzfn1 zr5c$Q_qw8p{L$T>GrZBm!|vO!ugd7H7t{JVGG^N7U*6gITa$t)=ZGqfAfAw+OS2es z2><&kLp6^rp5iHiwkGU>Ob3U+Z!a)^^pTq0NviUPGWtkJJMk62-2Qwsk|D=afy>U_ zAD4kIzfT}b0qD0C-CmFkr(>SUb9M;a}04MpR2-A&`@!5flWx@OMa@}3(H-+&?}y? 
zD%)EAb=n?<&TujBIR*d`9rR60k|r9K=pAL5t4kGFodjYOA;&>P+sTki@=;-dBzSxH zdeg&WhzkU-s@|qfwV4e*A>rSd24m1^=awz&LFwSD*YO5bj=b`9Y2rh3O4;5#ZO4lt zsWLw^yoQFdsoaXQ^Ay+e&F8=#u#%@%xG76%bj|#r)ZjtQk5h68zQuow##{el`-MfY zaSrWiT*5eza05;v;ol&6IXh*3^yQx7>!tQp_bbdQE@}$>0>mO904q20j)^djgKf&L z%ZHM4OL$yA=%fa96`9&#?D%JdIVqy+(?5UuZ^cJ;+|sF)GYkJgTABHQHN~`MoL+X8 zDH0VQp3ZOu^ZdE??JfyM$yY$$o#$Q%miav-QL!a-q>nkAoUE9v$NmxLn|?9ZGib8W z=T^R?_qC5=mqC(ZMC;l`veGjB-4tys#;KclYt1?#l8us9&;RzjFue%K3zxsG%>^pq=$IJrFajt ztDX6F@4aNST6@N1{>?0dfW#}kdwJL0Oga5MJNKjGWx{hoHd8C%b(>|)AMO7!{^0kQ z@An=%VT8<8at@euA|)-??8i$8^Eq@v1E=!;4-1g3w9kBT2r|E%i^ccrd~t6pO%<6& zto~i-k0WINjM-ghWc`40W*tc{<26H|Syf{wWsNpC_#0pTRL#}GgylM?Jniax+oKH$ z6uU|zq>;bmCb-^vSMeI8WXrp!n)n6!i<0h=qwDi^mNCdrU%LM5RnJ6aGvOQ`A59Ly z7H93ru&(K%BAG0lPCBeKiQiRk$UDW`-HUkju9`{y&W^(w!$0`>Wdd%^OXHc&&2fhO zPHkkb&CuiLJ0hq@ACw3&DWneD2`bjk&qXsV3xGTt@i#?U4?S&R4ZXW!B&l&=)LZ#Q zB%W8N2yX}fbh5;YaR;9!T*WWaLt=JU7 z;8}k|x{!b=V?X2<&8y}OL#C0(aOVs&5&i;@*M8Hgk|)1WCZ1-mFJ0v>X5$E(=FPJ` zoU-Pr0tPl6jYXM%I5@699u=pO3y|R8M&L0>AZ?*2!;iUecvd-0*vQL46`nUdI$F`t z$-gv7d5lU}WN7g4&V2^w^4?f&`YK(< za4>YdY$-@+x);f8bw}qn*b%*`T6z|Lu;)nR(@ld(mW9$E4Nl*6@yF>!@r=_DGuGwE zQw52~tyN~z%8wXj3W6E!TE};CuRm3^quAP_602$&!cJd>efS&Etg+*`^gwvr7QC$l z_yjcMlfb1|=#n3ud8|vICvEHjI9kJX1`Z*ZW*>t?w2$eK*kqpiv`~s-0N0i)Zmr0` zp}bl_J86YC6`#pV)vo4#9V{lkEX{XcOkGp$5btHj7G(h z&Y)jD+Rk|e_K7Cwz&yU7C-~_3C4JewK|QZ4?vg#BlMlcE8n2Gcwe*q8wsuH3L#E4k zZ|KT+7!W1_wwmVy3(bfbJC0ZD?jdlFxrDQULAU?WPNfykh~K9s&L$(T5?0 ze)&IThkZ`YgXzy+`p{n?7y2-m&o8ZhJ@;yyo*)LeVqf8>gWT_o!u27T!s`4AXmohB z-KLJ8`<2&E7(>0(3W!&jMW zBnPp035s>3Y0QX;{~2WQK`c~4mt;!l^VrQg4q+QcPZ2URjHA}oXo4lhuC-YA)?wD6*0;qqd44C>Yw*|AXPJz#PkSHwumDS6F3-I`5~TrM zX-m%;zl-{eKN|IF#Wu$BJBo9%P7|ERl(OFDGvP!1%-#PhQcB!C+wo}IU5Q{h_=2D( zwV5yLYC;EPZddMgnKPx7r-z^z>|H!=$yTDO?_qxZKJ2io(YPmLGK-v*Si)J zt`Us(lE=q(E)2Jz^i7gHQHT8OY3-&BCj+%^E^QdPO{=ekyqX4ld z;?kN_J!h;~uc1Aev`&3ixaN&I>ifUEGM*%KCf`l={cJZSDk&y5u4 zktfRc)YV?}>HRb|3C?HVb#}f}^n@k@Pb!e-WKgj%FknpNZCXy)6)%e38`xPKXY46B 
zd_&G$$fCRh`7|FVI;tZH3B1C4#4Xs|FrI8bxq1r7_@NQW1TP zC1j&O2i)u8x^4J_#TTeKW*Jd zbTC9bs~3(IaHr9cr}mD8H9zR@nHWd>_&|E=3N4H2ob%ow3A z|7}zKlWYDZy?P|(1FC|MAX;Xu5?))@wv)g9SmeCmnQvT!!DF7R8B2B?MP?F!YE=(xC>kAM3+A|(R(aX2 zKU(p-K$qdo6V7`~^}Vx)sH(vFGr@(XBY-ceqcQLe10h%Qwn{-IDq`<#G>-lN7qk=@7=+d~6F54smcUq0mi*P&JuSFnTDR``)7y zaGdSRtNu`|`UIVk!Zfl)xj<;Z)myFeYQs}wZr)Y0r#R=fJ@axaeLwowmBDrrb3$^V z$-$0TSNst3^*wFOLU{DAIfv5v$yvENA!zdCR>Oihx!Uw}{jqWqgvV<52|=8~gd&dG zO}U>B8hg5%z46&OPhsnD_QQU})fr1hxcl$`mzzMi{-6J?+g!o3;2B+;$XZDysS3SOWH8HP0#AL_*+5IUOr+;*I7{}Zj z;OI+{p|nq!_dWCB8nkB+ul5;ZM3z5e2+BtlxWV$2VGR6T$vD4^ppVS>_@__DofK_a zdZObhu`_&;)WeT@hsy39eHqgbe-IY7$CvcopF-P>{GQE zLzcQYJqJrBVwK-M#MKy#6YV+{hBLBq=oT0<21+O%SV~y7s4zYV$Yzq4Of~OpwR9x- zQ*vA__M={>v{>NdpnUZ3pvK>TenzV>WV$q4Lgh2<>k*}hd^*z3srnWI`*tOQMB|kr zSBmb9bHO%pXW=%EI^wDMd~=zzXazL}p2f3aXpt!Lm%BXhEJ~|at>g1Z*oc~2*uTGK z0XGKh0_$*wUJnHBhhcBMbE@TRtxd?!sAjux3HJ(*0(0@onN&x~XO>r7RXkLbS+!uC ztr2XmJ5DgA69Ge!h_+}*o{dt}+jm-Vjtb+P(cB8;OGQ~G5OIoYH<{{#-Kj8Dibh4j zMv9eQU#F&PqjZ=?oZh^Sm-9^4Ci~9IS7Gw)h_uIsSpUuizO;nW=-yN#h-G{s|28AI zi-ZuPX>#nf`EAT4T=ut2IEv9MB+-`0p2V~C#va42Qe;%lmxBqDl&?EtoKDlhdC!}( zFaFSIW0~`}5LY@CD=6-@9}K1bl_(Jd4UL+b;M7NE+CjbU`K3EF8}{|)T&u*!N{lS5 zNka`_67>`v&tzVm_{Y2Ca}Q6#BIr7C-KJe;zcKgL^OY6z>e6T-&6wtG?&S^)DE{5H zw5eC$6VP$wDB^5woT7ip&ib&0Cmu)8+?I98D-N3QBAM=^_?YUtA*Q&+fSeH!8a5NC zxec?--g7DCe%{9Xp)Sd?x}9mrp4*n7H!-xQE#V%fHCBF3-u0uc%(l>t#F7BJ>{?!} z)MgQ+G~PA=W=jVe690@pkb_OqwOGD+PH7m}qd~L95&?NY%jbFLcGB}rH*}_}lkzT0 z=-td4;w^84<0KD%EwTtw_3#xTg{WtkR&{w~x5Twg@+-S!j4izXgg_hUI7yWrtdVA^ zRIITLdeMxJcs?bSJSzP@Ya2KrT_-{ShA~IHTvxV=vWjnsCTHr|hsxL%eU`C+--aA? 
zP^E-g(G-iz-w3;7Y?I~D-;8cz5W!1h28qY+jFDq#VIh>Vx0|HxUfr&t2R*fJnVdTu zRvcH6plHxhRlc(EgF#8hQz@R)JhyRVJDgaWM4Rvd9~bqFS`y~b-_3<}0}z#$k~ea9wE28I!#R8NbZSHVb$bI4J#&yh zV!oB;R{~I?|HAqOfHmJgjBs%eA?*T>a91`Tso((l(v{sOU+644WU z-8l8IdF?t0WiQieW}g63qBdS6?{G`6h5;`2E3c1SUhf`_yS*aGTdpvztNYEN+C{kt zPUcuJ!Pv$AYMTylxZfqd`=O@OJ^u4zfQgAaTj7Y-2ZlX=eZ6ev;ahuybq8Nc^Z&K;0?LT7ueSS>Fn)qsBGcJM=@cXX)6J_pxZIr~_2!KOj~>K&6`YEk4iAx9)0`Iv)CE=hBwsE2 z&g;TcR*AcTMP485*(F>oZ;pIgt+C;IvGJhFTV8v<(dJ0-1vGK4tvGDcGxTLmS&mdh z-8cb^HPdg)eps%xiOtd^XDk`_Y2zh3q}nzBiB%e^b|hU^6Io9dQyiCgnq2&0mMwkqS@#EyL5ud<-)HzdjYZGEz8$UpW`gmJ&>3}YW{M*l$L;UF%8 zLFS%@!Jk44#+d@j_3r@YW@bTb717ZnheuL<-;%B}uiteqW2Nv@sN!RXtv&-$;Q4ia zt3Pbo)`K6_+UCM})Oc2_{ghf>sJah|orWh$DhdshpIeHuhTiX?Bv=ByH1CKfSU`;F zR$ez4kIJP3T?Y9dH2@P@cb@&)d{H#;_&YtX#yGwl`09z4kXLzV<7oJT--CS+JIy-H z7A~PBpqNFqOc)|tSHV;bqw9W;i4Tcn(+PMz$sYSg`kijvah}z4LhXD8wt{fZJ5`rV zk6@7BZ3jyu@Oqsz+zt4%@Las|ePN^9_yE5rhjCu`U#m&Kt<_(dfqTEUTd?N1^ba$H zj4b5x86QwE<3jb4yM>?ZFmE=O&PFEGWf9-%e070Lo>ydNbSVj^)g5JaNP=!gH)7Cn zo8Ngn@9FU1X&+Y8f03;4oLJ6$^kUp^PuDo9iF`*mYi=z4!x~r54ej`h(S*Whet4Ru zZ;Zv|VXi%rxvDhwvI__PC6qLVF=D+x@d>Xl!d4hcK&L>>9^oor3@@3t&V3X)%|tWl zi!*4!_KIa9ndrOAHXz!jlW&LrADYfGtO@>Y!%9iB!RX#lT1sMc3>ZjvN=tW1O6O>l zt}&2)00AYXK_sL*q#Kb&z<2zQ|7#yb?=T#D zeZI$$c%sULrxxs&-?kToUd!_5dJM7mQPWhLdy1JK0~%M+JKDExwhyA{gET`+J8fE8 z3x*-z3dUR16ygk%;ZMB_oP-lh)&S9Q3PG8VuR_>Kkh%klZh{p!iw(vmnfz2MM7D8% znw^wMtDR2R=X2_D&v@x=OxV7@S&+A8KJ!;V%pg7s@Xz{;vFp8X{f|bW#+n}nG0V!; z|6-)PjRAr>FX6oVX+0!Tfm7Yzsuz0|Z%wDzr}8I00h|`5ze#vjuwcdQI1e);&MC34 z${i5jN{jP-PF9wz*z%Kq-lku%MIp%8)wV@YKXnRSHHoXgJ={iK%T#FMD?!p|9Q5V( zi|Kvlif%VqrRIfUhcEz^rx??d)A6L>hP#?1aBa!!7Z&NecvaM&&uk`U;DoF+GNgvR z)S*aG3T&Wbe~kPG=Baq+P-qY#n}XbbHa7_WD5f6OyyQAUL`&Epa@mLs*J4UdgwZ4B z*9hF(REDBaVW4#5XZzvJ>vVna_$w;2FH0V;l}>-@95v3qd9LgDv(W9&|IUS@a}^(V z{#Lv*+-JHpH00qoeQgN9uk&nU+3`#&+U>Wa!4e}wp1Khta7;({D(YN^QRgYOIvglY zn5-1<>gx|OG#XkF+QezP3cXPTqkGqN&s)8(r?l0%9Sb}~%sNMnB#U{blu}DUiTNC1 zRDWdoH58cX%JPuO0$>t%Tf$zo(u?d6O|$Qrd~c>FWn$iaJfuD%XOBMQ-iSd)iSFG$ 
zcHzwTSnul(bb=K1uWFW2Kgt8PS8vn)!u)TPE>HzbOr<&abhO^PmAA)_P|t0hZC{kG z8GMKj{LzRLuDiOYQ$6T{)UIH>ze1-bJY#lfzJp`qv0O>IET^u7ghQBG@6mf!J;Dz$ z8vQT%9-0Oi0=!=R;m;oGQ-k5}&3~5#HD#E$rO67UQ_EDBDW)yI&eN%!Wv|ctLR-XO z>Ey2vtfbKU|lC<drrcJ#gkAea3VSTY_77q-{=vRZW~HEoICUnVLw^xy6Rf5AH8q zp<%TNVdrw44zn7|Oj7JuRkZf3_hQ2W=vU_J`J#@3sbjkick9(<% zH~#h)=mTm0Euhz0@!ep$uW#5ip5e*-8K7faYg2;K9i2Hem!&{DN%sV2d^aN{!?9%F z;u7C{nId&y8cVe-)onb;ysyVm$HoxWVVy`r`6;EHX$Mi9DF;SLuq>*iRP&OKt6_*v9t8ao+ zwSr33fG?f2cV3-nbIx~>;@PaEB#IfeH))8@{N)4xIP+-7NR4@p6)1&64fv1}x1c7% zkZRzcFRFGwC-k^#8{5qy=Qet)N!~|#1bkjL+Z+RjSe5D$AG)H-G)Zj1F-Q6b1(fqp zbUdZ(j47ryg}!qm4>#G$N{XBtn)X)l4Mmihv$?;B_czkNH$0%+sar{S_J}?YWRxfi{6=Rx`#}x5>Xx)<(+=p z*B&2_hBj%)@-XY8dt8pz%LuMDa9;Q5yjC_V>c22SRlB3gjYMW2PmagYn{zk4sl`U6 z_r27R6tlB`G2_31#zeqlBa3)$Sz-cE zbS0$qul9>o`&Ukvh7I=}>q3_^9A_y(UxaYV2D58+_i-A?F;xF5?+MJ}u4&CYC(Y|m z6L`pg6EgZMkj7>jdQnTs0H!GozX^oxC%Zgv3}?xH`kfao9&8+XNqP|$9>WMk*H-2~ zan+-VcuT&a&GW@N*-fo+**`u6^<~@rw5}$E{6KomJp!J#S}LtL>eQF<46)bAbB|NO z^ZH)`gt)WP#G{7RPxzha6s;36$$$Xw*DukD&$@~q9~OO5!;<$`DFQ~n=9VXjr0<0* z!Gwu$>{xwP!ZN)*e{`_uv(_rr@#S{jm7!|pA<3q~R?_O4?hnkfu#mraT1|GMAbZ{5 z0%KdOFBxjzu5&En8sy7yR+`sZ#W(Zh?5b(eWiic>_7#(7Rr>-J)cQyP$345Txj{;; zO(;Xz>PX;C+dg)Zm2)9OsicRALyEy!Rgsnm`z2ktz{8_G^ISl!(Nm>y&`lU9R;v_k z>8YWvU06#yOrL8imTJ#~dJ-_$PHlXgldxf1*=<|l$oU-MV{Kbj@ZxzbJ3i4q%@AFM zfRt;;&Gv*4TtJ!JIm~TJ!JEH@tc%S$Ie4UBd)RnY*LX1ZtKzf~O)P#;xz4OpRM$K}Az7wG=cLXkKY8(gS z-2!E3WHL#2)ud;&ZD|37^@7_c`%;tFj8W0UKPrzHfA>p15w`Uq7w#>6pI9W2e0~^o zyc~5pU2Ps~Yu;8F+4Q(!GHw>k%!M}){E+bdwf{NV*=R(}S0LsbhFsjGR$}=?OLayT zD=pxL5@hRyce_|fUk~a_qNktA$BReP=r01H4Ff{br~!26Ko9wV5RORq$J_itVYx~C zNycmJirUj+Gx2S@0d8KJX7R`F$NGsZjwXMsDBoMwjdx%5Qmy))7w=X$7O{9F5N6C+ zBGY6Jg1owaIy^^-x+lIDzkGMGyZSI*x1bZ-YoNgidzFP*7V={K%WNyX5H;O5^f*>O z+&EZ1-ENZg4<#Z@nbTR-h$hJAGOZ65^eIp7lntjze_r~FaPo_;57 zT}un#>X#zz3P>T29RB@BjJ&$(RQPCLtV!>a^!Na3tsPy{E-Tsqb;{ zLZwU3eTZs4%y{+i+*dVBI#ecF{Zj=Ri_$5>2Ct~=P6Btt zDj&NwY)M>S5kgw7IJutu9#&rnaE9JtB^|s?2Y-Y&aU*JCfmBhytl?k+x>&zF6L;5| 
zzSD(9TEkC+IWAovu1>dDZvp)+^w>}ou8kNMpIS7u-AO#ticR_u9vdT5bA&E{BxD5u z7zTqzi$?IfZ{)!6AiXQF7E)pHwBBr-W-sH?X@~28Vk>IuEi9#R6q92rXk4_hi}@5C39cy7E+=-b^3Hva)+@TW=MA#?!#AeV6Wmtg!SnX$>M&JSaNbr z{~eWgH?jjg3kK6-App7y>ShxkXRbsPc=msk2}hVn&w;H(tD}Ih;$FVehUk`5I24PT z93lB>60#rMs4-DiGnXM*Y zc6KEk-aXan{TRF)&o_UxmKOjUj5_H&ZmL`^&2bUs_4GmaL|;9XFvL^cL|`X}^%>-3 zJ(bsl41F&HNR)?%_On^##%fKESwtr%>xgwk3L;1{57)QPV(3%8#^l2I`pOx`Fj$8a zab6mI?UdD5X|a`zqb(Qx&Y)ssK$W8E0?r3j_8lWrGpTdeO_U?aBJ~pE#18|-WPF5L z)5k5Zap&inO#K;>d(B$I%oEAM)Wb?FqDi7zQVX}F?+b?B<&OI1Jn3kE{WAge|6Kq? zxyJBi4~+ngzsEFg4qPtTmOB-iPey^I;;5LLrQH8FFUCXVBC~qc%BiB&Z4|l#`hYQj zwO8^|vsCXn=`wb7s;B&i!JA(?+8@skHdkJ|(I7J$bckVJrxyG}_ew+Lepu+Z;->%{ z$x`Dnhr9GLoAJbm_KF7yj^Z<=b;=qCs4UXI>h-xiE$^~NbzIEPwNKi%-BJ(YI-nwI(Cm;B#l?i{iXdkqv~0)BV%EZLx_`r(E~H zM_(v=K1{0d!Kx1h@(G^3X@#))TP#(ShhvfH$oXhxPANr}hqMg!(ZfS=By&wui-G%Ee)P})d_%e+PGG#pmu`QOUq z&h!>4QP#l6;W&=Qv^|&hLfAe%5HF}8bvhun*Xwj{LtTgDVQ<_a^k%zeJG=Pm;Qb`i zTv#qSAt0urC!2F5CtIh}oZmCzV)?F*l47m=Q|=W3n*(;14e$J1h3?G~&u&&~%$fX_ zm1V{ZGlnn))mMaAKVx~ds6r?k)pw1oAz7OgtDoK&HLAAH>dl)^M#-bn)L?ZXHg|`% zQpOZQQ5i8lsjz>&8|u8U{6QflFHqc>(eGjTT)fdY9Ji$;#MHLS3K+C5UoAFtMDzJ9 zL*&TizlIjeKbKAD!L3gS5LT-3@3QG9Z7g?OU(zCCBOa9@bCPyy7POUp67lvdpoON-JQEy0jW1_ zoKZ_f2C*08nVXw`+2uzAK$;8EfbWi?SBEqW0*xO#$-52Q2K*C41#eJ4B){uKSD zk>L*7Af97=;{eT3=E#Q6O|Yuno!i`|V;kn%kS6z$1vRN6EtCg8MI&>4FQhf=W2o)l^SgJpNOETUVp)DpS+zPaqG7XzVuJ7dL6TpLc{$6#7yeb<(@p0XGF&v|v zMvm3GNHsWnyggOz#mTHAjm+JmE&9)-TJa`{PfuJ4VFa1o^IaUb!o~4?4#sb1H}YXm zl%mzqS5)?Y_mC90=lhS8SadJW_{%i*SNza1;~O#Rur*r zj9cOT8iP!6Dcbni5Wi%ieC}3E8UD}d++)*cR}O-9%V~b{X5*{HdcaoMbWII5t)@BN zW3z0~1SWKA1sH}K!sG*{Dm^hlBiZs3|2-fJjZN|N6`d&7sqH+iou^dXOlFUl2Fb5# z^4fQ7wI`fS{|hD)E7erh{9a2vAL1l?c3oH=S&Bd8)l_xE>7;+8$XCfd!k(TtsicUY z(QU60p2hc!sB>~+a!rUYP2G;&w)|*lTfUvG?SB6jP;^i1n0zB96j)2~>j^@?qxPzc z=DYAGtVSF>e1ig@)^>Hv2o-uEpl-xCpCNe?=p=HYcqH$pHfwS3#`P@+Lce%xQSGC$ z`sKke-inxS2g)kewKH6zNjskn#qHIgmoUAj7P8Ivdu!MLS|!+!kG7~yc7jBSzlsO+ z0S(K?_-nm!HsfT|yf;)%mBwG?y%bH0c8n%U>2kxmAJIdi?kLD693s-yLOPi_Z9|^) 
zC}m?|cH|om8pz}F1s?QOkw7<-_j}3Ix#H{3oY4bnNl&>ia^nCVyx^>Ok2!0jNeP{C zze4ZksH1W{TNroj;<{#3#T|d$3~{E|Jrl9prYX-421h0Q@_H#s;HgjbbbARc_WK6H zR_M$;%OOo+AaZy9_0vUD%bE};8ERV#gCb$RdN{{)?bC%%o0Yk8@Q9W}^y`?ePPf>W;b z4L%c#|L&|G{qtU3>i)cbG^-dDPnwvSR25qvm2UA%kzi_0C5+?GpC3FtO*+F@J``Ju z1W0=S-J&f6)?7DPyv;hfDk4x@1XH4O!iQ(5@U^`aRtLn#eoFwL-g%Bl8^eG7~$9Ukyq*x7gO*3)OW~3LCcn? zX^3E)k}P}*U_tjRZ$9XmmnDHK*D;PZ`t%Be%5MsCGD*d$K&{n1j&XXX!*Wj4O%{ZE zrrKUy6$WD2R$>0?)SB3)$gk0hFBG#BA7=QPijmIqDKHAhZ3vrHK~6hvPa%Qvziaxv zEyJ5wgE}0}%!89U&QAq{JkFNwm-u~E@Z2@^FF@~S2|@c9uy7ro*@Iv#bHK-CmC%OE z&t~9}TH;OBDPQxD8DDi>%QJT&9vcC(By)+w6eJ z|4^U6QQ`tuYYQL9c|VeB%c1Q2Bf0UB$4+Vyw9&(vM|}p)tjXhyw?Cuftn51yAEsZ) za;8vG;F2=mr%3e|uRSwP=Fquk_DN$Rs?A|@fYe&Mq*_>2ZwwgsAZ#PPm?u@`0qjI5 z{;CYoEUM@wLZwsQuell<12hlSDr`86B)^{96i^-(V-RC|VOby_^3KT0uRBgeD4wS} z(y~lj7IR@o{D92`E8JRB)}A6&yu;pdtTGabjvHvL+@3C}AKc`;SXE`YrKVs>#Ccw@ z{sp}I;`$X;0H!CT%Kj7fdVb))`VHnencmjvxM>Q&*7Tq%~&xQv$m( zF`OzAXP$oL1c&*7&0I%`iW_lUb#lV(bYV#(nu)^bO(JAMeQS&!QY^$VbRNa) z?Wgarr~+pxDi1S##rDM}OOG!^m24I*DM-(2U>L%xS&|jMqZgUBl)5hMQ7mFMj|J{| ztds>FZ9KkBI*@X;XU+)(1^8J+!-@;MChM)!V@30pUcmQ%|5{D2qA|*BUo@{d&o3;( zr{o3$08uEy<-NnZG&>%6puBsCfUK$YucM|4Hjpgh*(_7zv+}|u4JaSzk)v2ojb)gG7FA*t8}!Un9B+@N@GzKg#e>xF z=`9COkl$kTD+JVFRL8NS=uk{jMC?`x;TSYlicX*6#17t?-c!i;=9H;8MmnMxG@^e) z7;oUzw5qsp7MjFE$fWE+&C~<8^jCDQNr@b|Q& zXz$(WRC9XGlnOJNP5q%^!7&C5#H9L}b zDuv?jA)2HYb2D1ZI@U1zx)F$of)7~elzDbP5!uePs^CEjhWgko+jucV6m(?YRO3#d zg70n{1zgIW(`8JyKgv&@J)ig*mYY2)A+Jo1tbW6r9SYm)jgg_aroVaKn4?)zo$^9Q zGls|8?-nnGG};Zoty2kLX6=TzxJ0w|nxp~8fLW|zd(F*^V=-DWZ7a4^rMDlv_%1;E zQRfC?Rj?s5PV{H?w3iUfbjw4kBSmhJ;M@mvy46EbC^*+KEhx|$7cUq86<85x1?umM zwU)b6A!6YJ;@4~8KY|iJ#$}-WSn`@4PltgMG*H#tmPh4c@;0BZJm0@y+^k|C@W}8RzN9GX&VN{Ca(c7_uv6vHKDi3u#~Pznp#h;lDDW_O~|~ADgATr z$m;mS?;p`to8liPVd)+SndJm=bh~NeG@Nl&+tly=s#0~O!2|6&#p{w~c6j&wc?L*6 zbU_Y`6}v6l(C?CH-9=$L=2V|(8FZzj@T40?EdGgBru$T$A;v({R)c7dlQ5CVh*)GT zMC(bN>6*rC=W|(jpTu4PYBJZ#fj$m31B#hhkbV<`6{XZhC3)P_2%)lk20hXK2NzCN z3p+F21M0g`ieWB 
zJd~+WP35PgOATO+Lap96K$|<#7MZ?Z{srECfLcBBQx^iowT-6=_ckuY;`j$S>k_Ra2T`@e0UXEVkPsN@c*J*7FZ`m1QGpX(d>;ua8=1<$?3klBXp z6^FUi%|F{JhsHY?YlzH{?40?=4W62-l9{B|U!t+IA}m5@;$Ku~*P|}p$3cZ!g;s`8 zxy^B6u|wXH00&Wzw<@m~J|`01qETctX37^R{RA!`BQ!THy&yZe-^HTvj8*%Wl%QT1 zFSX-Uu77&m8&d*1%dOU!f}gm!C^6>{n9$a z1`)NfLm^C&mINNw$vYKE2aC5Q!7*)V>xvSa)bZ9@r(TZ7adjw0`yVl1G5BLG5L15$ z+bjdKT_p(m1>{q+l~#%+V{c`TiJ(8A!b0IXRNNDXCQ|{UDJ+@-Yla7(o+|T1q)d_Q zF-*183AmL(a`5lPe7F@%@L4qR3?OLCxwmVjcDw#Ho7RD{btvZn{UXZr>}f!MsYZVl zcq+Xd4xR;gg7^oUtIaM<$5@qB3@fX}CS0-k=B#7bX%%Y6s}{oo!q}Z&ACNC{;oIA` z7(Q+wLAja5iJ}!PYheUKu$m5eWYy$uAeiM<>$dU39i^ECKkFULFFinlTFSBATXcHKI zBKFNEyf->sVadDdZUA;XwlJ9g5nM5)P^4(E@m%J;A_bhg{!I)=-(~5~9~V9Toxm4g zKK>!!v(W0gJ9b_1C3)bk;5O*k60n3wjGKXdaCToSBNRH$!;wstH zpM5zKR%S61_s+EeJX`JgEC;Ttc-bL{l>A0D;G_KcMS*)ib)-stQ*Akx>0D~j3r5Z( zRcXZ|hpipo zUN&F;ot}?&(yDIj{=De3)N3B%`Br94I?mjKD@w*W6p>7(>c431_wOCAu%XEaW85Dt zJVWdDv2kYIA&Y8LTcqSlqPQ*pX+9hGU~v(;;W zK-GJzpbM4iP(rP-r8D=D6b=e($pZi=P3TjIfMBBK#4Svl-a&dMd9l^SO8XnkjUU;L zd~*5jN<;YPL7#d8v!>zBywm3q{4DFc%#HK+8{OBIF*%e--quUT^r@lXF!#U5zTaA5 z7b8}uDhYUrmLL&DrUm-c`l?uaQwvtbe-{C1>4C8;YbXESwcQ?_{L|rX`59>AyVd(G zTq7)x^fESt>ff?|r(V%^(naT8qDj$5n^`PBmTOjc*f}1#2f&@iDw^qel^a%xT>}8@QXrVuM*?xlsAQ zRG5tP1@g`Xs7ooE|HY5ssnsvxPrZqxbw3QgoRAxagMR;MwT#6jB9^$RZyZv`dA7k? 
zBQ{lX{6!5*m7I`qQL`Z6xY?%CP3&^|AmSV%u6=*?$SFH-b-);@vvM0=fYKX%_(P~a zfKz6~M{9K0%dKvog5{(xJ;C>rbMjs>@0309CO(({+?f+ayjA#%E@&9s23Gw&ib%&k zi@3y7%)?hV`<0wY(2#NFfdxxUP_;^mi*@)RAUj9H*aP6*B^OcopPsKhF)KlrIK%6; zbt=@v==nfpth7e(zIFjeV`K7E?61B&sn%}`{{P}D!+nju_%FZb5`LhJqWVRmzk@vlXrT_HxmRK5Ab<&w|qNwux8Ub9zbwtKLv3nJ2GGxd`I#gT!05_kEx48 zHNDA6>@4fcO#EW{c_VSwJ@9Qq%tr0TbmdF!5wU+u)5x)5Zg(>E1o=_XRGBU_4}EXD zs8}Wk8gVk~R~`ezs$i}<)3$Wddb57vfziw5m=9>qsCw$xFFP{aTQ#;mwsRjLXFH@7Owl zE`^uf8*Q9=v-fX-bARQX{p5^~BPp0D>6K|OaK3az>B}_D`cFs4prS=bCabZ=*f8Z{ zSuHmwc`o+gsYzy5Me8Wi;UA4^{Y+lOu*t`I4zg;y`bBx^1*^Tspf7*GxJev4ar^Qh zR*yJjQjgW*h$9@!f6Y-27LvVY+BE*P|uUlgD=e`~>`{!EKZQOY)U>*|RCvbf@_0x^GTx6H7uN`%X18Gd)o^{QKQ~Oz5Vws?f6pLO*ebZ8-jpt*u zSs~f_s8{1bfmdiL@2J>G8$V9@wdLO*uq*RKfp11^>~W)gcsTN3 zmuO2p|3eGqMt@z|y1z5Ya7^SzEJHB8=ai7pw*`%Dq&-9%bMLkQmHJ)pth`G9)O?nP z+gY8_WB+e0=TS|4HsL&+w#}`^^DKT+mz5ofBY)tTz3&;n9=ff;&RcZNmjMpnOb>8> zBXXNdGTZ0=O8uFpP2v`vmIq9VCPCbR#t34v&_yG2+Vv~WTV1jv;6#@Q5K7|sX$Ne( z>J7dMqubjaRm|0l0Nd(tKfO0-0f9A?@uHK@Do$>xXJ-TUxEWX6*xXkiTQys-zo~LLZ?UAi20OWz5vLNceeMc5cnwORVn;x5E;UF~xgGP-OPdQc1#GLYU7s*t7VO zpQOBG>+Du&ec@Vw(!nu~Yg1qylUf4q&4J-GwYQ{?Dq~t}!01M1t=<1idr9$>K(sGO*58|&%YaElaY`P+ii7bSun_{}V*tPgIpUrX z&_O?g`1hV_=6=xnUMH_j^(SO=9i**j)4;(u%sYxOwO7#!5qX!J)w|hMD17DiMg*G6 zgii+(R2;}f1*O0U-$y_{MUx$!ZG+#Z5ib&F#+P%6n2nC{tt~0m-HU2QfcOg_bi?x) zSSiHa=EAFsVdRFuMIP)k2wX6Z-5@BHj-FuD|?M1b9za_?iwvv70YO9;GMe({I?oa~wL} z4rlKqA4W!ts4OYvP4m@eCiYo->GW6UmEQwMRX+jB=T4lUCz^%5z7h%(SWcZhQGmac{@3ez z9Pst_UM@+8ldBQ#A_s*@+wl&GnEY>z?*E^Cun^FD?Br1t|9c@ylza@&ki zl3=zbJ>lqVO5;h@()FJr(ap5;a)uV80<4g5a01c#m77Tl)wa{OfhBq|)d-rxUFsXl8IB$-(4pf{{QG1+}W+1UnNuxxIck9v^Ic=E(nbj2>1b$fq|dz;>0bLlsDN z5t&MrEplF`cM;?Bs!8?(gN))OJ=jzf()zi>4?j%rJC;x7NzP|1=mo8HC10!x$ow#x zJ8k4w-tOYUf(QkG|4VPn*o?k#obrT0#_eoEizZx-JuaVQLNG9)7a;v_)F8+bJ}%6H z#_43RD8_h+?-PHKeE-^~UtRRrN>ApcSksHtps%YZenjXLUv+jKF7uPq zAK1rLCjl_Q+`3loy#0LuKo zO;U*SE5S8%ECW-kY)#30bXz_ty!jcyprG%9O4O4D*uf&UHPbuiHo)M`7h48A@w2o^ z3kpQtZVx5@yjxEE@)u&V`@YJ(Vmbj3If2J;44+GXsMx$jzWG+Ami}%#MY81n;HT}; 
zAGA-YgP?WHBzCTN_$mJL`+=qXtf=Y(Xq3mmYN3AQxz)24wM)(`-7g8VXrGsChbE=r z)v&sG4X?Vv7?y!pir#_I)GC?yWx#MkzM@MmU#5Y>DRw2qH^8`YNcmp}pTf6LZi+_t zg!#cH3);80)>oZ+06PKK+Tv`Ng3FQNQfBTagl}3~;Utx_PviP}Wq*`6JYftvAAj!O zzlMTk;+JZZm3h<7>cJ|oFq43E?4*Mbj<~)2_Zq)D86QmU4>PWk#0L;7grZbqT8h-6 z>0B~TNH{Hp^XDAq!baIqnZKBfoM5`_SX_4#N-%Ti@0dn|yJ_3A?8D|f&Nti7fhTh> zb1l@!H@EIH!`TgnN5Jib4iQ+9c*VWQ-E7gu>AK4lBan4C$n5@E*7$kfSK77Vcar$s z6?|LGes2jW_m*;X_6%c?Wdf++go`PE`Mk)l7fx0gEqhE3mhRUgL!*129^|p<4myJ6 zvJP@DM8+cu#HYn;ekE_;tzqJivBehxLgSA$fWx*BhXv9 zcOaemehqicAs<<96CP_iqIP~-0L8U6Ct%Vk zQOnjg5^yG+Mr8MFFP0CxA8HJgE#`+QDbM|Op5i=)wYZ3li=>tbMlzZIp#-M2^!>I~ zi9>qa<`r$ooSTt!=E`-<$1#scw&qYfl{_%_yD!C$Y}ROyHi8K1xqLR4WGW+i+XD8 z_Ymg{?d~T+lzunM@d9J9?xU0)zat|9f5l|mLh*}89!LKgoxMtKUmiO2!$YRxGnL=HMi6?xWmAgAoY(gLB z;N>xYS#gC?OlMepDOG_7hPZFWVsTdD8iIo_QmELwjJPa(4qgJ!4-b^#1e}NZSwwqg!;M@l`B+U-u))~%_YSr&qCR|2Jb(=0*r~RRM;%>8z z&S&S+2^Dog(C}Xe>;ie+?D!>-C0|)P_2n-pgtm<;`8FwCqWonpg8~9$_#u%BB*>_a zO)>sOsvRZwn8#u#v z=Vh|N+W-2{j@B}1Jd9q(V|LOB!b=_FEg9Wg zWL*XY7bpIP(LP8%mXv2OB)4qv-RZ!;AFEy*f4H1hWS-IMh>9ZZ(4Ic_)k(X|7N#^2 z!*nQq=KeYI04MU~-5BPXD}fCvuu&Fm>Sa*tcC@J@#~sYq+|#Y?oz} z5_4Rx6yvvcic&nHEGcpaT`1;Y(PRZP#lpzU+dLRb)R;w5uqTidGuG;b`aFE8ToC-LYO-tO<_Y3v14AeT`pPb>n*DCn@u(wgSEIKe!_ZJVh20b2>VC5tyVQ9mrd^*o! 
z)76ex^9DU9T(%?U1V{Y@)^o~=k6~Db82xX~$(#6XpHb5h(^Gw>G;x#Bq}X*FEJnj+ zU*<}bnIkZ}9~xiwp5|&7cD0PNY#HTYk@h7KVun(k5gRXy2vBBcl+q^4<|+n=)!gXj zcsb|~z^+34gYHgQE9T$km1qV9R+wifIGa8EcR{DbLZ!*G+D$2e&ShWUPxYVBIjIAtm@3(NVA0BWF}n9f6`)F2_JpaHDQ$YuNTxp|vAzZ^-J z#)L^^4B9l{8Si}X)hYHd_6!tC--tPVgph{anXs=g8LvALS}(Ubhor31fA-pr5FDp) z{Dj`@)_gF-NY~@1b;CIRao@~%bu4n6MPD2foO~I<9{6rxYca{fP%l`y{(420PH{=bx!ZUye0G0TX;>1(r|i;Cw9-2J}PTdtN@?f3hr=hnk7W z8l?^OY-#9r4_?Tn5T-3ZVzBGtmts__aK?UbCvPt2sMA5fmrjq~gewDS@(rCybFN~3 zBm`)I3ueQgF{)*Ig|@u0oh9CUM_Ru`e}*(Fu-D-337Qfr*jFi>*PWg+*1Y&pUGs?; zmAJJ1Su}dN&l_=N{YU4hIJ-yfr!uG#>x>y7Das+kczOgrsB=?#rlc=sY2{6mD{B z!m>M;3F1?h+&!ZcZ7Oq*N9iOl8(B)k(^%5ctU6k%GN_eiK$9VKL)aUL@EETWtn;ir z)JR`$KXrOd>!ZEqM$p&l$Wwf1;G-?mw z6c6$HaZwiu&~m`5*B`d1nQw3L^~|XwbaJw$&QZ!?_HzSlt3EUzpa?xAw8k1s))F@q znM?w*%;t{Y5n$yCNpsaDn9QP$BnB(CN%p>1heq>G3wiBN4>7H?{w5Hzk4*_eYTQ5< zTH=%PR#n7AD>PknC!ei?Bz?~8vHQOfCr%mkwJ4;y8)F$m3K-lftyQ9Jn38-=MFATN z;X;{9=v%e~_P{hF%2o;@0kq#gF-0i9we}|V;o0Rf>;=t#iP6C;*04Cc_9yfhMqU$P z4xx2yhA;q(0-*32<1ZhH+j$w2qFAY4pL$dk_8{Gw??B@d0~9jzzXFQq1VZ)mcQSRM zzt@rnMmb~0Ek*@b^B9uIOzur#2qx%6iVRvw#kv$$%WGN$D}k8YT? 
zgfTFhFrL}wx+`Q2U{zLO&X~^D1eJOO(V5{}sFg#}7t;>h|E*JgyRmBQk;%|wJ(jTK zteR)&v*WQ?E@HxQTtFZ7G`X|e$o*%JAa%sO)}YP{zTNBlCb5|;r>q3B#!baNKbSMs z$EUT3s^ht!$$~5o{XI4G!I&Pkh^E&A@eBe=h}i-V{k}edlKd&Cpz%^`^DED+CBttC?}*{2J{evk zPBdWRS-Os|e}B7r!mrPql*-$j+=WDFL#xlr)q^V1qX0T_Otmx~8UUox0gqwo!Jm>U>Z;+@>@lVX6e0Hq+ z>JckwTtb(SoW(07P8AJQXCv_fD9DZSNzB3SYpS?LNxInuI+P7jzo-8!4q^e|!OUJ<9V2tV zK^N!2=p>SkIpA3Vbt1CH@9{fOTHrmj!M^o($tprZ$dPc^QN|v>Vj8*!thiG15GC5~ z^WOWHK~v6)q1rk=UcPkqcm>G?D{szd^p#gq-*k3W z=~@t%Qe$s@CO?`dN$QK)=5y72EpuNY(+fRXB^(5YY!pYYQje(aIqZ;c`8 zT9I1PrXer%27DDfXjajA>{-o%3N+_zI&KpE$o1hZIOEcFQsT+y-*SK7`zNgzCOaff zSey~A*TI>^PC0QNhE|f^7QZ>M0*L{ZyDB#w-DJWq=PPG>^O>y%n8Mr|zwe!Lg^8%S zQ@+hxdj6EZC|{GQYvnB!$+4KuMSo8V3M@O#aKADvzQ~p$I8W0r6Sd=Sy;_?bTN6yO zlE`TV^yV|F!|iC$2hz0UrNf>p)o8TMFwK4v6JaTduCg1J>Nb7!k>{fLi0V$1LiX%FrXokfkL8Cl(Pj9nH%#pzuRH}Fty*aKR!dAuSr{+jC$t_ zdAyf-o{G15IEog`g`zf60-=a7% z1O~_t!8Qt7onU;pTF3ly&vy$6JT!53&JZ?TYB0T%YMV`&jC>%@&V_&BO-TyFJsnMN zy;B`$HFk{7UU;Fb+i#iQmkVd9?Dx*nUBqhlB{o#rF^oB`wiLK`M3bb>MD>$v#QW)t z2zxC9S+0d5aRhTF)x&ddZKzNe{`Dcqn>q?*wMnTzAg{Ahk0#Dk6Sr7wX{u-{(SH8a zrim+r*GL%tcgPjgso!lMhyS+DNl|JCS)Zoz+kPh4pyW^>LjK_0wkz`HZ2XGC3cvW$^6z=pvAS6yK4VMMnrWf3jyCQXRN;s%h5F zI6|(G5Z}MVQ4Hj#r2=O$sLeYNHA zsg6m7Iwtj<&TQ2H18Z5q2?xV=_Asn{_v_dL?)UF>6{@q*sY@^_rL~dS}YB7?D($9qP)tB5c44ac85rT}VfUD?^(yhLlKAqww7CMQdem%#=S<;J&%UMRK zYTQ5;Zs*hloZ1XB7`Qg@RnLqu3Kh*D7QLAa=O!dKoz7Dzn$Ynw0+B| zB@i<-P0ti7e=v&znw8pJJaVMAjar=HRtIZBlele3d27f1@18JD8ma`7GIv(t7e#! zRb{hO+ZDwbiwgN2ZOM#X@W-;qZcc8`gjVK#me;&zgIQl>)9sSX82!ZTSP~1lSOxlP&2fgyiAu}-kO@(2 z!SYndiRSlA$+i+M%Bp!XOj?SIY!5nL}>Dy+zXdM?G%rz(Qd@Jg=pz z1CGQaumfBGBj00k2Alhr%vr;>X3(#}KN#ATKFoN2@e=>!OR_Z1z?0GWK`LZEr0^=p z?7dYJ;Gs~V-^VOKSYR_a9O*i&QBlOuD`AZ|E!*?(syaAO{ZygxlBCheEP%whAR_MV z+tR`)vZjw7Z|w^?i=2M>@QMjZi^s=Z{8PnFMF6%B@N>h)I3$Eimj<7U);Ljd1_r47 zS3GH4I_BkS>q$M|_?y=;8+&y=tkLkR9VU;@Ma^ishYKVw>rT7_2KP0h{c4Sv*Q+J? 
zi=@bK^YK$j6}os0i$?BfP0#5$|*XHO_Aj4zURv|o>l-Q1eUUt-Y9 zpyVDAiffyDhwPCi)jkPXCd+)nvkFZdk?&1SX_gk4TP9cyleYZf8F_3yzQPKp=7ZlMlq_$da z(V6RU-Rfv1oD7mZu$frw_%2^$eCpsJQC`VJJG>S+kux*%woSTFb9YOZBF>_b-W{P2 z4TCtC%(>$%R&rtBt>hp6_lfs8?&D0fn01DBRlZza=9{UtMC3F>7lP}W3?*UWURX`2 zk)OXuJxS<+8YT1z`d5loF?LAxL3MA1Y?eVnoa>j89&yXUeQ|9Ik;|!!u7-K}DUl;~tCYA}e37gB*fSDl zhC!E#e`=`nru!u_Np|Q?j$8T*sE4TSuru#5iN)~B@ZADcUOwNr-;}_%f2hV7odY_v z1+z=gYq~nE4jr+w#^-nK_Bbf_-8&4ZlqQ{f1|#|Y}DumLN)MH+=V%k0iB|^V;?Xuj^DIs8ZJss7HTsa zEI2l^y*L>()p;CzJ6>*k_ev47L@H_H7~Yu2{l+Pa_T^zYi%*410-akSGoagvSNZ&W z#rLMud%Jxn$6r*yZyZ%xTbK{%DJh5O8}MsSg5g!Ue!diR902{&Xg_Di!C)>U>_a3` z(i1DNak1VCAe+47Hhu=eIKJdShRp74J%E!E(hLOk=eOhnDK4Ux1iTv)UhBIUIuib_0kX^B&{se8U`-Z$?Seaz#p~V%B&UxUz<=aOLz)B)pDr6U53iTs@ z=s)>&^2Y5g$rbXwAaQ12IKuaq`uB`yRHVPFXvI2Q>!oFG2StJ);_rH)rS)={i|?aY zGKFRG5Y4a`O`vK8)tDT5kEl0-(%JzL{v;D3FsjQeoX>HN;HC}Hfs@HWFRa-CcSH^{ zHHhY^j$|g^cTUJ|zN73z0xt<78jQq8z0V&sflu$Xc7}XH3RI4eN1i06>;;8SdiUM+ z=m<~nqmI9tV1R$Vx@X57fyc%3xnQo+SmKwxqGthL62ey3GWkt}H|Ghj(9ijL$nNgeWkRcE zbpHtA{LQ4K9Dkmi7p2Cjqa*RfW-P=EY?R)L6RTbRZc==^KRn_oP*A33@aegWSRyyX5Bd7bCe)t{@@4mpX*y&SUt@s z5!uM!CD}!4c~wFm5vU19NGY_(*hwj+0{)Hw481+dY88L)8D2)8NUUN{Uc`DC^z%t z7VAY;jNI6L_rjbSa{~4L;+E1};wA_e#wKj;qbhvWX_|q<6yi-jYGXZF%?Z=&coTgn z{MTa@yNUChW5b^uZxe~84xwpnxmC<`i}Xir>>JlF4lcs6N+0mrMLXfRL8!moVueB4NO(vj=SLd$hD~rk>P~PkP92 z#oHPY8s!rn!nB9HNt#kCf9;8}my?L?%& z1FB|nN=YSTv5XmRK7S;_5!MD#Ta6i-%L0)=e|CJwUzQW zxs+oiuVz?+J*g4r5GXi&_zUq+ZyaZ|2Q1J9(@w+sdqCR>S+X7hu9z-QPd^N(LE=X7 zdLKNXOt^P^!Hw*fL>Sqa9A@6X#5jf1IZwGd=uexIbO~W2yVV~MqC!dN>k|i20tpw!VC!JP?QTu)`-NLk11?NQ z*kcs{3!N4L47l3?d8`)`Tv^ou=`KlxDf;*U-m!_=H|Bolx|dq0jAEBde*}zh?fR~{ zD?<)#XIrK#ne_5HX!9$*z~>J_aG)*31aneg1xlN>?+6XWt_ATToY)t_kBJXq-k^wq0B zAsuL=+gV{RjyqL~h`oMROsDWzuD=K+yV{%MjyKO)wx2vR@cJ6+>`|7qo%$^o-( z$>Qj3ly$=QuzIs5GbU>F0wFe~r__6_4a2e8hc8r%LL2Br*e1(W!07tbvIYwUB;im; z$O6o8kEw#ki#OEX{26;7X4zR6{dgL!n#}FOCJQ8iVfz4j`>?`5{TR?sB-~RiI}kz% zaxSZH7;`qC@{0kEq`2nPVtB|lK+&PJwc(Ex)Zq}Ug1|62&XrF8Bl*W`z(gK1^ zs$k0^$+-Um!%efd91CWFzyCE-TaIO~OYQcW< 
z5dVt9iV*TBj=&}SrcA4n`D*{S=K=%ZI}w%)^=gPU0*mVhTP>f)e3(o;dc{7V0H0+q z-x%sMS>$LC9TPX3%NT|4(<_B4iJTV-XKv#ujuQM(e;w!7fBFN=%#tIf+J{sKpFhNs zB&Xz7ibY0Jk5H@_%3DOwMj=#(2fuQ~o}0-00DeY;^(Kfu>mGL`byX7YE}*}@TEjXa4sqFi_cml?Vl+jt@!hkGf?FgOC2Q}G4UlwUo9Di~ zyiJLgf^sTUF|9%VYnnC2mR9S20GNbC=I(Vspd|8xu&M|sP0I7 zx?;BnCan;RXaHp}Z77ZW^#1QSfa#aPZ0wM6kjpGZZ>+yS%CmVB| zL~)7cLPop}tt{xr$b*b#k=J}jis?z%s;PW~7-?NbDII+c%{s*z4$z=90PuZJ zEL}gl^;s+gSVbJZ?jCBnL-B6B@6-Da5|N+29UJB3p-H8RQ3TMBP~8ZQ+&S;i4s{Sh zLBV~RWE+p(;NJ$PyUpc(XeLZmW3bd%TUmXcv2KHZPb~pq^1w(-C5imcs@uiog6?VD_OFm+LlI}mzYu~O>&aIVDE!s&P zetyGp`W}}P?|)+o03_=zb<6};Yl^6@P;zT6zAZgY+r-&${`ZU6>AelQ%5ZzvMcwqz z|2MXm{1+$lTOx>YC^gQ%llt$>fCci>|NGwpAemT=3Epz>ur+vJSlIW?P4a8C3Iv+yR=h@@aoecNdcU1OF6{Hp zYWy?ZQj_B}qI%@@k>ze8q>R0~WfaYz@;9z2s-%*xZlxx-CLrrzAkoQQ#-t;%y6nd@?Jt6ha17C?E8QU_rxVW}vN;Q)c6Tt-q z1(tSX|IP)UP>^<8D}IZ}g~#H5XR3D72&(9e0hSq818fwhc=h*mZrTwmVAy4<$X}7mNM>zg#J+FM>X^@BS z<=(PEjI<>hax0T&(NH8+Fzbu=gb0_*T2DQ?2rw8T*6ZmAGFW{Ljw@zv*A? z<3|G_q0wmNEYtPL$n5yhQLA_soaH!4)sg&xy`pZW*>^Vh*x!9>z5jL`eiMKoJX$J`~4(6 zBA99SKB<33y_v{qb>D_72BkJ!dN>9z3wkOZi~V~ZktzR;9|{sffJyd6V5PC#UrvU5 z4~dxsJhuisZ)dnIH_naZdjBc-0SN$9<3o`ph=FKmlZePrIFtD@umei1p?HpnM0@s{ zh}`VJ3xP#17Umf;1pH_+Z%<$fl4ffYMIJNpVIZ*q%%Agp96lgwY7}#iGX*JSkK462LOB0EG@MKXo zkpQoA0Pnw0YQ0z^3XfM{%*SXEp@{1kRTlY?Jdc7><^jGvJ@ms^#`?X^Y3nO6-$} zKdvc+XaxlHUa-u1;JQN0(b3WR7CrXcBWZQkC&qg#mD%GN^-jxz0&ZJTSUjZsAH<_? 
zupRjQ4l`Menv^4zC0wFnqkgS0$C2_(QL2s3c#Azf)>~Lu?Qj0f6?(rvTh3tA<}vwQ z`TT72{w5NOVhYBqU9L3=arfMxVZsSig$dHd5OMroYIc~E79M%8hhr&x&ART-`4zJ@%g-C6(T27)YCfR;s70|M{Hh8|%VU~HJ#Wv;Z zw{HqpcC!}qHKzIjNFdF0R->{}1m9HfV}-^NL&*W+E44q1YJL9bJ6O)82i5JWq;GBV zf54C})qH$f2u)lL@kWXoM|cCUUk!pH-DW)vTU-jdyoyy}sU!P;1j`Np1wTQ*Vjb1J zPVogRR$M#H%*?))GGjX=m^;kZNN8wiY*96@Rvq=8fFx0|8$XZ4kLL`J1#VX~Kq=+xFP}H}A&6MLX ztQYid2MJ38&j@5YWA8n8s*LovY^U2A8#Fj}?#WjBq_(TtPtr?D7+b$e0obZ^XL%_GA1eNMEjhMPM*3X-HE_0lWx91WXJ3F=h>uL^Gv9-> zyr1iy0h0RLb&=0O5-S{gs(0QrLh%)ey^KL>4+t$?VESXNw zdZJ>XUP;*W!*p6W>Tobspq78vS`Mh`DnaNt@!s=5adRNfva61Xi%Y}UZyiOiI{s`Z zlXI%tL@XD>@5d(sL1Fam^FQ;oxTLt$dBJBN2|H_?f#S8CoKwYWRGpY3Tcc=rTkw@L zsWT}IjdFNRQj`S`&sQ9Qz%KesWpS3o1Nyf-(i!5JKb2^qc@J4C+9r4xtyw=J?qi%qi|zu-@{r*H2C z#!iRXwxX|28m4r$9A(h_rbwh4pAP6MZ%qb%&nQZkDtRNT%+b)K8F5j75%N$~F2Q_x zT01B>r~H?BMMxNys1-G*)F+a~^K0dM&lR1lMvvkSmlp(EtzwN<${2Vu@(Swq1ecFn zT(a!1G1cfc7Rh+SX)h64q2H-rS6|TD2}ISESae>mCCi=*-Q{iXVxwof=_3jOH+k<+<~!!FdxaK+nE$K#n+a2 z!qEewH#w}jE$?C&LD02O?`O4pPYF7&%%|hYK++0;I*F#Ntwt8{S2^^}2GX&mkLYi( zgr^sY)0)kGIUP6iZ)}%a9NY(S+-k+)1=}X;T7C-poCg$}-ybh`;<<+7aY-aY+Sg^n zBK=S>k=hqU`Kto3^fsUU<1M%Iim;JMv*Vm*xVb|k7NBnxL;_bPCwelj)aNvCfUWPdES^e|a*ewiE*@AI$TS?C!qaFV_c#s`&H< z`TQYQQvHnw0ygS5*d#q;jrJvw@+jJARabR~`yfv+`v8`MR|EaNH85)Pw$E5IasU;E z05b(rfFlU>iJBI30~eZ>H+z@xaLAo|vInnVCUXrK{>G!uQ5-5pL_BQ!57i%~xc+L$5iyN+vcZIn6!BX;OJ=;NC8P zB)j2cy8F)XHWM(qB!BqelzD;(y+RymnxPVWTD7L9XI8G-{k&z*4Ll>kk48kMQ62;0 zxDZ8;|8{+g4ECNJo_F^3!^>Llwa^(n&E!f2Niq|HWI$yeYiklBEpm)W7j~M0GlxSs z6r?)EUXH#SwS!+(m)q#pZAvCIRdW(8+r1A!v_7ZGd7M(!MIV2?2}pQXFL3d9FF^Fg z_tv!aux|8{b1g9ZUByrdW9|;BIC9v|Ysul~@_0|UO|gCLkD>R-{>cdx(}j4#`Vkl; zg(#Rl3(8OHfZP`Vr6RmMkJa zcu_>=JhSe0i?JdBbVr&Hd8l~%eg}2!40>)m)le!5h++rDfp>fAcXPd0XCHo?+z{*t zNoIYi0BdNgD5nBmo}AfW0+F|^;eie-9eVzh>-O!+%f=SqOAteWx#}Ii zdgXQ~IlM!oqHED0TaUdV&v0k(U+acIqH>KKn?(f4*KR*^_=61nzkvljBx<9OYY`yY z^|L2QEDXHts;Wgrb`Yey;59wO^;ZJk*M|#Ve#FtMd`^wRI-L&abELGa!6lavj8}V<45CFI6$)oc-3RLI%{p1`6$Z{!7M|EDl9yQ^mr3NQxyD 
z@B@9e@@Zw&ZIEX$k0{niE7NY&4ZB7*R+}e08qW_!QbMlpgC~mQvVqktk_`R>oJko# zwEEf{AzZaSP2g~+t14*blnwmJFsa4y%KS3w?UW_eh9V z2Qgk>ZJ|^XzWW+W#-|!1$SAhu9|oFHGUAxW=0sPhJtOlGhHPQ4J-SxS^GWW5@u^98 zkXw7-8|yWkXOq>q?~Lc_*L>36X%6^-ZlrT+B`E+q68`Vn=fKlV1maQBFEj~Qok?$J zGF?UU0!cf?fW9?wW4G5Qr#*|B^=2LY1Z)%hdl{Hgj2+EQ;tz-!&UL3=HWX#I$_?g%PG7#W{e!`All-8Z~GUM`<-%oO|$ z_r9q>A+`oZ#BECW^gSo;k`_2v+f(6ip`I{3QVzGqS^PPT4GY9cN@~l6CN@fRe!%Pa z?!vaV40Q{YOHaZevy5~9sEb;Q`5mu{TNnnurzP0o{q7ojfCzpA&k)vvLih-)usd47 zEhLmC@YHXdLaUbiccS=5YIT%>h43Me_Zy|?jC#Jw*0usN$&_^&Xgq`9;JB&vcBm9Q z>_ceZh>NsY9_Zxu(X&G&cKRhc?PsQM`GjS`!ac76D-x&F-%r5z9nS+~Q~FPT;VmvR z0@0xFC?`}t_?`z-GRpF9xk&{g5jC`ab56^O(nL@j2nnxr{PWX;Jtx!qMuK^4Xr9&2 z&%)Em{e=F3b6{Z>CY*tXrA^vc-SywU}+ZT zJrQJA@MKnybZDvm3F$w0M_bmzA0s6jiA|wMvrhNS`{&mL4@1(0Xe?cFZfEp}6eBt& zX7bR`8&8apJ80r3OlX#;{oK9_(^h^c@usm@-x7eu-BgB^R>lh-f7fxX0MQl23`y7% zeYUHn;Xa;1s39Mj6U}VolXkhsaSm^#fjzuw1!*zHa*mx%_37wu_LVDm41CA=y%G@X4mZ1~i)Pw_g=P@10unm1dftrQ&ZKB#EtBUeHQ zWkTSf_aTKKV;k!L>7?B3;4(WuIG}@&M@%!V?w}{yTD#eLV}_`=5jR>ip0L%xm(H(W zi$s2 z7S7*nJ58^krZl}`!e4AVU3_p5Qy0oFUl0Ytl|K~m&~SR5p!L0}U7zaqH#ftfslTxUjnwjtJsF! 
zk<{>aez8C)B|JikUw8582uo35V1s>TKQLV~3hxU2fm5 zG0N2Ii&pr?uOg{}es#+Lb0J?TggxZ_O(j6^IxsR>dl-c1y*ZS0J_!e(+$Y4UnlO9(RNn6>j_(rPk=tr~ z#*tsgwcYLX!x0LK8XdF(;=xvU>-YTt{brcjQ)T^*2cFVTH%*VuJcYRRUh(Ku_#b~H zYQVht6WFX;b!2PFCkZSq$?w9zY;pZW4_jkHD5!mi1U0B~^cX*?W*cJqUyz8fOMPCB8k zs(3?4m(*~riv8b?AvK_wwVXen@~Pm7LK zm-7UGYQ5Q2fTu4TaqAMuvA^oOnej=wT?s$std1g;&(gR0>eMckTI2r1BSE+}nV(En zddxVL3_V&h;$@>=B;Xj03!%GkwD3+lhjTvJM=oJ}FH0SvuT=jBsuR76(m!p)gdzjs z1w*2^+M7zLEmp$*+8lb~+PzffIK`EMs6Q}8bnxz%Od ztz2+s-{6$xSoXBVbd|ZW043xZRfc-wW~PFWp)&h-VpL6Cwum?l9u5f;Ave{l|BdL8 zK>092PTIzq+Bq>ji$(0YRw15z;&kK;c*d8R5_K!`{>qjK$^R|Wv^MIR3O*?zJ#V&( zo4Z@TQ**{b7ux^s8WE~+R9ILTtst*XSO>lxYQ;w{H0;;u;MWW(3Lhks;73u%f!u5V z7H16O!_g5O9sWr`hNt&GGGl`X8WEu=t(7`Xx6kI_)lM!9gQ%;U;u>RZVoS24GU6tM zph~LiN<(3}jEsz+a=+g8p>ac*|B)Czm=)cziCkYlhP**e8rs+<_=)c5C6Jax0|&}* zNyiGf(3E|4QbGWdIHc4-nfOzeqeaE&sQ>XFCx`_=794+;mqa1oyN`9|!DX}qa4Rs6 zOf?5^{~Vn4AVP`49RFFbsAUm_h0THuCd}#(HgK&)IALBpS|E4xT6;ybA;afTxKC3) zYd?CL><(z%2A|i%+FE)ppgA-}ZgxwQJh0gu;)hnoSKS%2lx7G zZTXZTk*T*jj%j(j%|jJUfOv!KA=lL8K=5lh08syUc!|`|f?q*czk?zHAiekFKa-B@qki zs4B-I=U=mT9}=jupJVsE_{q0DttK{$$a;powtV$)(tV&1MW>X0yOqNHN!a)xi7w(9Hmx^Bj zBmK?98_RG^@M@Liebr2``v=3W4{wA5?>&sh{cn!bhEo{0F?!T|;AQ;w?&O#IRp11C zh>;0w)YhADis~OKyj-;P_phDW8}#bu-U+dnicb38FqGGq(l}@A@Y;+G?P9PolgBpwMyCP2XKeanc1>HZ}$b9tdYYBLq20WCB0vd zq-wZ9?h$&nYLUGQ<%(o%fR8G@Nzk=mYg-* zn~TwqS|ioC?3Q z`Wl*=fi@%pc)*mv^I--fs4y)hpYT=Gi8y;991Ki*#C(NKT>(i@qV&TAOso;vjIJEL4rx0biZ_w3wjL6fWGgXj+-9~t%kl)@lkF=nn>s9ew+djqEKcg z_DDjG3&ud5Nn5$8B6_VT=XVL^Ta_F{^2>Iv8UpO!=jd@Z-1$f7i3qe=WJZzryfffT zIwNixjqd$*3`ZBEE1x$iOlDUaChR)}fM2dm6~i`Bp0$J8g}GA z$K}l%-9Xe~Z@s@OfDLjOJn$WZF+dYY`&=6)Ur% zk{;UE1J4A=EmLC9Q{V;E-`_bwU<~r8MV>AQoL)HUAloYV{^int<_E{{rOSMDo5df3|G7052gSzBy z!NR>!-V);l&fQPf4Xu%&{mU`#37>_s9>f%9VA_unD_BosFV9*Snxzz+&KG?${n&B2 zCv2EH!U^G`EdWqy+@w7|%Z z_7*P&amT`GI+0B;&`Ax2P?9B*8n_!&!@17^#qF34DeXq>beWOfuCauQhjN%3k;6RBTYwx{7yOhv6;xO27Qr#2+If zSyh$SNmh@ z@eLFRWz9tc$`j*rdqZz}#;E^1sqIXfg`CtmEVBH_R7prk$XMz0SAy5GU(E;O>6ReQ 
zIKuC1V)@S4Ky4;)P#u?pdd8?l2a}99Rp_`ys!Xd?qmB)#=YuzC@Leb$*0+_oE`v`< zNFVrcou*T%KRlE~qtqnzz-2ywu~26jQ>|QC10D;8qlt8LU}|bfR19J;oOVu!JIgDK z{Vjx$RlVxyO0>wy5dyNgSE&|$m)e4doUI=Z4@#SYY1((61FXp*K|s9r#28YoJ?#ye z?H5_El=#a1p|KxATXVh!7USneQzc$>tsR_Dd z-UwV<)Xg1dZyB}P_Nh?{1XVP}#i0|#EF50eN=?mjp1(#HA@8GAA{> zq$Y$PVjmwbs&Vn~SUmT660KoDie5*53IOAo1%qQQ)yAEM*J+i9BcgFekP-H+G&faY ztoEr?BNP;vq#rphB_Z`B4&?}oLE8I*;&U#ASizebcQy{s*u)PLUb)YLWvYg{HPSNB zPbqr$&yplWel6WD2|ZQ6*4!JFE_s^&TJF)Eq@<)=S;MLDbTX$lz2i3jR7wP~yRjHt zygWuchS-db9OWYw=#fE_Ln&ZlvFf{7y94a#Qw-J|3lLUr1c4BG27iok??x}kYoWh` zRef1hh;57P>odfY+z>3X{JX!}7Vf5^HPL+G30P=F8DYn!t`U^Au}N$i-LfKzU*F3b z&ksoi^PjjJ$`+t2>JTjk+IEA!?bXgRCya?8|ht ztRtQ-1`b3c^H1|k#cr3eMb_=#X<6USZy%30FVeF0xtCy#3qW`awKReaz142s#D^Y) zKCC#nRd$2A4)#9t#~uLNb1=eCw>$JkgQ8k3WWXj!03 z{tTnZ=Bt{9(Gx3YDHE0&bcYJx?*`8j@(wGirHuODq!+{d6rXQ5v5zn#EZXbgos~3I zt&?=AsM}C3FW86FnC(Te9XDr^(&_JI2SGFI|H-$? zs*=q+d&2=Uy=fCaQA)$xNAqsys{eGtf!*DEC2n+xF>>$~KEzz15vqxFca!go9Ks2( z!|!t1xP^u{UySEPSM^Iq2Cw+|Wxwtdt@(NkSjmaFwh`b{;8JNWvNxO+tC|xXSKnry zCR@LT&wYt3`rjsL(bDj?1G&)Xv7m+gr)N4P1U*@ofp;;iJFk_ezysPvJC}|6&{P zrnBA1+V0@H)-px7^=K)ptIBDPPlJ+UL{MnE(LCVrRrM=3B8h%(dX`vsxlWA_S2Keb4jEN3 zGdyRy#)K)y{-}T_J>wR&$-z;L!84EGkAydve;#B?PAYf(KaWpVJ#Ms>%;0>2`345J zE_^(Qy5MLam>u@{JvF;(-%Oq?$ip%mhos+{^ZI@r3YvgVnFNR!qHWEkG(YzlM%I+F zN18eZ8~JcZ>0*`+d(dVV{LCLE7u>)tXBza(Prjvu6|&s)lVv+_%&O#8k*<#Rdd z@=dn^dpRFDC}qJDZ0n_Kc@*31Ng37B!hiJ5?d9o4LHe6uo!K-X`LnNl^5R_jFTZd) zVaK+uB8R!9u7?%>#U)1O{fW&n{Vt`h2glsw62H2GAeK8t2m@aLJ_g7p%K{fiV?4&U_sn;R&Ihn~>O_#QP*3$!L zcP7}i%sgmfsOsA>$DeY%AVY21mDc4H$LzV(uPtwvnIbV&KEF+BmtAJDExJFSiHy18 zoe1dAQr+fDyOn*~x{dcW8)ZR9YuPU=Z`gSqLe5v?-(>*fhW}P^c5r0k=*y* zI^ok|qr_e!mjiEybtpV;LS=(OwVzK0j;Go+r7}W)r;7}3|3rH|>ud$?6tc<@wn^Tt1^y$3af3X<<4QBE{PSe#XmJRz8?CxD^ zAabnMK66mMcVM|vXJi(RaAZOByinakX|J6ur|m1i(HG0ReXvKimGDi-5ResLC=s0J6t_KI z&h9EOkc21WIr&UeY$!VC_^EB>3p_7zDqDWr)8jq+x9@ziS;=#G=aZD3PC^DB73r1+ zkz`Ho>4AP$<)K^E$ti+vs!?YTD5)PbGjj$-pl4-wa32j~0k_*Bh31tr}`=%SOQYbmft;;<(_s3;8%A3j4cV;hu 
z>eCdL3|#@wf*IGtMX1#$%KFshd{oSy?8Nif?*qfT6Lxpc1@;I0^Y}W-43EBlotl1` zGg;&<{d;~FK_R!jcVk`3Ti;-GWk)}xd1F}cL&?0ZSiXZ>!9@!*+Ub`ZZ*Cb4S(hF| z_S{iQ7s*7YShNA^tf#J!rTu%zjx*IGUHwkiz&yq3D`=VC-G>WnPI3TJi8hr@~P z9Br=Lg|(Mwn}Dr)DbjZ7;>b{^e50&F`t&jF3W-+bn~E7Qd4AzZV{IA5jpm{daSB*8 z21vFhtt~Q;>%npl@F&+?E zEsOlQhET zDm-aWmnm&XuxnDT*hpOdn>l#dOo8zDdMwRO*kc;PMOa*m!k{eu*sIqgZ@a$ik7>KZ^04e0jH+| zS<-wy7hjy_S_^dn;Y;g+Z)2EsY9L4Q&m@UjR63eXJ3`vHdL220?}v8`gprA(E^NjN zq=SMf$pqIK932alL+lS`(Vu!f$#Ho13bPN#l-erlKU5;CqiYU^!s`B(CnJ(aDqJ#& zX5&jas-2Ik!UO#k;rz^Ot#>~oMJ{*EAsAn~hx-FxQ5<21z2%=FZn2?_IB ziUO_;BOCkYoyD~{c!pWc!=>B)qq2#C7^fkFG=-wg_TPk~w9>fh$17;JgTvykIW~4suDU2wV9KIU32?Auj5ZZ5+^6tz599v-j=u%1g)tWlBtX@vvqIxn}_5JzP*WL>> ze5|~#Hw(2^Gyrg~ZM3G+W%hvKMCaMLc+P! zg=}2y4AE?w!tIQnknm6i73ThNd;1aBD&c-~>l&@=2aWs;`|%t~Cu(WdgHea)&sD(& zSMCmaP9#3QoFm^iib|>P1%Xyb@J9sMmVx%rLTV_XA%XpO#Zr1o63XrdMh8CiZO z;tmy88amua9}@YPGC7CxuJ`mal2r)xA^=9UYttp#8uI{m#qT zH{>9*nX|5=(#=00&RU^*H==PDHFS|9VYQu}PCd?R$F8K%&9v?`<#+QZ!H?{RvRF@> zr4QW%bLvbE@HG@4kMTLv;gmMArANG&ns<2&C+q9B4s2RyR#1PyAjJpWy4`=w0mEsc z$&4viP)I1v%tAH6JtBJIcP-U2P%}(-9UoEk%WWTUe}b+LpP>;+IPZtZxWA)2GV`hJ zooYb;W4UlJi0|UC#$M6H)%FIYEo!@OLL2metB5ESLc4ZoRZJ!7?*#5K`FHQz%8XC zJ|_gUUhHFcWyM|tocPb4&&Rvr4+K`R54-i@`Jv~Z!1dq)d9j+fj*jE8X0cF@`V%W< zB&*aw4te)gYInP~#3etN%Q<3B&I^}WJ(a{o=JdEYdR&#UxZzg(fXrh{Rca1$}Tn*^F-WSyY0fuPyp@T-M-by}^Dq&H7!7a+SYs z7sW}ai}`DPwbq`yveHdUD+{5)NPQ3jC&ArhRnnnIV7>B=KYoV0FWROg#9Y0NTWjXS?H$Q)0#Cp8gVBr`e?< z%WARq6-)9BlU{T0T+2(lG)o}xI>)QZ^TDYPh_&wph5iIR-gcn}&X~7+!lmZebqI!k z{~o`r?Y=Tgx#U+k&{JbLDmC@!{AoAcd&dQUd1`TYfo^w_ZSVlFpQGcxV=&3xpQ!h> zLs)eE4R{wGgVVUX03y_@FHS|fAiY2=hCI%VJMaVc;h#F^gdvlGfq`%!H871rNLaR7 z#l+mAYu2+?wgmGUy>?48XSZ&-Qtks{KU%FCO-?^IfnJTai*?Htj{EW=tF4~+K&eQ$ z6z*KQgQq+9%`et_i(M0yd3^7C8=h~`(KzhF;duLhnq8}goPh|sljG4s&(R;J^i=d1>&Pe#V<29rkKQXHGh6DI}k8`oqKQo}@$s4TbpCpIpKLC&k`%G&( zXASAy!`;GmDCU-CxGQ&^r)&Cqet~#;txSQ$mz%bi0R+}1S@2yq$YHF;&qK#lN89@e zG~eW0X1QEjl0sL)mU7_v9Zyh|>;QPl=nMfB0Q_~hBe()O4$uI7YqT2oEL?%kp+i_Y 
z(v)I?Udm0QBgOK4G&ZASrIqYXK%tRj+I;X;U5Ks`SUB5pF}}Qa@0K_~U?A%&{b|SZ_H^Ctd$V2_wApNNQr{BS z)?+)E?CIfVu5_1?k(mJ@tC5S1i+h zI^-e6`p}uw?l8th0eDYbL$js4s-4+TrL2M~LGGAH>ps9>AR*4-K}QQpWyaSkylfPH zIj#T_w2_{QYX6XjIH$dn5ZP^fhiwj7Rch016h33HC>D< zy{>?L?3;_XsZPb^K8Ww3GAp5eki_oa#nSlke5foZtzQ;+hS|U-Mff`SROZVRh(*vG zsk&qI-Y*RWK!DtXJ=$x0oChO`1oouBLgnI90Q&hUL6FO2yp?w?KvdKx-;$4jZ#S;u znHu>kGy;y|U`$-fw*+mseJHJWumbO(KTryU0u^A{s9F6F3-B);P6Xte|Ih!!3K9Ri|Mf@WR|31dfH(o=Yqtv@ z_&vhA){&c*k1d#kV2YJ#4hljeo0~bg6}*B*jHl)TZV@@L8X6i}S$-g+epgLXzFmOj z5Au#lOr)e!uO2_?_WqnCISF9m&6XQ$o>{59hmEU%^qmL}tM#FULJ)36Ma5*k97%zE zP6&{g_4Wa34aBE%rO0S$hv#yUDQvk;IcG{V=H_xCX^Du4qGMyT0X*P_lq8~LC@OzH zHt$zHlun-Ynlo-%hZq03osA!J!`{G0u4ZUIWF4SV@&C;7GsXX3RiEGgv)&KoZ>`CH zYyxT!{_lc6Kji;cDIhwKlKGAdfk%AJZ^t&>)OBoO@b{lY+jZq%n1cr_%vCsSVoZ`&(34*gToS{w3raFFoiRf)8E`iMVK99)d92x#;XuWOY2 zkO}qdP3NHagXZKS(zsZa;F*zvx_*%@B*QYZ|CiUt55x?l1rm~|S{jmk8Lccq$Q$nf zI07=^TfO|xo-!4@kd*%wXzJ)q?ju>wi}b{Uf~b&T;bdPSE$eh}FD|64r}5#=LV4BO$|UiJL~GJjA4z|SBC z-XBB$Pym{+I&p;jiTY(TvB!|3ZqsFm94H2PKdxfr9T)kXq<;?jl~-FEZ+%0sGeeS< z<-f$+?#=n@S1oQEs@HN$duTc)9ZsP+H3`T|=Ze`zs_AEvj8S6hEmmJ<&QKyGmvd<& z8ZiXQy>hMq_TRSXX9u7q+x-*h1A_xpWJ|o`1?YrDP_gfcDA`1JMzx3-+L-xpX3D4t zTb}b(OSWU!K+n-%&!>yh#UGTVwzerm#7sql`i^{m_!>@xAe?t<=!jkw#%))Kt~3YH6A46SOT@~rg&QH#4H<#vUlGWvogljkUtT7y&3hx+DF4y+-wI#B+FW~KcP zT8$PUfNxY^OU1>kLEv?dKVNeb*?f6Rxj$25b@%ZVn)Ej@|0>K|#j3-YYUP%xv^rI4 zh2*_MBhKYKrc`pL3U3 z8W|&-C(Ay%{*mR(a_G`K;~91XeKLa!2#Z%U@eq(OaN`^%c!<8yg;;Jr|7Q6nY7ZKGYVcc4hXK=?9@VUruka!wQHYrfJUhLR!d;gE8lko-Y^z3F^8D1lbH zLR+21it-9~1_J<4DWPs|a$1ZU!_jU`TG}2S zK+d0b(5q@|dz<$wQ_kN-ehjrg%zSf|P z^JTS(jEPAz`LYvNr++<*!tl0TyV7J;Pp#2BCPjQK_D>=uw!`Xbwo+99*+9z;0UNP` zcQOOboOP6>5y@zo%$UUVopp@466)5(gLTyJ-&29@e^qrX%s&fgFC1C!)^QH9Kees= zQmCV0u3Sdlr!ZOl=PeNlMstNQpTm!|}cm1%?yM z=@0tdqyaU7SmT=v9+VnR#|bQSIA%6!4WTMS>jqSl)pnj{3+3UJBs#s`JX$LcmLY!r z0ivPg2&8l*3fvDntaRW{*;cwI^!Ir1@WtmnbyHg-NNrJ+HQuls`};}f+s;$f<9@PL z*GIp^@)yPe=$Z^4o!cZLsS^g{HbLyXAKjgo4Dp!)KeJfqWpEhh2N@wSa)rFU2 
zi)C>DNy9>cxinW$O7Tj$)IzN(ZE)O%;lZdUlgYZd*w}D1<4faJ7){APK4SfkkHt{R zYLy;YhQ`~X;DIl$SIfZ*3NR}?R>`c9yD_;0^IqETWc|qtu@qp-mWz#u^9j| ze5{+=UU_My7YWZX1X=7CsR+lV%CnfGbU zmwXKcZkH<*vxUYARc8CGsJs^n8}IYfYu%6#cr2hypXaHS_xa+`eX`Lty7^}E$7HDk z4WRR5c4zFKZ_et3h-H4L>(|X24b`?(I=`T}x=c%<;u+5B&z?9W0Qm55NGK?=RCT58 zXK!tWk!bLMfJ_@nOz!Bkk?+ac)0gY3F(dyBX>y6U>H- z*>zN0Y@+14qltn1^`why2PDy(Q7ckPQq@_j6(W`q|IiUGgO%?dlXG$ zPREhy42hiOj(#cCsEmiX4v~>_$lqV>69cfv^NSKDt83JjQgR>0lah{tImV&V`c^xcp@y9_&-4^w6s=07%~5NWdjsP9Dur40=MxaZk9kgT zj-sSPo_CDiqg%WIpV9?=VRyY_T5nqI(XZ$SgBA@hH*L(m5w(BX9dGB>s;LJ)UKmYX zbDfX1A;dV*#WYv@3ux>9U(-2WrLVUXqN-0=!FMIHao!P#d_ zGF+boMJL74v4-tYNa0M!k4Ngx#WniUs(;`g&0G}LrmC=;&>tC^)Bd*l%%-uSpOvFB zSF7&Ax3LEmI%j&4eO>ZP^Z@f`_i_19E?v{sUIqI8Y(dQ^<5u~6sNb(J%OrO1oadXb z0&W%)+gk;WS52f1M~!?7X#H>WC3dezK%yTx-9Ldf7pk&G^$Z!RgO`uTUB8Eup(y9jOMom;eU1^?S=-2RsGSPhlh_;XI*kr;oW~I5YsBSzOHR^rc8McQ zHfJ~kBMgxQsp=AvnqjIA7m(D0gzr@b0!F^&C@k)%TVk`GVYR5^penpP<#uocB6Y{3 z4h)hG#^bwPm8;JbiPm(p(Jg#8ppf5ACNw@6uc1$sZlEa=#o^nb#WC%2 zermFA@rPL{(p9~LcOubO&qV4_FdB~7IoY{pQRbVo05nFM?kMB0pT0r!@7&W(0!yJ$ zW1W1s1Maa*l<|Uo>$BIR*)vc=eeueaC`C{h}3@K1? 
zPfXm|jcPd2i5M-yTCAe*o`GVD38bCg-3?h$V6egLXM_JOo|@gm@;ce7e`1{WJL7Bjny^8ah=_K7w);666N^k(RM+r4+6Q8?dc=hI(Cqx{YraNoUcd2FrgRI( z45(i~c)5vH9-ezC?r`a-BRTegfUOxGG26qIn+?O^mDKZzJl z)pDJ$aj{~!e$7~-<>R=KC9k!bkVU2Lk;s^#F8M_vrq7=uz4KFPOg7%r4Wzg`D>_@W zx%#;|9djpz`0nosxf%Xnyuqcrw5!XKsoXh1ewDA!bh$ao4+f^-$dW1BTV~(gOeEDo z)cq`Fcs%VZvK9wA#gE?IQ)KXbPKoBX`nqdmUxmqnOCYvfl6rYGBL~>ToEYA&uH9pe zvgmI%1k@wR(=;g3(crmgs`M608?Vb6z!=P|@W-KTOjmr9I7v$o)$>upnJFO}=~>Zl5xy^x>zU=hvSeX{n;1aLW3#azyd+ex5#ODUf;H8`8(#HaYq~n0g*J z;jU6nMK=tquJROJ#&u!H8#0h8)?_>7E&%c#_UtvFboTbunasB2rBheNPdbOcg8vy3 zOVQg(VZmaVz~|X0=Al__xzwmZF8!=E$y~X0_p(Cgyu_*a-Wr&=3}DSjPj&0DUws04 z^+EY5DkL9R0s%EMZ2K+M5IiGI8w^`s+l9x+V6g-fro3wn++UF*K!WedZ5p%LG$6ltZeR6YY$v{*Cq!?^N0F z!%|sD5>0?#Zf{ZeMSs6x7cD#9DYvi-fS(P6=4QkuPSy>R*hNz$ z%-TI?{MQ~K%j2+d1@wkeQ4Y_=Bl9SH^vVU;Ga5WZMktX`GUBMT z6FgLl=ytGzVW8S-%a)if&Y;vcMTRUq@ZYp#M26ys2k=pP#O#go;E%|?A^g@e2-MuuIw4l|8ApS49nfRF{nk>k+|=3@p;s5j${;} zBJWrJcIVYsud2Qw<&ij?r|6N3q)`7{>6c&oO?r5}0=(M6T?G_y(F?y09|CdLeC)Q4 z1ZhP8h6-^$fFaOPU7yu89i(5n6d7yV8Y2Cy^)#H*I4%m?>krrguprZdd3> zodoFo6|ZKNU1lke%qd79aoUI1?}iDtHnAHWkNRb7#q$k|8p!j_3k7~gsQhnqBZm1; z0)`BHvXFwZ=t4X~wlu$gXF?zF(aKbf*qb?TlVdv70I8#y*6-BOK19Tb5H6e+(|7OaLuhs$% z)>lUqlD1=eTL;S!S5QzeC#tSiL-+%>yffuKZDg9e9hkrBzC(t?P4NL?e~*Zmm^?Tr z|MCz1pClAn|uLHSd1L8h%=@ZfF-o z@lcv5N)TVvqQCUD2@T4xH;4vq@uw|{ACURAEQvh7IVSMr>suizcMus;zTm6&F(xR8 z5=L#ox*;;HbkRycnILXsTBu+DO2x_>9k=fEIKzFr}o*n&oIx@tS0-ISl7c?VK1Oce?D<#I}2m*1RD92H}O!}cE zEEOid7R~%prEWPPfKa?)I?i!zwWqm!6nY*#?eYYgx*SCqTVYVpAjIqH2=>Q+n!I`+ zt=rxAhN{QrGSWE!k>#nAST9O9rX?C6FleOV`h^Q2aui?9by}q+5MI1yZuYlr3k1&N zJC7DtK|!p)Tsbt7LT1;^f1?nha4<78c|^(GHFY88I}!Z#V>RuKgyPW@H(D&noo+^Ni|x~6ijBZ;^o z$>{orNvB31`Dr?cJ92x#2g`tXaWHT$TJOb)Y&!b(i%OiGtvtbiRSmD?C1mT>EFy~% zQ01t)Nl{QRB(x8(M8^ys_cyfM^N~sANNYmY`Z8yM+1YQ`S9|h@HB@_2>A#Biov5m1 z2ju|1vY=qvtGwt_Xhc%I&6T8+;I^Q*(t-cMh%J>Zf&m3xrHld$m2F;yKq^o#0tQt- zv~TiyHyN*NMyF;OLl2>}J)D4;`s-gBT@QO1Yz}7baGxdEL*YM@jaT!Ih>aU@`*KIi`SJvlR;S_LHVTm zuS9@E^(0YCxY+LSzEQZN&awL0rw-$W&&$jOf#ql@D0hi-8`IY=THlbV#3&`Q0_};5 
z()Q(q_|{)1f%+=z=Wt~bi+w^}q7p2je9u2O|n8=YE2*TwC5 z)uPw)DGg!H!l|q@4@oAfkHuG1xu`+)F5YNCv(L23A3IRMtC+h-&`c7#zuG=r@Xh}i z4SO>JVWA#4{z=RM1Oq`gS zKaC{F@mq4~LRLHSN1u*DI@U$&i}>N%Z<~zlwaYs^kB4^UAA2l+HJJave@kSW&oZ~+ z^(KD!deJhM?*4jsC~3}!yIX(OtkM>Yje_C*x-+8A8bgxc6Ft?3GSuXjWu_Q}1}acfB~D)^+6dMg#>#F1X`$vo+9){SK2T6%5+FwW=FZ3-G|@ zSc~4Bj3`I1nh>zo%<^U87iRFud;p=UHbF?e)BO&tF840`^6(7w`%vq$Y%V8qNyjcr zL+#Of$$Rl-FOx5KJyi$^wSUZOI2hc`nwZ@@Xt4(v=AM`qY-*PU6+1t_iU)Nalc_72 z%u>2QsWTg%v2G!nP!AidLeoy33hRK^;c=hIz(;mKzbkSYk@9^Y(ZJnV>2`>kBqz=tO`Vo~s0et9F8hMhYXuz(s3M{c_dd*>)7EecNFgrM|ngAQ{H1(b!sYC(;Tg zSO=F3(X5lyHSzSWt7DWT_8J)TN$^lLuRS@{U98{N)e(pT24tgsMq2FQV8v;7R=FWs zu=IKnq-AF;*9TxFqx(UJmQDW~VA!rH(Oer@yCV|11I_}SCP|>?Kt|Z_>CYh+V((I$ zE7e;}Dw?kb?HynB1qHT3lY?2heX{lTd~Z|TY7e_ib_7*mA}P-iD~g=hUqxAvVvf1R zT5eAk%jr%SY8O_p?dai(&-zXp5M{(>vJm)5;Ray>BPu7YVrIcn9m}@h{4l*&R%u%I z(7r#%TLBZHOZW$&VQT_{YPZSWQ}`*uAw61&c)=RIna+4oN-*awz>Y14JA}d|cfw^0 zoLKY^1b`W;rW8$9Mn&as(0h;)G!h&2=K+XjMC0Vg4Y!a3ik?T@@Hn@* zh5Y~fkk{K^3CqD5Rj7Kjy#GfjC#0>+`FqfwN53RCHuq8}UhT<&7C?gZ?!BIw_o|^1|n$!M~3sb8WnF{rzp8E8V7^H_oKep(MI#eshSLx6$6W#6CmH2ZSh z)0&7omp~hEfpd5Cecrn22yl9W0OnCMYWn0HRJ#A(cXgLLNeI2<#XR}-$|)h5ylagG z$+>=Aouh;YZlfa?;O694IV#r78Iq30H2(pFy-*Tl-11xQCf^z_e*U7iKf(O z$jv1?hiQh;1z=-s58!-+wCDlj$tm)ydA7XENrg-*B@BEF=RpvGM|)ixY+B4^SAEj% zPL(W2rfB}-qgqKui7$ZuTNsb7_wTr-SP-O4FViydwt{_kYDr1zntZPQ8oQg+O z#tyR?Sl~>{>=MW9d&>)Kz+Gw}IdY<2v;w~EjC4{|zRU$&oXq!>66I7~vsWpSe-9Dt zj3M=2K6Rt`RRmE#j#vOG$iO}{rOHSoZG6M|P741!c(rwf8JMd502N=hcl)3CprF0B z&r8n|WH58tG!Vw+13ZYp6E=oDf3YPHkpX{J29X5QM;^9Bx2^zlB_3q>fxrrIBaZK{ zE|W`&H*#YGKFeYEl1@PNeg_Uj`CGb`uhKZk1n8H36VA|X?R+adPJ8q`1O2<6hHa-VR=6 z-6)19wwTEeAWxX?>QTzltw~#50@+vM&war4w|^+{0=&&E;2&RXfBb!R?G-6uM# zhVe0zv#O2uXYaGUDTS!$UjZt4(!!n?%;>I6YF09%H=`{tYbRO{j_&Gp(o#U$6*rDX z`BW_VSMEks!fK+w5zX9Mm2+-}+NMNgTD=tA+>Tfl^f}@E!r&I_thC@O_s^#Aeo(7!L&{l>rcH41rRz#0y-D zM4j_cYYZ+23#eTzFl*xuf%^4nZEG8$g=@J)bHvLMwqS8iyI5!xx%K{Clg6&`)OV9EqTd{8 z@6okW&Je@XIsHutaevYuF*tZBKe*Yv&uu@-h_KhCkQUwqM5bpFqb)=3sj-pEYUy{H 
z`TG(!w@1j@qopiosf8EcR>+kFRX{s@7XuEc{n2OXn#UwrLJ$y+WgGK6lh6*3>KeC`Lrs=uffE0dCw5_54O0A=s@PCQAdIcX%%OYD9PsJNI_b(2y0Dq}{5 zmScsr6k}+F>Az1b`t#!)>`1%(v*!SpS?v#8`6 z(|cqx-`gR2H{Kxdjf$lJf4pgNK7nxw4Ap(E&R<795;8=;h{m3Y@Y%1pUlkAnYZ52db?FteDRe|c|j?B}K!1w3tdiNe%m|gux>7I9V@ujN%9AAqzN3bY1CU>Mj zhwJ((NB3$q>61hxi<1@6Zur^DQ+5gFJey;^`*UNtrE^o`Gob5k(iARt#JCv?lCrpm zAUjX`nFdxx$?S9yE;h9uYM%_E@7Te3fr%8 zaG<^ZwvSxv*k7zy-;XqE{*33kH|T!atT32M3xgC)fe#SoJf$pcaV3p^e55(XgUehG(5G*D@TI$NyT zLh#O}5e;GcW0TfTXe;-%XOT2PM@d=;#qv7)E5eU%eNU#gt_+@!sb#Hi$J)-JqZLn+ zXlh2d9uU`ZzYz*P2D@WB9L?O;9E6bcWuV#@Tb5-jOIJizVT`px^xWY@kQDl67d3WV-tnvLGb-a3l?4$FG=yahS(1Yd) zW|dy8^qtrpM5XRT#2&LtJ$D{mHVNrMTg27}aU)Sb?mo4}vYu(3yI9t)>$tJkTF-g9#JL1l4g~4_p3ZS0-u7M~lri z-n^>>TDEf~ti}aEu^)Fjp{sd_IkROFX| zUUE6lwa&|QI)n8X=ZWpQz)5!DsjCnHC!f2`rv8chklj|4f+@R2?Rv;&&uIo>&-v-a zi1ZVw8I#VD%8f;U{${{Pq37rd4BgQJ-%R{6*EQVC&ab2OHI0*AARt&pkjIgN#GM>C zRN9Ov#i8?4S_PrwkpZi7HESp0FjTVQexpdpbjsijj&lQi$+TT_e)#_NL+^Tsh4tjr zGVLhKouoW5LF%LLiZvQ&l?LX8P`N}c{UFvXX`|LKc)Aq72>&o++ z)cfV{5**sWe03{D2L2aXyHP=d(BY)S)ES07r93QFLNOBHEcqVzKeYx(+z<9q&zant z{=xN>m6h<+2v0^{E$|;@dQv8`tN=BLt3*sgq~uxPeegr(tQkI^S~>qaO`{`k^l~^< zXoBt-4-AMmJ~6g9RJgZbH-!@fjmhu;l~89L48Pq~ZoUk?biHP9Yuo#NZz-eXxMDE$ zGDK@-2Y3shEE3J^qYnxRVP}i;QrK()i-@eQZahJmO`A?YUz)oRl%~megBGr5nEV%c zVfOt8s9S@Pdnpc|NwH=6fd~-pbN9>26D-`ofQxl!m$v&Nscy6<;nS8wV=eCuVgivI z{tbhL*IW(M%Hyg-5G(@l)`51ix77x7+Dpt{sLxz>s4Y#-g7VS$ZBG>_tSet}Ry`hT zr9%c_3(07d*MG2I$W^H;EirmfM$fqZx*O2+c%IScv|oi)3=io(+f&HbWaKZ?C%!@k z&lH%E2f-`(E%MfcW_~Uo!*Ef9%6LG+SK=$bgw-gQPla)%b9b~Z8w(7JZS?M}YMcYL zfAvJh#0eWjK>>SWl!WHYmT5?|5oSYA22U~Gf|lKC(lI8Q?F3rDgM&2(SN&NH%sMkQ zzW^6ZH+v+;uwO*SBN)+2UB7d$Nh$eb6eu=m#@B!%v%>6vh4W(Y0PJw{#$35gm4YGu zu^Ij=eQ8JnId`E*^C0@kgp6dzxst`zbV03(;~eM#lR+c6#QUW8ayN~4WZ^5yN96BD zG%5!DHBXK{#sx6d)?LQS;bORTFKc?!B?|AAXS~LAZd(YH6f!*SEJ^IQTbr(*f4~rU z3sd-W)Y%k$qgQ@ug>&Y-{D|qzB2hy}bl<3=ogO^5lKeBYNSUvV#(mG9vsLx>9U}e{ z76N#Bc8o%cq_U)r=b69nvpid4o_Q6iaa0r!YF9ro!X!J9)pM{y-r1VL*jmJh#<+CP 
z=XE_jYUn$BKoy~!G(Ub(uQOi<-Pd+YR=)9*!e9Mru0uL{iiumVlMwwYoaBW-p<;j$ zPbnf!NL2lcSx^WHhGX7>d-|x5t!=rrK@RkG;VP?cW=I*eUqT=6Oz)tAm{cCu3ts!- zoz7-Th=RFNlWLnZ?(&!~f*lP1^M;}^W=8#ZDZrAe_O9l+9$VF%o3 zdk^6K>^8F!RD1(?sBy9g2Xj3ZV>~$rxBbW$AY9hXw7~Sqi%o;Ev@RiPQClgsP&uD) zTB3)xJVJqFgeyP6zhMWH6Fgwxp*y9?oo0@}>YOdzn%`0yYzmj-|lH2lt!u@FZmi(tT^76OZE`HEz zDs`!-Li&@*Sg+o4F3*U%*#5q1QCgMt(XSwGl%G5-FaBOFO`99PW-3X`wva~)YHe0J zPn1$Bp=81vV4NcwrJOe;y*Ifey?5VX=+O3@9>DD13gt3MOp0v8)s|?0Yxjrzccx7P z*Kr!Q@J?Ho_0wY41A@-T`RpZ5#cifRi)CJXRkiJG^d5+#FCA-^Wk+t>nY}onFji@MQ&gH{2<-EgE_$`4ZYWcj1n+|eiJuq zlg2+mnPSl$$K>)1ElB?C z(`$4O`R{H!_t%W3E|C8QJE}hrN9{@YrQ?n`>G8M4Xwgy)A-L(LB2W}{DWdSHH~51d)R58 zUy!A{+19+Ol<`l^)P1~y!llz?DjGNDh`5u4Aaf<4>aaB;-AgMg0e~@hG3QEqpeNLt zhH*V0ZHyg#o4B6bsrUS&+mh0}lDi-h>JS?--EX!~PxPS(qktM#QbKBG5f%=fzj?tV zY1a5GI`Z~L@x!dmodk?W?e{l_vWroEKxs`RfmD^e zm&Y3tI#?>IG$7OzrPa|t~p$|6P!M4H}y+!>p%hs5J? z#aUm^;7E#E{Bi=nzV34Q++@3Zn$cJSTRAV=W-XPv7FDZ|D_yc`=|zr2$cc<_Ff%dL z<7>G+zB`z9>$1YCT4T)>o)*q`fhpy)$<{66Am{dR!G z*_HEb^qd3uG!A?V@?c#=O0VOJ32JSe%uT=k5R`no zGoc{mvW(_YETy0NKnRi=83^r(=Q~jA#-z2EwdxcmL?>*7Hr*W03B*e!bx7ZpfBurM zpb)WRhoKqIOUuZP{gb{RnSD+tfVAinnE{*%29WX1Mx&74kE_`6sDWLjdr(`A{S_ZR zzNX7{B?g`m?bxHx@le9*xh6f^m{D=;sSiBn4??$A^ii9;PS_{txADG(JMm;8Y%6}o zXBc(Col18{^*&iB`oG$H%b>WPpna4GNq_``J0xgu3vNk(#ogU)ad!w3T!Op1E$%D? 
zcbCQ8U3MYJ!hiF-^;W$fZ`G}PKi&RxYU`YtopXA6X1brJJ49akf}Rt9B{{(b*+Ge8 zbEB*0uj3`wV+9$57%TN)+%_r;d79$4CFS;3`A9zfxCQsi1~(TbDs}nd*X5E!E%*WB z_-M{~Qh*Bpks zJ&R;-%UxCCM1K_VNb=JW&6Zb+zi3Oq_%Jm@AeoU76(fR3^Ez4RLu|cVLp%_j`RD#a zIwl@bFuz^Pi_XVJ^tNYS1mZdWvJVoWKu7U|*195#RIJS5_f(B|0lHx=shuB`S11*q z@wn#5ReS55B|bHZ)5V4292c)bzLx5$VqWRKbo?*U$O`3SBcq8=i&>G%@!8+#ZjGn3A}@6+f|ZpuEbR_X+TovdVk&~qlAl*O?c_r$>St*8Epg(DNN0B8KCqGv8WBrpip}zpH~>8-|D@WPcK+PLMJ6 z+&#sZgvQ+;czY19I#DT?I@=}CYG(F2(K(r8N~JC(Dn4rXdf|j5Oi^Q5zeY}+=}Fh- zvMh-g!rOOJiF^n7oWV<`*g5B~6-6XrD-`WC9- zoEpNCpSE+8fkjojYD_D)wRmV(`VHphY~e-M>S|H0&dBQw)d1~}sT%O^Bk zt%b_2H7s0Aw%v}Zwhst!g$Zf4++g?!nY>PVoycB$Qe&<^|H5H^WI4}tCU~LN2jBH% zalZi?yY?KJdowvh*e?i=JL5cEx@7-qGmCVM|CHmEg$M@Pn^29wDQ%~e1BEj?KTgPc zp;gKF;>gaLTelA6A&nCP7mw!$% z0$b_PEeiNOsU)F2wYUo%R#mnkra{$xBUBce2j7i?s;fV~Z*Ap1ngbOYW2t+6alwKO zn$86uD^{v?TE{eb)_dSZq>9Fw#5{xpef5k8?qXwkA<#3>O{Z^?TtNwaqI0F7U49O= z162c(8v-Wn*4NPw@LehdMy3NX!~r@(SmMJ##6upfUTZ zRDdW-J=M@$nTIj_tDo(%|KFh3x|HI3`_ex|-Ub7OlRfn>&4J>0610ergRK7$`2-XheCc35NV3JjN$Xzw@;~JM+#6J@uQ(ffh+{uu{`jXQn(N)+D_I zDN|Ivc*Mpy?nB#g(&95;b2p@*M~uiPHn?5s)%2OTTKHsg)A^cJ^fw0@5~GR!G1pUA5>vY2feU=>Mq#j?fsiRauI%`9=xK!B0u;?ei0Q&5^aA1QKaA~6 z0}4|zh<`%RCk9YTC5Jq5qUw1&?G^(3(8-1fQk4shkV!>CmpIF|la_KCk z)+eOnBmk!lD4+EH2MZvfBoilE1-?aDvwX||p_w-GRWt5|Qa}<;PX^9hHCH#m6MLm8 zR{U86J{Vcv(9a2IXnpEVon0AKc>HdCal0>`o?ft9^BL=#HZ_d?1fl6jeHR6%g)n`yo6xs8A3n>xgx!u@ zOyTS2dTdMETOQ+iHbnOJn)H_YRA`qMoh?eqN)9!Wy0ME{r=r>xmN+eQB~wsQNle`7 zKMc?a+I6_XfXyX9T*2;xBfV9Z&xdp~>-{Nh=ii!9q2rmn5eo|(7Os{~RJ_=XaTQJ6s6Yoxif@5c9q<<~=UU2j%Y)^9WU5ZEgnc2D zrJA|DV$jyFKrJy{=GNy%!q!johpgXyRAo^4R)BgnLwXph?w=K@7k4UQf48`g2K`#6 z*b}m~IMAQdf=W;*G?k#f4oE;%T5bU5zOp$yegFP_uO;qpJC&~16E|es>OBB}sxM10 zqNiA%l2Zt?c=o~a^nT3!`TCAe(`_Is5Ekyj{!*rh36;*-R|1Sb-rr|2axQgbtJOc;hTL+KMzB;g?@0f?h7T!%eqDoxh@!3*}`SkIZ+%RQwUqP(yY7 zmjT0zS3Y`(U@BF1jcPey({Dn^|1G&6k^b`qHU)jcKoHyQL0ZrqwM|1LPG-}^+tIAs zD82dB27_*Ow9XmMHzCmTz}DJN2(Rf;i7P;DJ}x$ooxRZ~UF|~o)GV7!7kYJ>-UP3- 
zI&wa?Z{h75;#kakB2_J=cyoK4A1`|DBk;C?Is8XvYlEs+EJ$5Gk7Ra6_+RdC#TG1arrwhgr1)$`g`#dY@80`s zl;4;i4$rwC!rWZ^Ps3wzVZ6WROk9d(h2p=WjK{9Uo$xwBQ^HTDFW^_Wt=}4FGs2^y z#E@M;J9h=x05yZ-sXNE7z?F}PI3M`#$(l4>BAtm;Kn5Li>Q^@v0y1RGRvaujvBJU9 z8GVsT-U74NRI#5VVUhPG5o~jwMtJAuwI*fd1^@+!o8Mh)WY2@zV~;zm1&hnc3y(Q8D-;Xv5OUD}6gJDf45Ub^?bQ1`6enRSyjdeHkesVM*KlsNv|PnbWWG5Ws8?dPLCzE zgR{U--+H-4n|8srRro4Udh~5RLCJyK0ta*eqG`p>cepUPq^Qscc7X0GY5kqxJ4IanN$}UQN zOicTZA}PtIkeD?WW_;c=`T5mY8V-5{bM;*`>V~-wAHHz;HT@7NZ383kQrm}?v9uF6m5eRzaj2jm5u zQ~hQKfyB=9W)+_yxUbU@nl**VryAXq0GG+qAM?m$8Ei7@lVitK`=(vIk_kBuraJCD z#Q{>^Q7Nj9I?2|V;mEg9*#+@bv~pR~Ghlwbd7@`-C?C9qMR~E{U43LxtYwdqn?zBf zWICmE>&Wz()zd-{gNgt}euPf;si#*f*Y6hDF#I>r)3DRFo07&2{uN%cfM`l3m2@Tt z(c?YYi@n*QgXz*ty&9Epqme6UEkQ`6Qevx^Vr(MxawvDmNt&37lmVeoIOBTvBX+IV zTUT66m<5e)_oJGE_kgA67wWhD=nXc6mXDM1%Fd*ei;!*lev5*ox@1-2a&a~fOKC5c zb3_ik=s%23(Ohc#wgYEA&F53asue1-F}+D*p9;9R~YXPCET-HytiXCNJ$sMex>lT#dnDeD-h&&!#jTC34u@ z#LX$c%7wBd26FQLh(?jBO}Rt<{lR)0f0@2>tRCvEKYvKRc9Yy+HDtCcF0jW15-E=) zW}a)ZZumW?)yBn?p9B3(Br^|PgLSK0JBf`J7l*p|NT4c4vQT+Q+=!6W4{1~Z=_`{J zA4>aM8*Vck?TXy!I-Bx6&!8zyb35IJ5_(Ud)`N8VagJ(HrzC}>npN!n?*efMda@y8 z%@2C*te9NLtr#a0H)sJVD+I}YLaF}__bhj+2oS|3Am=Bm#HOyLdc@7-&+(6>8ZI}e zf1PrqBc2^CmYt@T6u`}&D4!MmoulO`whlJ|3Aq`c zKMDJ^*RFm{Eh~BnF=^Aytl|OR4^bbjW8rXBXp2MM$EoU3y-N&AZn}-d=}AJ&aXOl? 
z;-5%<@cR)JA;}=Yn~g8f(?wGMCtt#tQdb96m5Ps`iLlq1HriYRf(scR{KIx4EPScN}}sIaR}{csVKzCz*E0(|wC>%QRD%{hFUEkwD)Bd{mH2eY)W zah*zg>>Y($(w|QnBfoI(TvwNEuAQd45ygE0Lh%z>{KZyt4T`6AR0Pm^8rWJwz)W00 ztYUWU!jJf1n4)fjzuz-hnKtSkdJry!%V5H&+WEYJx!OfR=}aA~B-en3sAo<+TF3;%Myv=>ZsT|H(e)+g3glkXK->oM2r z;+7UnX-Opv6LBmHDFUvW@MSndozJ~nK-um)>)4*orBt8-oA|lWg5bMW(ZI!>2i%tc?9=G3d6b|wfQ=?0RPnIl0w}rK<6`FjS zR^2o8zIK;A`L>n1wwfhMtaiY?whEX{m>7P-RBSqC-^5*SMV%vhOtKYaK*ZpN0ZHSspEDf3=EC zrj4GdneBKLf>|^^wR_|ubz9Ab&-$lQmOgz)@`s!Md;`UPgd&28rTGih)%gJdkr(e-M{61sTm5m`ozP<3=UWyhw6_=4={9#&_#J67ytrX~&2lA9` zf1UN^S;-QqI-xj!MYx-vrSFO9@?G9TfAGk;*cb+_TTx@ZRMy9v#ct_D+Ea3xCT6J8 z_Pl8YJRM1k=;#Eb?!^66K8+Xik>n}XdnKvVf@(6X;N{Udn2aMCJmf3!hw;Tl>O$Q+ zl?So{klUbDnQery*L+1Tl3kxtSVn<`t;Fj`dL0$f^br}IaeuIjR;Ge{3QgcS`6I>%4r@swb$Pw!B4P=0%XRB=V|Q ze&7HdGrb-S0UU8A_Ml`_$N(l6)=_H2F)9VhFY$#b&(=s@Nib0Le{-g1C0p!l>; zUn+?K6d&G}H1}r3=F~_nGZ=@r_v%jX(IU*pIex*`i!fd)xmZ_qEbkZeLVt~(xCz+q z9U{RG=mKnp?;lS}CDaD+=-GbXXP(_;5MJha>elM8)cjH%PZq2TMldgwZ9FZg9*0M) zUm+A7r8pfEmwWm;ImUEBSO220_}zH!>W_|!5&72Z8z@PU>=5e>VVzL_Q((x{*)-le z42&60GZU@f(2~`!CBhV$C=~m)yn)TLk-O6~*jQ-J7FakfFT2XP}KVu=zC zq@)xC3V;P;KJm&TyVj0+T7>!mFfBIAytbEaopB{RrH`iqH22)bBFU3$aCb zsX_#rD7!NS!!2zYCU>D)#}E#>YhRF{W2FgY`EW~?@Bm!;s&-nJYzIxG`q-QQR=Wd9 zf(i9@Di*}@8aEU9JIsdOUfwB1`KeYwFOyR8BO8K|kl+^=8ltTTbZcdDk*+wnnahvD zY5g$#8U42y?I_ozwS2pvo_@B7cJZ=JOr!RB$TEC&S-DQ|>v{I#nVDU2!Rfn8>ky^7 ztPmWSU8yQnzV%XrZEkW#@)VCglU_jeq{0g822jvw7`*DxM{YQ-4^*v6A>+O&cd ztIKtnF)R;C8$6w2_)g1NP0OgQeqxF3JlV@FzaMmdJEg5ZuzyDcdIavau6e6$_FUQ` zVex5RwFf<0rkw{%0@L4VW)}6lj|}UrR@nyzo|3m+&Kd7mCStc;j+$gH<1KC3RdtoD z`LK+!r9{O-XU4`k+y60it`!RQl(y|3PwJB=!>oqabKd z)K+V5xz{1&e-W4&gs3bzKounWs1#%4X2yi^5UAcN6KbmR?5bOec)irH|8T>GJ8pdj z^m-{&d=rjOBCG~~@A&_bF(|G~YP1Y5(Y{POG_@dC2a92;SvMg-+^C~Q$Q4Sf!KW6I zJyeQ{ikMZDp9Tr-5vXiMwX$3&T2LRJj!d`YE5J8i0yRW&Jp}aG^zu4{x;mrEEfCc| zb^^FCMeSDV+?yVjlEK>$1X`h> zE&&ZOd2{}I9szCNAagE44m!$`Ec(aI$aw#1-Z8iObx1xBsgDjNoO@#2aac z1vbPYnuNn&WD3qNKWHNs{^8+wyXT6MtMZD|6^BgN-n&L+$~*Gu7SC?ln>;py 
zl1I9Ycnl<)dRWHLBPdk`CpWjN5%i~n?i!wfNhxjG7QBergKLZ+YSx7c5fj+Re2o&0 z&S~8-t)QTwbowc9h6%t|QMTGx;<|h!p$y>(Z())I~MrvB#Hr)>XK_8z~xxoZ17qqyLED0hl?wN#L`1T@i;*|j?fHQ}W8wAy8gYcLiPZ~P;p#o<+kGi7#2v+CL zD-hQduE21N*QZ0PJ767*w9C>Qp-@F0%ztzI!WGlyo=fl0fhI8`Xhxc69%D z9mE}K&A0Tew4v>D=L|>it9jctEHThB@_%sl3fP0dvQsn#{oOy9`pPS*TVZqv>^&F4 zA6;?zo+LtxcfScS*&de7bJNvI{){O;|X=e=WFB)BQGSLr^{v z>f&aLl%M_=iKqJB^X)$X{687IMQ}F$M-ZO5|B?SYj2?sr{wM#dh5yyd{{yPf|KIR` zpW**iI@bT^@0eFIv_Ch$ruxBCcyK6sVc~W5+4mpN&xoRa@5GJoKwtP!LZfd^kC{(u z#9>x`B?KpEh`*@IgC}wcllzD{b?0Wi+vW~WkK$4NeePU&{^!`ODdYn&TZ^x@0C*z! zMIDCGCX*t*wJ;*N<|~b-J%Y%LMWpN#AC%0AWo)}YUioHQF)xLgQQ`g3Vs#7I7rWx; z8{n352isYBo~P-%<+?|{%Vp_iuW8_{r6!s1*dvY~bx_XstbUENw^b0%-CBb0&4SND zy%~yc1~0K4i~ieVaJlzY>#@(`H8o%zjz_|DY&_5&Q*-o1!KT+-<95$LrO6b0-pE3` zxt$%eX$^eJ7A{*ic6T+<^zej2$W)ipp_dapdmVKzHZ44QG@SW4(XyXisUA9lQ~|Sb_cOW2U~Y245PV;ZZ;&(01vsLfz{(w)SybE0 zhPwf2QPDL5gbfTQ<;Hs-^?Mu%hHVD*aEf=h*yv&u=AUQe+Uw+LH;I ztY!l{7b!bT{Rv6qoz!1B`yZPrm(9nEcCUmwyDTiO1sdIr=bAmo2f8izuB?pX1w?P!IEPCeV+0rew-3DMHrA=rO6yJigJS)gjBe0{ zWHw=s6}^Rtc>xmjZ2VZR{7qHk$Wn4&3mMY6nUd{$>$2jfJx8Gwk}Nbp?@x@#RnR~w z#2l!dZfGUYrGcyeb04wJvj-v!ZJrXdN0-7re(D#>rt`ERL=isDm5xEGfk)E{&ARn3 ztLv0coLt^*D+|u-{R^Dn8Dtr(Iaa{a#wD)9vPyp8HJU{>hJY=9#~%Ws*gH>~^rdO> zp#T%(L%}Pt=9t))h@t*XVLajyw}dk{fWyU@{W?w94S0%fc^~i?z1zPf>?qw%(4Ryn zo0-jfcAkC`?KesLP~*cPuzd|2MV+W5QRf!tv#TBTpY$ZXDBE> zZnm$FfP*(@uz6Ri40XQ(J_T5}kSekQ0b(X?jn&zeyi0%C=Jl?5vrdy>k4t6i(QUSD zRlH7TG98VU`ZuU#&tjRGH{3aHq%rVA!PiYCJ-mX)yx#4?`tsrkj^?|6xbU4>+zH=j zR;_zaZmCRQGw|`#S0)3xSh=1vag}iMfTGYvV~6R1wr?F+5i_(b7c{_?;Jb5lQ4N1G zW!yR4WQcY5Ud9>3mcAovXP_Q?aghNVJp;Jz-DHG%F(b|TC%r4q`gY$(t zGmi$m>CMsHpuEv=G`YDS(%nquR2?aiNgEiuRhO<)eY=uZ`})!wVF8kMs>%_YSE+_m zyY~ZI=yjJC#7zvRH4BZN=xY72KA%_M$XdyTux#P1K$a(gH-xcHhf$EN`w{KW(>6aP zPv)sLI+i)*KD~2kgNht~@p+ZRxSq)ykRv4xJx_y;D0cfdq};AbI=9(<%5xp>k)HAW zp)-=sb~A2Ei0pJ)vs~>t{ut?VVE<6rJa47AeBBo{)YnnFW*e9{7*7lRI_4sDS&D?XfIJxCiAu{oHH@i_b#_ z86d|_Ckz!+>LgvQ=Xjqg+I2tyN$|Ej|GQJ-n&l+J`J!R>gL$=3y*siZpGIUuaKF*x 
zhp3GFNi2JV)7up8^%eb~dGYgOyFYV!fKqBvyM7jOX0iL<;%wC*lKp)|X_>?L+)VD&j8gp;V_S_Ou?qnj^0ry5vN_`t~53U;6(5qCO79doud?}uP z8NueJ?WDucPjq8<{_VDy$Y4Y|Gs4w*XS#*}^A=}~Ji~`d>B*+IYJGshang!lm0EbB zj?k}ymBUE3-K^8vVlt18)s4TA<(X8RrhZVS9kHn2d3F#ZmUaVMPN8aB(B{wZHkSmn}GG2g_1_7|F6}VeSX#&AQV%xJV{X+Uhn4+*kSkB zKQz9?PqrMhZs&bO|Cpv&wM}lkII9Sl;QLGn^T+ey=q@pHn!_(Ol$o51i;r4G0}za* z$FQ#HbV%Yi`Z$S-i1m1ka&^4urE}k8g zN!AZ)+hKASgbZ>ye{Wo(7*Lnx1X=qQvRSSXAq`-I_&G(hvgZvqBVs6W`TB$X=noGXF|8fj zS_}L1w-JYa`3F;s_^2MV;A416@!ETk&y!o9Q~w89N{;p72sJ|G2Ts7T=`3(E&WfC z#?J#9mHPXa0NVXyww=T~!e4Xh`}xs6X0kaMK&(Cjr#EvKPaX*a`*R&`o;(xUDfexc z9UPtypa~rFT(cdGK`mhLZgsYYkL%vuB9MTXW^UyAoZsyT%u^ULWjUHRx;5i5^fzNT zcRp_OhDa=m&9ND=(M(kQM;A_Xwe3@{^5nZ7$-2AFWrMxB%R9KE7RM6jr3#BnCdL>{ zKE2_HTuoKyLnA3aemly3Gjt?GtGmuOhkseb>mBQOnl7%WPG?bGg;8$!KIJ0t-m#$L zr#yJxV#zA5=BU}}S7r6*{uRb`Fc(35M)~)KjM~Btl#EA2>ky)Nq7nS@cQCOhVbHx= zC>5r4Y<~_RF-Js%UFw#1^i6-{sx8K3xq>N3!YqzuuaP8Ps^vVtiDhc2zs!Mi6kawp z-K>7Y;L%jGadrck(Kb6~sSlaOzdt$8ZRigo(o-9h(gUtw8)n6Xk|}3MQaka3a7#X7 z899?Qb=xGCj^0|%2XvTCtrf*{n|YA;juM*C5Lk;uAzzX~<`k;Dl4K^i`|Z&HfuTLhH3LcWd;^YK? 
z&v!ZY@gyH{880b*LGfF#GLLi`N#P>VlJSg^6h0{+$RF*`$Jen`S2v4%>+Vk0_?a8b z)^T5a2Plk{I)UT2)||!%Z~0OGx}}d;CD)v{;Nv5<8?_;vh*R$HZ_4y!qeG>p?8Wn~ z2I{x%IDVB8N4#?mdQZG&fb#WC!!Owmg&)9n;J2JRTP%JCZYr{~qbV~xA>Q*fV;@y( z!mp=yoA0_hEDR!k>|g2|FV_wokian3(*N>Ra7TYTXPnVn$~jj6zg@T*FV@I5hQ~ht znJ0^X7K%zqG1hP&L1r@AQk?}~v+Z}y+mkj{_Hj1nmg;DQaBAzmwH~NdCmUd?1Q=>q z-O@>59ysZeUz0hR8|umZ^1dDm4jslFi`ac@*sBZdL_{L~o;ECSJ>UtFGI5S&5U4c# z@MyZ#An1nm_zujUp2@qh0U@*EVf@*tKN)}_Cf3Db6TW+149+-Neq2tix$W|8I+gNB zYL6X4+ohv<;7V4Y#xbKZ0p-0;h|nI&AI5!@Py%O+v)_aQA;Nz$RBi2uwDsRZxUC2F z_3ehQxQ(7&P}HD)9|Lvm{1>X&Jht8}SgnL=3!){6f9Tzj%-m7@>k8%gJ9H=77&^>4 zq37|Q$N^tVd~wp?Vp*fjhRgPn&xP?ty53P(m$Y4f9yv{pHtcb{G_ns8Piqtieq-C- z@V53$7w3&!)@!iadUQXBaOSyin$rTTJb5BRtZi;;XZQ75D~JmSQ4?1Ep$IZ^dhY@I z$`_d$?%{XRXE*aeARwtmFn8nPo_E+F zF}7c4z24aM7#Ek81Y@hz%Fn;&RQF+%k$CN>`}CzV;CRDm;-0n*Z!3T9h+!wYDQ|9i zgOR&1(?M-XupN^>&wm`hU=k3@n6X3r}M?xC@pMR0rK?#wKz_*pw5 z3rdg@FOcIV>GcmLih8hmXl-r;1^GQ3%bOM9^!6~$Kmabn?Q?=Z!qPA3iJA(osK z1o_n7F%mt8A^5A$&BwnBzv7GtVdz~*5IJpB<`^Ht8WmB5RnG!YJYevVKHeem!*erbF^*@`bnc99N z;UPR15FTKw)K>H!dH>yH40r^`im-{G-M;T|Gn_}y0p7KfYbww_Jq@HfFGSlQW)?Jx zY`|}WYHO_YhDWGU8^B&LR%r~iAR2|gMAJP##J;}l$CsUyOe%0BH-%2@yBpuyN7^%d zYPS-Pwz5u5;}S*l*<@`z8(PQO(~LgdeQLgf=`LJd12euHu*T`~JL1>G7!6|D&YlFz?9b)6#ZIx%1pU$@3vl*Jea08Ci>v7aBWQ0WJ{lcjw17#0s4&FC; zM_Syofmv>wsnOGkok-YOc4BUUn^ zdU*BE11oxOdU`B~=U%W-qj!z|A~$@5F)21k^s{A)>G;ZjTYW*WCc@14$BO(MqHg2u z#eTDTM`0$+^^Pl}|HPy7;C8uRzG!);?k9aFYPeC6J?exq#0%@L z#*jAyCAq#>c;n|!UiYDpKB(l_Vc@)-gyxDWuMh(gb{qc^8;8Cb!$9)Eu%U^rO$ z4&0E`i6T5;9o5FK!@(}))w~1B2IAJvC&==(@arz5udSX$!-941>TE6ERF%)_vmRH| z?_(o*S${R+ko)#wg%$W)?nlMb%+pM{vAMTBR zE|b*7f#<|wFF>zpcJwHu0WZe-BFR#RGBNe>uFjX|e`Jodu)f7vygVh%XKk{dQ2FM1 zatBjCp@WiVet@NODs?jC08}){Zf$*3&rqRr4bS6%*;|rSc~-20mLh1eSEI4Y8cq6 zF9j<3NzH$o?CI1Fbx(Csp0YiDmLMPLp6X6E6BOegdQSq%S|AdKzB;yc}Ex6f(^J<`k%9!x-%DuDtYgCp8;E_CsLw8eu z4@KcROYLqypCn2uRs3$67~XUt=oX-DT$E zT>Efj{Ba$VA8nU!r=4Nd4xTcsT~!{v_uj=O?91^wMhv6s<}Mb|hOZiw|- 
zC=ZmrZG9ztYr%@%pzTQNh)#UzuI9m@tIqI~%d}GllFTOwyX2Kae2sJZ@j-*lCSV8WGx>rm(yzun08HY*V=2Wey$$I zu9xrT2$>|`4#zfCSls(z$>`>K@_ttb6-E5K;Uo2ul?8139XEPFQ(9?7a~PDxJ0{Ny zeR91Dqr~plTepC7<*DoAFKQJso*eY-HNLOYG==l$*O#|5UcG3&8yw2%{^)JHJq_E* zw#yCqWfE+=9_^FBZ2wc>A)ET4pZhR`-W2(uURo!fsFa~1&uoo7Zsjh}cuL>#SU@6c zBw*~?YLHL}{3sjTTvbORJU00}*#Hv!gM2lX=M{a zWwd%arpnfx>r|(xcfGb}O|2iZGBDd@o)0mj*<^kh3hBjt5w^13AXC+Rt6n&0lS14r z^~j`Ck{^51!1u}=e_=5r<4>9>1Ek?$JId2|_K zIzjwj_oB<{G1~~aoclA3lN(bKI3);T8vLt*B4`l_pS~pRUVu764)^@CcdBWJKL;aY z6N=QfyKosr11c)doyB6VhY4+660b8%ru1fzJ3IJ`zGlBW&N9glVgx-o*_Cv5$$=&^ zx;#MR7M-fzYH9MSq(GEJ7k06W?AXTOhXrH&ovB?1<)ATM3InK z#SZPXM;c`)kCjFIGiFr;i8<=rRh^zoV9cIY&C`Y`<;CU^`Eg+U#J54R!BykKVMJqt z0H!0U@~!9#_<%izS0-Wi8(owm6Q0ExI%_xPIVZE$Oh}uYJ*2bOW>n0UK5YWvBVh(+ zi;=c!_h$PszDdTCY<`@`)%Wes`*G%5dvotg448XFh4s6c_%5y9L{8fqu`)}OK+tx( z;In&uG!9jpNvaB5(kHEUp|LSaRrlt>Jwl4B%h#hYj_KYdN?4_bV#m?Azj@;E%K^{hom+f|IMyKS`Ngd*qsHQ6p4FO6AGVhk zZZUkkni}$)MfA}e(f=8zE8NS8p!U4i2tx(L`il*YebBx?Yx6o#1p@@gzjL2^^C@}n z0~uP>l4cB6J1x(Dcyqm#VGjlNS<}^ z;p1wDM@*666c=L2k`enz^%4o`qwuSNgN9VD*2inIBX?t{-PUU&eC0)m>(tsWdF}xe zr2oX=@sk55!g@9zEjvYm@hlkgCJ{glGyO2JCF7&rFDmpF=$m z*g6eqWSNpCPX|b z{z;-qj&rh1-S0aS_$Y?T%G_io+@+x&ICOhRJh=RSb@Zp;)prWSbK5`UK=&=shw_>D yFNBK Date: Wed, 22 Jun 2022 23:39:53 +0800 Subject: [PATCH 35/52] =?UTF-8?q?Evaluator=E5=9C=A8=E7=BB=93=E6=9E=9C?= =?UTF-8?q?=E4=B8=BA=E7=A9=BA=E6=97=B6=E4=B8=8D=E4=BC=9A=E8=BF=9B=E8=A1=8C?= =?UTF-8?q?=E8=BE=93=E5=87=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/controllers/evaluator.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fastNLP/core/controllers/evaluator.py b/fastNLP/core/controllers/evaluator.py index 84ca03bd..ac5b7c05 100644 --- a/fastNLP/core/controllers/evaluator.py +++ b/fastNLP/core/controllers/evaluator.py @@ -122,7 +122,7 @@ class Evaluator: _evaluate_batch_loop: Loop def __init__(self, model, dataloaders, metrics: Optional[Dict] = None, - driver: Union[str, 
Driver] = 'torch', device: Optional[Union[int, List[int], str]] = None, + driver: Union[str, Driver] = 'auto', device: Optional[Union[int, List[int], str]] = None, evaluate_batch_step_fn: Optional[callable] = None, evaluate_fn: Optional[str] = None, input_mapping: Optional[Union[Callable, Dict]] = None, output_mapping: Optional[Union[Callable, Dict]] = None, model_wo_auto_param_call: bool = False, @@ -279,8 +279,9 @@ class Evaluator: raise e finally: self.finally_progress_bar() + metric_results = flat_nest_dict(metric_results, separator=self.separator, compress_none_key=True, top_down=False) if len(metric_results) > 0: # 如果 metric 不为 None 需要 print 。 - metric_results = flat_nest_dict(metric_results, separator=self.separator, compress_none_key=True, top_down=False) + # metric_results = flat_nest_dict(metric_results, separator=self.separator, compress_none_key=True, top_down=False) if self.verbose: if self.progress_bar == 'rich': f_rich_progress.print(metric_results) From 8b1ed860334aeffeb3be6bc832429db9c9c35425 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Sat, 25 Jun 2022 21:25:22 +0800 Subject: [PATCH 36/52] fix paddle dataset __getattr__ --- fastNLP/core/dataloaders/paddle_dataloader/fdl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py index 8999322b..80a7a050 100644 --- a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py @@ -39,7 +39,7 @@ class _PaddleDataset(Dataset): def __getattr__(self, item): try: - self.dataset.__getattribute__(item) + return self.dataset.__getattribute__(item) except Exception as e: raise e From dabb8b9785fc68aa3eacbccb4fd430097a736cbe Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Mon, 27 Jun 2022 19:07:56 +0800 Subject: [PATCH 37/52] =?UTF-8?q?paddle=20tutorial=20=E6=83=85=E6=84=9F?= 
=?UTF-8?q?=E5=88=86=E6=9E=90=E6=94=B9=E5=90=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...nlp_tutorial_paddle.ipynb => fastnlp_tutorial_paddle_e1.ipynb} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tutorials/{fastnlp_tutorial_paddle.ipynb => fastnlp_tutorial_paddle_e1.ipynb} (100%) diff --git a/tutorials/fastnlp_tutorial_paddle.ipynb b/tutorials/fastnlp_tutorial_paddle_e1.ipynb similarity index 100% rename from tutorials/fastnlp_tutorial_paddle.ipynb rename to tutorials/fastnlp_tutorial_paddle_e1.ipynb From 513a5f875c18d300182ec72cd107ed8c4a508a71 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Mon, 27 Jun 2022 19:08:48 +0800 Subject: [PATCH 38/52] update paddle tutorial_e1 --- tutorials/fastnlp_tutorial_paddle_e1.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tutorials/fastnlp_tutorial_paddle_e1.ipynb b/tutorials/fastnlp_tutorial_paddle_e1.ipynb index e07b1509..a3cbcedc 100644 --- a/tutorials/fastnlp_tutorial_paddle_e1.ipynb +++ b/tutorials/fastnlp_tutorial_paddle_e1.ipynb @@ -6,6 +6,8 @@ "source": [ "# 使用 paddlenlp 和 FastNLP 实现中文文本情感分析\n", "\n", + "本篇教程属于 **`FastNLP v0.8 tutorial` 的 `paddle examples` 系列**。在本篇教程中,我们将为您展示如何使用 `paddlenlp` 自然语言处理库和 `FastNLP` 来完成比较简单的情感分析任务。\n", + "\n", "1. 基础介绍:飞桨自然语言处理库 ``paddlenlp`` 和语义理解框架 ``ERNIE``\n", "\n", "2. 
准备工作:使用 ``tokenizer`` 处理数据并构造 ``dataloader``\n", @@ -254,8 +256,7 @@ "\n", "其次,我们还可以为 ``Trainer`` 指定多个 ``Callback`` 来在基础的训练过程之外进行额外的定制操作。在本篇教程中,我们使用的 ``Callback`` 有以下三种:\n", "\n", - "- ``RichCallback`` - 在训练和验证时显示进度条,以便观察训练的过程\n", - "- ``LRSchedCallback`` - 由于我们使用了 ``Scheduler``,因此需要将 ``lr_scheduler`` 传给该 ``Callback`` 以在训练中进行更新\n", + "- ``LRSchedCallback`` - 由于我们使用了 ``Scheduler``,因此需要将 ``lr_scheduler`` 传给该 ``Callback`` 以在训练中进行更新。\n", "- ``LoadBestModelCallback`` - 该 ``Callback`` 会评估结果中的 ``'acc#accuracy'`` 值,保存训练中出现的正确率最高的模型,并在训练结束时加载到模型上,方便对模型进行测试和评估。\n", "\n", "在 ``Trainer`` 中,我们还可以设置 ``metrics`` 来衡量模型的表现。``Accuracy`` 能够根据传入的预测值和真实值计算出模型预测的正确率。还记得模型中 ``evaluate_step`` 函数的返回值吗?键 ``pred`` 和 ``target`` 分别为 ``Accuracy.update`` 的参数名,在验证过程中 ``FastNLP`` 会自动将键和参数名匹配从而计算出正确率,这也是我们规定模型需要返回字典类型数据的原因。\n", @@ -820,7 +821,7 @@ } ], "source": [ - "from fastNLP import LRSchedCallback, RichCallback, LoadBestModelCallback\n", + "from fastNLP import LRSchedCallback, LoadBestModelCallback\n", "from fastNLP import Trainer, Accuracy\n", "from paddlenlp.transformers import LinearDecayWithWarmup\n", "\n", @@ -834,7 +835,6 @@ "callbacks = [\n", " LRSchedCallback(lr_scheduler, step_on=\"batch\"),\n", " LoadBestModelCallback(\"acc#accuracy\", larger_better=True, save_folder=\"fnlp-ernie\"),\n", - " RichCallback()\n", "]\n", "trainer = Trainer(\n", " model=model,\n", From 142fd4760154bb51f8651f56019159867ea40992 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Tue, 28 Jun 2022 01:06:05 +0800 Subject: [PATCH 39/52] =?UTF-8?q?=E4=BF=AE=E6=94=B9=20TorchDataLoader=20Pa?= =?UTF-8?q?ddleDataLoader=20set=5Fpad=20=E7=9A=84=E6=8F=8F=E8=BF=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/dataloaders/paddle_dataloader/fdl.py | 4 ++-- fastNLP/core/dataloaders/torch_dataloader/fdl.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py 
b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py index 80a7a050..9eec6e8f 100644 --- a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py @@ -194,8 +194,8 @@ class PaddleDataLoader(DataLoader): field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 无意义。 :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选['raw', 'numpy', 'Paddle', 'paddle', 'paddle', 'auto'],分别代表,输出为 list, numpy.ndarray, - Paddle.Tensor, paddle.Tensor, paddle.Var 类型。若 pad_val 为 None ,该值无意义 。 + :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto'],分别代表,输出为 list, numpy.ndarray, + torch.Tensor, paddle.Tensor, jittor.Var, oneflow.Tensor 类型。若 pad_val 为 None ,该值无意义 。 :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch 形式,输出将被直接作为结果输出。 diff --git a/fastNLP/core/dataloaders/torch_dataloader/fdl.py b/fastNLP/core/dataloaders/torch_dataloader/fdl.py index 9b0ab8d3..09211f71 100644 --- a/fastNLP/core/dataloaders/torch_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/torch_dataloader/fdl.py @@ -161,8 +161,8 @@ class TorchDataLoader(DataLoader): field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 无意义。 :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选['raw', 'numpy', 'torch', 'torch', 'jittor', 'auto'],分别代表,输出为 list, numpy.ndarray, - torch.Tensor, torch.Tensor, jittor.Var 类型。若 pad_val 为 None ,该值无意义 。 + :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto'],分别代表,输出为 list, numpy.ndarray, + torch.Tensor, paddle.Tensor, jittor.Var, oneflow.Tensor 类型。若 pad_val 为 None ,该值无意义 。 :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch 形式,输出将被直接作为结果输出。 
From 8e0d03a3d1d7136a1428ad0856ace2a7801bf8cc Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Tue, 28 Jun 2022 23:24:16 +0800 Subject: [PATCH 40/52] =?UTF-8?q?=E4=BF=AE=E6=94=B9=20Trainer=20torch=5Fkw?= =?UTF-8?q?args=20paddle=5Fkwargs=20fairscale=5Fkwargs=20=E7=9A=84?= =?UTF-8?q?=E6=8F=8F=E8=BF=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/controllers/trainer.py | 39 +++++++------------ fastNLP/core/drivers/paddle_driver/fleet.py | 17 ++++---- .../drivers/paddle_driver/paddle_driver.py | 12 +++++- .../drivers/paddle_driver/single_device.py | 6 ++- fastNLP/core/drivers/torch_driver/ddp.py | 10 ++++- .../drivers/torch_driver/single_device.py | 8 +++- .../core/drivers/torch_driver/torch_driver.py | 7 +++- 7 files changed, 58 insertions(+), 41 deletions(-) diff --git a/fastNLP/core/controllers/trainer.py b/fastNLP/core/controllers/trainer.py index 7a84598e..c1e64636 100644 --- a/fastNLP/core/controllers/trainer.py +++ b/fastNLP/core/controllers/trainer.py @@ -286,20 +286,11 @@ class Trainer(TrainerEventTrigger): 第一个 ``Trainer`` 实例,即使该 ``Trainer`` 实例的 marker 不为 None;这一点详见 :meth:`~fastNLP.core.controllers.Trainer.on` :kwargs: - * *torch_kwargs* -- 用于在指定 ``driver`` 为 'torch' 时设定具体 driver 实例的一些参数: - - * ddp_kwargs -- 用于在使用 ``TorchDDPDriver`` 时指定 ``DistributedDataParallel`` 初始化时的参数;例如传入 - {'find_unused_parameters': True} 来解决有参数不参与前向运算导致的报错等; - * set_grad_to_none -- 是否在训练过程中在每一次 optimizer 更新后将 grad 置为 None; - * non_blocking -- 表示用于 pytorch 的 tensor 的 to 方法的参数 non_blocking; - * gradscaler_kwargs -- 用于 fp16=True 时,提供给 ``torch.amp.cuda.GradScaler`` 的参数。 - * *paddle_kwargs* -- 用于在指定 ``driver`` 为 'paddle' 时设定具体 driver 实例的一些参数: - - * fleet_kwargs -- 用于在使用 ``PaddleFleetDriver`` 时指定 ``DataParallel`` 和 ``fleet`` 初始化时的参数,包括: - - * is_collective -- 是否使用 paddle 集群式的分布式训练方法,目前仅支持为 ``True`` 的情况; - * role_maker -- 初始化 ``fleet`` 分布式训练 API 时使用的 ``RoleMaker`` - * 其它用于初始化 ``DataParallel`` 的参数; + * 
*torch_kwargs* -- ``TorchDriver`` 所需的其它参数,详见 :class:`~fastNLP.core.drivers.torch_driver.TorchSingleDriver` 和 + :class:`~fastNLP.core.drivers.torch_driver.TorchDDPDriver`; + * *paddle_kwargs* -- ``PaddleDriver`` 所需的其它参数,详见 :class:`~fastNLP.core.drivers.paddle_driver.PaddleSingleDriver` 和 + :class:`~fastNLP.core.drivers.paddle_driver.PaddleSingleDriver`; + * *fairscale_kwargs* -- ``FairScaleDriver`` 所需的其它参数,详见 :class:`~fastNLP.core.drivers.torch_driver.FairScaleDriver`; * *data_device* -- 一个具体的 driver 实例中,有 ``model_device`` 和 ``data_device``,前者表示模型所在的设备,后者表示 当 ``model_device`` 为 None 时应当将数据迁移到哪个设备; @@ -313,23 +304,23 @@ class Trainer(TrainerEventTrigger): 3. 对于 paddle,该参数无效; * *use_dist_sampler* -- 表示是否使用分布式的 ``sampler``。在多卡时,分布式 ``sampler`` 将自动决定每张卡上读取的 sample ,使得一个 epoch - 内所有卡的 sample 加起来为一整个数据集的 sample,同时为了保证所有卡上拥有相同数量的 sample ,有的卡上可能会有重复的 sample ,例如 - 8卡训练,只有9个sample,如果batch_size为1,那么第二个batch时,有7张卡将没有 sample 可用,因此只有重复使用 sample 来 pad 到第二个 - batch 中。如果不希望 fastNLP 对 dataloader 的sampler 做特殊设置,请将该值设置为 False ,若确实需要分布式的训练,请在 Trainer 外 - 对 train_dataloader 做的数据做特殊处理使得其在不同的卡之间 sample 是 + 内所有卡的 sample 加起来为一整个数据集的 sample,同时为了保证所有卡上拥有相同数量的 sample ,有的卡上可能会有重复的 sample ,例如 + 8卡训练,只有9个sample,如果batch_size为1,那么第二个batch时,有7张卡将没有 sample 可用,因此只有重复使用 sample 来 pad 到第二个 + batch 中。如果不希望 fastNLP 对 dataloader 的sampler 做特殊设置,请将该值设置为 False ,若确实需要分布式的训练,请在 Trainer 外 + 对 train_dataloader 做的数据做特殊处理使得其在不同的卡之间 sample 是 * *evaluate_use_dist_sampler* -- 表示在 ``Evaluator`` 中在使用分布式的时候是否将保证 dataloader 的 ``sampler`` 替换为 - evaluate 时使用的分布式的 ``sampler``,其特点是每个卡上的数据之间不重叠,所有卡上数据的加起来是整个数据集。若传入的 dataloader - 的 sampler 为 (a) 深度学习框架自带的默认 sampler ; (b) fastNLP 的 Sampler 等,则将替换为 + evaluate 时使用的分布式的 ``sampler``,其特点是每个卡上的数据之间不重叠,所有卡上数据的加起来是整个数据集。若传入的 dataloader + 的 sampler 为 (a) 深度学习框架自带的默认 sampler ; (b) fastNLP 的 Sampler 等,则将替换为 :class:`~fastNLP.UnrepeatedSequentialSampler`,如果这个行为不是期待的,请本参数设置为 ``False``,并针对每个卡控制其可以 用到的数据。 * *output_from_new_proc* -- 应当为一个字符串,表示在多进程的 driver 中其它进程的输出流应当被做如何处理;其值应当为以下之一: - ["all", 
"ignore", "only_error"];当该参数的值不是以上值时,该值应当表示一个文件夹的名字,我们会将其他 rank 的输出流重定向到 - log 文件中,然后将 log 文件保存在通过该参数值设定的文件夹中;默认为 "only_error"; + ["all", "ignore", "only_error"];当该参数的值不是以上值时,该值应当表示一个文件夹的名字,我们会将其他 rank 的输出流重定向到 + log 文件中,然后将 log 文件保存在通过该参数值设定的文件夹中;默认为 "only_error"; 注意该参数仅当使用分布式的 ``driver`` 时才有效,例如 ``TorchDDPDriver``; * *progress_bar* -- 以哪种方式显示 progress ,目前支持[None, 'raw', 'rich', 'auto', 'tqdm'] 或者 :class:`~fastNLP.RichCallback`, :class:`~fastNLP.RawTextCallback`等对象, - 默认为 auto , auto 表示如果检测到当前 terminal 为交互型则使用 :class:`~fastNLP.RichCallback`,否则使用 :class:`~fastNLP.RawTextCallback` 对象。如果 - 需要定制 progress bar 的参数,例如打印频率等,可以传入 :class:`~fastNLP.RichCallback`, :class:`~fastNLP.RawTextCallback` 等对象。 + 默认为 auto , auto 表示如果检测到当前 terminal 为交互型则使用 :class:`~fastNLP.RichCallback`,否则使用 :class:`~fastNLP.RawTextCallback` 对象。如果 + 需要定制 progress bar 的参数,例如打印频率等,可以传入 :class:`~fastNLP.RichCallback`, :class:`~fastNLP.RawTextCallback` 等对象。 * *train_input_mapping* -- 与 input_mapping 一致,但是只用于 ``Trainer`` 中。与 input_mapping 互斥。 * *train_output_mapping* -- 与 output_mapping 一致,但是只用于 ``Trainer`` 中。与 output_mapping 互斥。 * *evaluate_input_mapping* -- 与 input_mapping 一致,但是只用于 ``Evaluator`` 中。与 input_mapping 互斥。 diff --git a/fastNLP/core/drivers/paddle_driver/fleet.py b/fastNLP/core/drivers/paddle_driver/fleet.py index 9344f515..a7d08e5c 100644 --- a/fastNLP/core/drivers/paddle_driver/fleet.py +++ b/fastNLP/core/drivers/paddle_driver/fleet.py @@ -130,15 +130,15 @@ class PaddleFleetDriver(PaddleDriver): :param is_pull_by_paddle_run: 标记当前进程是否为通过 ``python -m paddle.distributed.launch`` 启动的。 这个参数仅在 :class:`~fastNLP.core.Trainer` 中初始化 driver 时使用 :param fp16: 是否开启混合精度训练; + :param paddle_kwargs: + * *fleet_kwargs* -- 用于在使用 ``PaddleFleetDriver`` 时指定 ``DataParallel`` 和 ``fleet`` 初始化时的参数,包括: + + * *is_collective* -- 是否使用 paddle 集群式的分布式训练方法,目前仅支持为 ``True`` 的情况; + * *role_maker* -- 初始化 ``fleet`` 分布式训练 API 时使用的 ``RoleMaker``; + * 其它用于初始化 ``DataParallel`` 的参数; + * *gradscaler_kwargs* -- 用于 ``fp16=True`` 时,提供给 
:class:`paddle.amp.GradScaler` 的参数; + :kwargs: - * *paddle_kwargs* -- 用于在指定 ``driver`` 为 'paddle' 时设定具体 driver 实例的一些参数: - - * fleet_kwargs -- 用于在使用 ``PaddleFleetDriver`` 时指定 ``DataParallel`` 和 ``fleet`` 初始化时的参数,包括: - - * is_collective -- 是否使用 paddle 集群式的分布式训练方法,目前仅支持为 ``True`` 的情况; - * role_maker -- 初始化 ``fleet`` 分布式训练 API 时使用的 ``RoleMaker`` - * 其它用于初始化 ``DataParallel`` 的参数; - * wo_auto_param_call (``bool``) -- 是否关闭在训练时调用我们的 ``auto_param_call`` 函数来自动匹配 batch 和前向函数的参数的行为; .. note:: @@ -152,6 +152,7 @@ class PaddleFleetDriver(PaddleDriver): parallel_device: Optional[Union[List[str], str]], is_pull_by_paddle_run: bool = False, fp16: bool = False, + paddle_kwrags: Dict = {}, **kwargs ): if USER_CUDA_VISIBLE_DEVICES not in os.environ: diff --git a/fastNLP/core/drivers/paddle_driver/paddle_driver.py b/fastNLP/core/drivers/paddle_driver/paddle_driver.py index bfc26350..b22a6913 100644 --- a/fastNLP/core/drivers/paddle_driver/paddle_driver.py +++ b/fastNLP/core/drivers/paddle_driver/paddle_driver.py @@ -56,8 +56,18 @@ class PaddleDriver(Driver): 1. :class:`~fastNLP.core.drivers.PaddleSingleDriver`:实现了使用单卡和 ``cpu`` 训练的具体功能; 2. :class:`~fastNLP.core.drivers.PaddleFleetDriver`:实现了使用 ``fleet`` 分布式训练 API 进行集群式分布式训练的具体功能; + .. warning:: + + 您不应当直接初始化该类,然后传入给 ``Trainer``,换句话说,您应当使用该类的子类 ``PaddleSingleDriver`` 和 ``PaddleDDPDriver``,而不是 + 该类本身; + + .. 
note:: + + 您可以在使用 ``PaddleSingleDriver`` 和 ``PaddleFleetDriver`` 时使用 ``PaddleDriver`` 提供的接口; + :param model: 训练时使用的 **PaddlePaddle** 模型; :param fp16: 是否开启混合精度训练; + :param paddle_kwargs: :kwargs: * wo_auto_param_call (``bool``) -- 是否关闭在训练时调用我们的 ``auto_param_call`` 函数来自动匹配 batch 和前向函数的参数的行为; @@ -66,7 +76,7 @@ class PaddleDriver(Driver): 关于该参数的详细说明,请参见 :class:`~fastNLP.core.controllers.Trainer` 中的描述;函数 ``auto_param_call`` 详见 :func:`fastNLP.core.utils.auto_param_call`。 """ - def __init__(self, model: "paddle.nn.Layer", fp16: Optional[bool] = False, **kwargs): + def __init__(self, model: "paddle.nn.Layer", fp16: Optional[bool] = False, paddle_kwrags: Dict = {}, **kwargs): if not isinstance(model, paddle.nn.Layer): raise ValueError(f"Parameter `model` can not be `{type(model)}` in `PaddleDriver`, it should be exactly " f"`paddle.nn.Layer` type.") diff --git a/fastNLP/core/drivers/paddle_driver/single_device.py b/fastNLP/core/drivers/paddle_driver/single_device.py index 4105bf20..267c10bd 100644 --- a/fastNLP/core/drivers/paddle_driver/single_device.py +++ b/fastNLP/core/drivers/paddle_driver/single_device.py @@ -43,6 +43,8 @@ class PaddleSingleDriver(PaddleDriver): :param model: 训练时使用的 **PaddlePaddle** 模型; :param device: 训练使用的设备; :param fp16: 是否开启混合精度训练; + :param paddle_kwargs: + * *gradscaler_kwargs* -- 用于 ``fp16=True`` 时,提供给 :class:`paddle.amp.GradScaler` 的参数; :kwargs: * wo_auto_param_call (``bool``) -- 是否关闭在训练时调用我们的 ``auto_param_call`` 函数来自动匹配 batch 和前向函数的参数的行为; @@ -51,7 +53,7 @@ class PaddleSingleDriver(PaddleDriver): 关于该参数的详细说明,请参见 :class:`~fastNLP.core.controllers.Trainer` 中的描述;函数 ``auto_param_call`` 详见 :func:`fastNLP.core.utils.auto_param_call`。 """ - def __init__(self, model: "paddle.nn.Layer", device: Union[str, int], fp16: Optional[bool] = False, **kwargs): + def __init__(self, model: "paddle.nn.Layer", device: Union[str, int], fp16: Optional[bool] = False, paddle_kwrags: Dict = {}, **kwargs): if isinstance(model, DataParallel): raise 
ValueError("`paddle.DataParallel` is not supported in `PaddleSingleDriver`") @@ -61,7 +63,7 @@ class PaddleSingleDriver(PaddleDriver): logger.info("You have set `CUDA_VISIBLE_DEVICES` to '' in system environment variable, and we are gonna to" "use `cpu` instead of `gpu` device.") - super(PaddleSingleDriver, self).__init__(model, fp16=fp16, **kwargs) + super(PaddleSingleDriver, self).__init__(model, fp16=fp16, paddle_kwrags=paddle_kwrags, **kwargs) if device is None: raise ValueError("Parameter `device` can not be None in `PaddleSingleDriver`.") diff --git a/fastNLP/core/drivers/torch_driver/ddp.py b/fastNLP/core/drivers/torch_driver/ddp.py index b5485d16..47d9cbb5 100644 --- a/fastNLP/core/drivers/torch_driver/ddp.py +++ b/fastNLP/core/drivers/torch_driver/ddp.py @@ -235,7 +235,12 @@ class TorchDDPDriver(TorchDriver): :param parallel_device: 用于分布式训练的 ``gpu`` 设备; :param is_pull_by_torch_run: 标志当前的脚本的启动是否由 ``python -m torch.distributed.launch`` 启动的; :param fp16: 是否开启 fp16 训练; - :param kwargs: 其余的一些用于设定 ddp 训练的参数; + :param torch_kwargs: + * *ddp_kwargs* -- 用于在使用 ``TorchDDPDriver`` 时指定 ``DistributedDataParallel`` 初始化时的参数;例如传入 + {'find_unused_parameters': True} 来解决有参数不参与前向运算导致的报错等; + * *set_grad_to_none* -- 是否在训练过程中在每一次 optimizer 更新后将 grad 置为 None; + * *non_blocking* -- 表示用于 pytorch 的 tensor 的 to 方法的参数 non_blocking; + * *gradscaler_kwargs* -- 用于 fp16=True 时,提供给 ``torch.amp.cuda.GradScaler`` 的参数; """ def __init__( @@ -244,11 +249,12 @@ class TorchDDPDriver(TorchDriver): parallel_device: Optional[Union[List["torch.device"], "torch.device"]], is_pull_by_torch_run: bool = False, fp16: bool = False, + torch_kwargs: Dict = {}, **kwargs ): # 在加入很多东西后,需要注意这里调用 super 函数的位置; - super(TorchDDPDriver, self).__init__(model, fp16=fp16, **kwargs) + super(TorchDDPDriver, self).__init__(model, fp16=fp16, torch_kwargs=torch_kwargs, **kwargs) if isinstance(model, torch.nn.DataParallel): raise ValueError(f"Parameter `model` can not be `DataParallel` in `TorchDDPDriver`, it should be " diff 
--git a/fastNLP/core/drivers/torch_driver/single_device.py b/fastNLP/core/drivers/torch_driver/single_device.py index 263cf712..b59aba64 100644 --- a/fastNLP/core/drivers/torch_driver/single_device.py +++ b/fastNLP/core/drivers/torch_driver/single_device.py @@ -35,9 +35,13 @@ class TorchSingleDriver(TorchDriver): :param model: 传入给 ``Trainer`` 的 ``model`` 参数; :param device: torch.device,当前进程所使用的设备; :param fp16: 是否开启 fp16; + :param torch_kwargs: + * *set_grad_to_none* -- 是否在训练过程中在每一次 optimizer 更新后将 grad 置为 None; + * *non_blocking* -- 表示用于 pytorch 的 tensor 的 to 方法的参数 non_blocking; + * *gradscaler_kwargs* -- 用于 fp16=True 时,提供给 ``torch.amp.cuda.GradScaler`` 的参数; """ - def __init__(self, model, device: "torch.device", fp16: bool = False, **kwargs): + def __init__(self, model, device: "torch.device", fp16: bool = False, torch_kwargs: Dict = {}, **kwargs): if isinstance(model, DistributedDataParallel): raise ValueError("`DistributedDataParallel` is not supported in `TorchSingleDriver`") @@ -47,7 +51,7 @@ class TorchSingleDriver(TorchDriver): logger.info("You have set `CUDA_VISIBLE_DEVICES` to '' in system environment variable, and we are gonna to" "use `cpu` instead of `gpu` device.") - super(TorchSingleDriver, self).__init__(model, fp16=fp16, **kwargs) + super(TorchSingleDriver, self).__init__(model, fp16=fp16, torch_kwargs=torch_kwargs, **kwargs) if device is None: logger.debug("device is not set, fastNLP will try to automatically get it.") diff --git a/fastNLP/core/drivers/torch_driver/torch_driver.py b/fastNLP/core/drivers/torch_driver/torch_driver.py index 1594a903..60bd4147 100644 --- a/fastNLP/core/drivers/torch_driver/torch_driver.py +++ b/fastNLP/core/drivers/torch_driver/torch_driver.py @@ -47,12 +47,15 @@ class TorchDriver(Driver): 您可以在使用 ``TorchSingleDriver`` 和 ``TorchDDPDriver`` 时使用 ``TorchDriver`` 提供的接口; + :param model: 训练时使用的 **pytorch** 模型; + :param fp16: 是否开启混合精度训练; + :param torch_kwargs: """ - def __init__(self, model, fp16: Optional[bool] = False, 
**kwargs): + def __init__(self, model, fp16: Optional[bool] = False, torch_kwargs: Dict = {}, **kwargs): super(TorchDriver, self).__init__(model) """ 进行 fp16 的设置 """ - self._torch_kwargs = kwargs.get("torch_kwargs", {}) + self._torch_kwargs = torch_kwargs # 因为 ddp 和 single_device 的混合精度训练的设置是一样的,因此可以统一抽象到这里; self.fp16 = fp16 From ad6ada2487c492d3a12887aeb6664001639ba166 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Tue, 28 Jun 2022 23:25:12 +0800 Subject: [PATCH 41/52] small --- tests/core/drivers/torch_driver/test_ddp.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/core/drivers/torch_driver/test_ddp.py b/tests/core/drivers/torch_driver/test_ddp.py index 46abd84c..11f47617 100644 --- a/tests/core/drivers/torch_driver/test_ddp.py +++ b/tests/core/drivers/torch_driver/test_ddp.py @@ -773,10 +773,7 @@ class TestSaveLoad: # 保存状态 sampler_states = dataloader.batch_sampler.sampler.state_dict() save_states = {"num_consumed_batches": num_consumed_batches} - if only_state_dict: - driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) - else: - driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True, input_spec=[torch.ones((16, 10))]) + driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) dist.barrier() # 等待save成功 # 加载 # 更改 batch_size From 40b8016e98ea521e545e45606424039901dfb5ba Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Tue, 28 Jun 2022 23:29:42 +0800 Subject: [PATCH 42/52] =?UTF-8?q?PaddleDriver=20=E5=8F=AF=E4=BB=A5?= =?UTF-8?q?=E4=BC=A0=E5=85=A5Gradscaler=E5=8F=82=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/drivers/paddle_driver/paddle_driver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastNLP/core/drivers/paddle_driver/paddle_driver.py 
b/fastNLP/core/drivers/paddle_driver/paddle_driver.py index b22a6913..5bd35b7a 100644 --- a/fastNLP/core/drivers/paddle_driver/paddle_driver.py +++ b/fastNLP/core/drivers/paddle_driver/paddle_driver.py @@ -86,7 +86,7 @@ class PaddleDriver(Driver): # scaler的参数 self.auto_cast, _grad_scaler = _build_fp16_env(dummy=not fp16) - self.grad_scaler = _grad_scaler() + self.grad_scaler = _grad_scaler(**self._paddle_kwargs.get("gradscaler_kwargs", {})) # 用来设置是否关闭 auto_param_call 中的参数匹配问题; self.wo_auto_param_call = kwargs.get("model_wo_auto_param_call", False) From 780bf77ef79e08fe6a4b80df956e9efddff078eb Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Wed, 29 Jun 2022 00:06:22 +0800 Subject: [PATCH 43/52] =?UTF-8?q?paddle=20tutorials=20e2-=E9=97=AE?= =?UTF-8?q?=E7=AD=94=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tutorials/fastnlp_tutorial_paddle_e1.ipynb | 2 +- tutorials/fastnlp_tutorial_paddle_e2.ipynb | 1510 ++++++++++++++++++++ 2 files changed, 1511 insertions(+), 1 deletion(-) create mode 100644 tutorials/fastnlp_tutorial_paddle_e2.ipynb diff --git a/tutorials/fastnlp_tutorial_paddle_e1.ipynb b/tutorials/fastnlp_tutorial_paddle_e1.ipynb index a3cbcedc..60ff450e 100644 --- a/tutorials/fastnlp_tutorial_paddle_e1.ipynb +++ b/tutorials/fastnlp_tutorial_paddle_e1.ipynb @@ -57,7 +57,7 @@ "source": [ "#### 1.2 语义理解框架 ERNIE\n", "\n", - "``ERNIE(Enhanced Representation from kNowledge IntEgration)`` 是百度提出的基于知识增强的持续学习语义理解框架,至今已有 ``ERNIE 2.0``、``ERNIE 3.0``、``ERNIE-M``、``ERNIE-tiny`` 等多种预训练模型。``ERNIE 1.0`` 采用``Transformer Encoder`` 作为其语义表示的骨架,并改进了两种``mask`` 策略,分别为基于**短语**和**实体**(人名、组织等)的策略。在 ``ERNIE`` 中,由多个字组成的短语或者实体将作为一个统一单元,在训练的时候被统一地 ``mask`` 掉,这样可以潜在地学习到知识的依赖以及更长的语义依赖来让模型更具泛化性。\n", + "``ERNIE(Enhanced Representation from kNowledge IntEgration)`` 是百度提出的基于知识增强的持续学习语义理解框架,至今已有 ``ERNIE 2.0``、``ERNIE 3.0``、``ERNIE-M``、``ERNIE-tiny`` 等多种预训练模型。``ERNIE 1.0`` 采用``Transformer Encoder`` 
作为其语义表示的骨架,并改进了两种 ``mask`` 策略,分别为基于**短语**和**实体**(人名、组织等)的策略。在 ``ERNIE`` 中,由多个字组成的短语或者实体将作为一个统一单元,在训练的时候被统一地 ``mask`` 掉,这样可以潜在地学习到知识的依赖以及更长的语义依赖来让模型更具泛化性。\n", "\n", "\n", "\n", diff --git a/tutorials/fastnlp_tutorial_paddle_e2.ipynb b/tutorials/fastnlp_tutorial_paddle_e2.ipynb new file mode 100644 index 00000000..c17be405 --- /dev/null +++ b/tutorials/fastnlp_tutorial_paddle_e2.ipynb @@ -0,0 +1,1510 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 使用 paddlenlp 和 FastNLP 训练中文阅读理解任务\n", + "\n", + "本篇教程属于 **`FastNLP v0.8 tutorial` 的 `paddle examples` 系列**。在本篇教程中,我们将为您展示如何在 `FastNLP` 中通过自定义 `Metric` 和 损失函数来完成进阶的问答任务。\n", + "\n", + "1. 基础介绍:自然语言处理中的阅读理解任务\n", + "\n", + "2. 准备工作:加载 `DuReader-robust` 数据集,并使用 `tokenizer` 处理数据\n", + "\n", + "3. 模型训练:自己定义评测用的 `Metric` 实现更加自由的任务评测" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. 基础介绍:自然语言处理中的阅读理解任务\n", + "\n", + "阅读理解任务,顾名思义,就是给出一段文字,然后让模型理解这段文字所含的语义。大部分机器阅读理解任务都采用问答式测评,即设计与文章内容相关的自然语言式问题,让模型理解问题并根据文章作答。与文本分类任务不同的是,在阅读理解任务中我们有时需要需要输入“一对”句子,分别代表问题和上下文;答案的格式也分为多种:\n", + "\n", + "- 多项选择:让模型从多个答案选项中选出正确答案\n", + "- 区间答案:答案为上下文的一段子句,需要模型给出答案的起始位置\n", + "- 自由回答:不做限制,让模型自行生成答案\n", + "- 完形填空:在原文中挖空部分关键词,让模型补全;这类答案往往不需要问题\n", + "\n", + "如果您对 `transformers` 有所了解的话,其中的 `ModelForQuestionAnswering` 系列模型就可以用于这项任务。阅读理解模型的泛用性是衡量该技术能否在实际应用中大规模落地的重要指标之一,随着当前技术的进步,许多模型虽然能够在一些测试集上取得较好的性能,但在实际应用中,这些模型仍然难以让人满意。在本篇教程中,我们将会为您展示如何训练一个问答模型。\n", + "\n", + "在这一领域,`SQuAD` 数据集是一个影响深远的数据集。它的全称是斯坦福问答数据集(Stanford Question Answering Dataset),每条数据包含 `(问题,上下文,答案)` 三部分,规模大(约十万条,2.0又新增了五万条),在提出之后很快成为训练问答任务的经典数据集之一。`SQuAD` 数据集有两个指标来衡量模型的表现:`EM`(Exact Match,精确匹配)和 `F1`(模糊匹配)。前者反应了模型给出的答案中有多少和正确答案完全一致,后者则反应了模型给出的答案中与正确答案重叠的部分,均为越高越好。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. 
准备工作:加载 DuReader-robust 数据集,并使用 tokenizer 处理数据" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/remote-home/shxing/anaconda3/envs/fnlp-paddle/lib/python3.7/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.3.3\n" + ] + } + ], + "source": [ + "import sys\n", + "sys.path.append(\"../\")\n", + "import paddle\n", + "import paddlenlp\n", + "\n", + "print(paddlenlp.__version__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "在数据集方面,我们选用 `DuReader-robust` 中文数据集作为训练数据。它是一种抽取式问答数据集,采用 `SQuAD` 数据格式,能够评估真实应用场景下模型的泛用性。" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Reusing dataset dureader_robust (/remote-home/shxing/.cache/huggingface/datasets/dureader_robust/plain_text/1.0.0/d462ecadc8c010cee20f57632f1413f272867cd802a91a602df48c7d34eb0c27)\n", + "Reusing dataset dureader_robust (/remote-home/shxing/.cache/huggingface/datasets/dureader_robust/plain_text/1.0.0/d462ecadc8c010cee20f57632f1413f272867cd802a91a602df48c7d34eb0c27)\n", + "\u001b[32m[2022-06-27 19:22:46,998] [ INFO]\u001b[0m - Already cached /remote-home/shxing/.paddlenlp/models/ernie-1.0-base-zh/vocab.txt\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'id': '0a25cb4bc1ab6f474c699884e04601e4', 'title': '', 'context': 
'第35集雪见缓缓张开眼睛,景天又惊又喜之际,长卿和紫萱的仙船驶至,见众人无恙,也十分高兴。众人登船,用尽合力把自身的真气和水分输给她。雪见终于醒过来了,但却一脸木然,全无反应。众人向常胤求助,却发现人世界竟没有雪见的身世纪录。长卿询问清微的身世,清微语带双关说一切上了天界便有答案。长卿驾驶仙船,众人决定立马动身,往天界而去。众人来到一荒山,长卿指出,魔界和天界相连。由魔界进入通过神魔之井,便可登天。众人至魔界入口,仿若一黑色的蝙蝠洞,但始终无法进入。后来花楹发现只要有翅膀便能飞入。于是景天等人打下许多乌鸦,模仿重楼的翅膀,制作数对翅膀状巨物。刚佩戴在身,便被吸入洞口。众人摔落在地,抬头发现魔界守卫。景天和众魔套交情,自称和魔尊重楼相熟,众魔不理,打了起来。', 'question': '仙剑奇侠传3第几集上天界', 'answers': {'text': ['第35集'], 'answer_start': [0]}}\n", + "{'id': '7de192d6adf7d60ba73ba25cf590cc1e', 'title': '', 'context': '选择燃气热水器时,一定要关注这几个问题:1、出水稳定性要好,不能出现忽热忽冷的现象2、快速到达设定的需求水温3、操作要智能、方便4、安全性要好,要装有安全报警装置 市场上燃气热水器品牌众多,购买时还需多加对比和仔细鉴别。方太今年主打的磁化恒温热水器在使用体验方面做了全面升级:9秒速热,可快速进入洗浴模式;水温持久稳定,不会出现忽热忽冷的现象,并通过水量伺服技术将出水温度精确控制在±0.5℃,可满足家里宝贝敏感肌肤洗护需求;配备CO和CH4双气体报警装置更安全(市场上一般多为CO单气体报警)。另外,这款热水器还有智能WIFI互联功能,只需下载个手机APP即可用手机远程操作热水器,实现精准调节水温,满足家人多样化的洗浴需求。当然方太的磁化恒温系列主要的是增加磁化功能,可以有效吸附水中的铁锈、铁屑等微小杂质,防止细菌滋生,使沐浴水质更洁净,长期使用磁化水沐浴更利于身体健康。', 'question': '燃气热水器哪个牌子好', 'answers': {'text': ['方太'], 'answer_start': [110]}}\n", + "{'id': 'b9e74d4b9228399b03701d1fe6d52940', 'title': '', 'context': '迈克尔.乔丹在NBA打了15个赛季。他在84年进入nba,期间在1993年10月6日第一次退役改打棒球,95年3月18日重新回归,在99年1月13日第二次退役,后于2001年10月31日复出,在03年最终退役。迈克尔·乔丹(Michael Jordan),1963年2月17日生于纽约布鲁克林,美国著名篮球运动员,司职得分后卫,历史上最伟大的篮球运动员。1984年的NBA选秀大会,乔丹在首轮第3顺位被芝加哥公牛队选中。 1986-87赛季,乔丹场均得到37.1分,首次获得分王称号。1990-91赛季,乔丹连夺常规赛MVP和总决赛MVP称号,率领芝加哥公牛首次夺得NBA总冠军。 1997-98赛季,乔丹获得个人职业生涯第10个得分王,并率领公牛队第六次夺得总冠军。2009年9月11日,乔丹正式入选NBA名人堂。', 'question': '乔丹打了多少个赛季', 'answers': {'text': ['15个'], 'answer_start': [12]}}\n", + "训练集大小: 14520\n", + "验证集大小: 1417\n" + ] + } + ], + "source": [ + "from paddlenlp.datasets import load_dataset\n", + "train_dataset = load_dataset(\"PaddlePaddle/dureader_robust\", splits=\"train\")\n", + "val_dataset = load_dataset(\"PaddlePaddle/dureader_robust\", splits=\"validation\")\n", + "for i in range(3):\n", + " print(train_dataset[i])\n", + "print(\"训练集大小:\", len(train_dataset))\n", + "print(\"验证集大小:\", len(val_dataset))\n", + "\n", + "MODEL_NAME = \"ernie-1.0-base-zh\"\n", + "from paddlenlp.transformers 
import ErnieTokenizer\n", + "tokenizer =ErnieTokenizer.from_pretrained(MODEL_NAME)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 2.1 处理训练集\n", + "\n", + "对于阅读理解任务,数据处理的方式较为麻烦。接下来我们会为您详细讲解处理函数 `_process_train` 的功能,同时也将通过实践展示关于 `tokenizer` 的更多功能,让您更加深入地了解自然语言处理任务。首先让我们向 `tokenizer` 输入一条数据(以列表的形式):" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2\n", + "dict_keys(['offset_mapping', 'input_ids', 'token_type_ids', 'overflow_to_sample'])\n" + ] + } + ], + "source": [ + "result = tokenizer(\n", + " [train_dataset[0][\"question\"]],\n", + " [train_dataset[0][\"context\"]],\n", + " stride=128,\n", + " max_length=256,\n", + " padding=\"max_length\",\n", + " return_dict=False\n", + ")\n", + "\n", + "print(len(result))\n", + "print(result[0].keys())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "首先不难理解的是,模型必须要同时接受问题(`question`)和上下文(`context`)才能够进行阅读理解,因此我们需要将二者同时进行分词(`tokenize`)。所幸,`Tokenizer` 提供了这一功能,当我们调用 `tokenizer` 的时候,其第一个参数名为 `text`,第二个参数名为 `text_pair`,这使得我们可以同时对一对文本进行分词。同时,`tokenizer` 还需要标记出一条数据中哪些属于问题,哪些属于上下文,这一功能则由 `token_type_ids` 完成。`token_type_ids` 会将输入的第一个文本(问题)标记为 `0`,第二个文本(上下文)标记为 `1`,这样模型在训练时便可以将问题和上下文区分开来:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[1, 1034, 1189, 734, 2003, 241, 284, 131, 553, 271, 28, 125, 280, 2, 131, 1773, 271, 1097, 373, 1427, 1427, 501, 88, 662, 1906, 4, 561, 125, 311, 1168, 311, 692, 46, 430, 4, 84, 2073, 14, 1264, 3967, 5, 1034, 1020, 1829, 268, 4, 373, 539, 8, 154, 5210, 4, 105, 167, 59, 69, 685, 12043, 539, 8, 883, 1020, 4, 29, 720, 95, 90, 427, 67, 262, 5, 384, 266, 14, 101, 59, 789, 416, 237, 12043, 1097, 373, 616, 37, 1519, 93, 61, 15, 4, 255, 535, 7, 1529, 619, 187, 4, 62, 154, 451, 149, 12043, 539, 8, 253, 223, 3679, 323, 523, 4, 535, 34, 
87, 8, 203, 280, 1186, 340, 9, 1097, 373, 5, 262, 203, 623, 704, 12043, 84, 2073, 1137, 358, 334, 702, 5, 262, 203, 4, 334, 702, 405, 360, 653, 129, 178, 7, 568, 28, 15, 125, 280, 518, 9, 1179, 487, 12043, 84, 2073, 1621, 1829, 1034, 1020, 4, 539, 8, 448, 91, 202, 466, 70, 262, 4, 638, 125, 280, 83, 299, 12043, 539, 8, 61, 45, 7, 1537, 176, 4, 84, 2073, 288, 39, 4, 889, 280, 14, 125, 280, 156, 538, 12043, 190, 889, 280, 71, 109, 124, 93, 292, 889, 46, 1248, 4, 518, 48, 883, 125, 12043, 539, 8, 268, 889, 280, 109, 270, 4, 1586, 845, 7, 669, 199, 5, 3964, 3740, 1084, 4, 255, 440, 616, 154, 72, 71, 109, 12043, 49, 61, 283, 3591, 34, 87, 297, 41, 9, 1993, 2602, 518, 52, 706, 109, 2]\n", + "['[CLS]', '仙', '剑', '奇', '侠', '传', '3', '第', '几', '集', '上', '天', '界', '[SEP]', '第', '35', '集', '雪', '见', '缓', '缓', '张', '开', '眼', '睛', ',', '景', '天', '又', '惊', '又', '喜', '之', '际', ',', '长', '卿', '和', '紫', '萱', '的', '仙', '船', '驶', '至', ',', '见', '众', '人', '无', '恙', ',', '也', '十', '分', '高', '兴', '。', '众', '人', '登', '船', ',', '用', '尽', '合', '力', '把', '自', '身', '的', '真', '气', '和', '水', '分', '输', '给', '她', '。', '雪', '见', '终', '于', '醒', '过', '来', '了', ',', '但', '却', '一', '脸', '木', '然', ',', '全', '无', '反', '应', '。', '众', '人', '向', '常', '胤', '求', '助', ',', '却', '发', '现', '人', '世', '界', '竟', '没', '有', '雪', '见', '的', '身', '世', '纪', '录', '。', '长', '卿', '询', '问', '清', '微', '的', '身', '世', ',', '清', '微', '语', '带', '双', '关', '说', '一', '切', '上', '了', '天', '界', '便', '有', '答', '案', '。', '长', '卿', '驾', '驶', '仙', '船', ',', '众', '人', '决', '定', '立', '马', '动', '身', ',', '往', '天', '界', '而', '去', '。', '众', '人', '来', '到', '一', '荒', '山', ',', '长', '卿', '指', '出', ',', '魔', '界', '和', '天', '界', '相', '连', '。', '由', '魔', '界', '进', '入', '通', '过', '神', '魔', '之', '井', ',', '便', '可', '登', '天', '。', '众', '人', '至', '魔', '界', '入', '口', ',', '仿', '若', '一', '黑', '色', '的', '蝙', '蝠', '洞', ',', '但', '始', '终', '无', '法', '进', '入', '。', '后', '来', '花', '楹', '发', '现', '只', '要', '有', '翅', '膀', '便', '能', '飞', '入', '[SEP]']\n", + "[0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n" + ] + } + ], + "source": [ + "print(result[0][\"input_ids\"])\n", + "print(tokenizer.convert_ids_to_tokens(result[0][\"input_ids\"]))\n", + "print(result[0][\"token_type_ids\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "根据上面的输出我们可以看出,`tokenizer` 会将数据开头用 `[CLS]` 标记,用 `[SEP]` 来分割句子。同时,根据 `token_type_ids` 得到的 0、1 串,我们也很容易将问题和上下文区分开。顺带一提,如果一条数据进行了 `padding`,那么这部分会被标记为 `0` 。\n", + "\n", + "在输出的 `keys` 中还有一项名为 `offset_mapping` 的键。该项数据能够表示分词后的每个 `token` 在原文中对应文字或词语的位置。比如我们可以像下面这样将数据打印出来:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (0, 0), (0, 1), (1, 3), (3, 4), (4, 5), (5, 6), (6, 7)]\n", + "[1, 1034, 1189, 734, 2003, 241, 284, 131, 553, 271, 28, 125, 280, 2, 131, 1773, 271, 1097, 373, 1427]\n", + "['[CLS]', '仙', '剑', '奇', '侠', '传', '3', '第', '几', '集', '上', '天', '界', '[SEP]', '第', '35', '集', '雪', '见', '缓']\n" + ] + } + ], + "source": [ + "print(result[0][\"offset_mapping\"][:20])\n", + "print(result[0][\"input_ids\"][:20])\n", + 
"print(tokenizer.convert_ids_to_tokens(result[0][\"input_ids\"])[:20])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`[CLS]` 由于是 `tokenizer` 自己添加进去用于标记数据的 `token`,因此它在原文中找不到任何对应的词语,所以给出的位置范围就是 `(0, 0)`;第二个 `token` 对应第一个 `“仙”` 字,因此映射的位置就是 `(0, 1)`;同理,后面的 `[SEP]` 也不对应任何文字,映射的位置为 `(0, 0)`;而接下来的 `token` 对应 **上下文** 中的第一个字 `“第”`,映射出的位置为 `(0, 1)`;再后面的 `token` 对应原文中的两个字符 `35`,因此其位置映射为 `(1, 3)` 。通过这种手段,我们可以更方便地获取 `token` 与原文的对应关系。\n", + "\n", + "最后,您也许会注意到我们获取的 `result` 长度为 2 。这是文本在分词后长度超过了 `max_length` 256 ,`tokenizer` 将数据分成了两部分所致。在阅读理解任务中,我们不可能像文本分类那样轻易地将一条数据截断,因为答案很可能就出现在后面被丢弃的那部分数据中,因此,我们需要保留所有的数据(当然,您也可以直接丢弃这些超长的数据)。`overflow_to_sample` 则可以标识当前数据在原数据的索引:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[CLS]仙剑奇侠传3第几集上天界[SEP]第35集雪见缓缓张开眼睛,景天又惊又喜之际,长卿和紫萱的仙船驶至,见众人无恙,也十分高兴。众人登船,用尽合力把自身的真气和水分输给她。雪见终于醒过来了,但却一脸木然,全无反应。众人向常胤求助,却发现人世界竟没有雪见的身世纪录。长卿询问清微的身世,清微语带双关说一切上了天界便有答案。长卿驾驶仙船,众人决定立马动身,往天界而去。众人来到一荒山,长卿指出,魔界和天界相连。由魔界进入通过神魔之井,便可登天。众人至魔界入口,仿若一黑色的蝙蝠洞,但始终无法进入。后来花楹发现只要有翅膀便能飞入[SEP]\n", + "overflow_to_sample: 0\n", + "[CLS]仙剑奇侠传3第几集上天界[SEP]说一切上了天界便有答案。长卿驾驶仙船,众人决定立马动身,往天界而去。众人来到一荒山,长卿指出,魔界和天界相连。由魔界进入通过神魔之井,便可登天。众人至魔界入口,仿若一黑色的蝙蝠洞,但始终无法进入。后来花楹发现只要有翅膀便能飞入。于是景天等人打下许多乌鸦,模仿重楼的翅膀,制作数对翅膀状巨物。刚佩戴在身,便被吸入洞口。众人摔落在地,抬头发现魔界守卫。景天和众魔套交情,自称和魔尊重楼相熟,众魔不理,打了起来。[SEP][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD][PAD]\n", + "overflow_to_sample: 0\n" + ] + } + ], + "source": [ + "for res in result:\n", + " tokens = tokenizer.convert_ids_to_tokens(res[\"input_ids\"])\n", + " print(\"\".join(tokens))\n", + " print(\"overflow_to_sample: \", res[\"overflow_to_sample\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "将两条数据均输出之后可以看到,它们都出自我们传入的数据,并且存在一部分重合。`tokenizer` 的 `stride` 
参数可以设置重合部分的长度,这也可以帮助模型识别被分割开的两条数据;`overflow_to_sample` 的 `0` 则代表它们来自于第 `0` 条数据。\n", + "\n", + "基于以上信息,我们处理训练集的思路如下:\n", + "\n", + "1. 通过 `overflow_to_sample` 来获取原来的数据\n", + "2. 通过原数据的 `answers` 找到答案的起始位置\n", + "3. 通过 `offset_mapping` 给出的映射关系在分词处理后的数据中找到答案的起始位置,分别记录在 `start_pos` 和 `end_pos` 中;如果没有找到答案(比如答案被截断了),那么答案的起始位置就被标记为 `[CLS]` 的位置。\n", + "\n", + "这样 `_process_train` 函数就呼之欲出了,我们调用 `train_dataset.map` 函数,并将 `batched` 参数设置为 `True` ,将所有数据批量地进行更新。有一点需要注意的是,**在处理过后数据量会增加**。" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'offset_mapping': [(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (0, 0), (0, 1), (1, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23), (23, 24), (24, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31), (31, 32), (32, 33), (33, 34), (34, 35), (35, 36), (36, 37), (37, 38), (38, 39), (39, 40), (40, 41), (41, 42), (42, 43), (43, 44), (44, 45), (45, 46), (46, 47), (47, 48), (48, 49), (49, 50), (50, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 62), (62, 63), (63, 64), (64, 65), (65, 66), (66, 67), (67, 68), (68, 69), (69, 70), (70, 71), (71, 72), (72, 73), (73, 74), (74, 75), (75, 76), (76, 77), (77, 78), (78, 79), (79, 80), (80, 81), (81, 82), (82, 83), (83, 84), (84, 85), (85, 86), (86, 87), (87, 88), (88, 89), (89, 90), (90, 91), (91, 92), (92, 93), (93, 94), (94, 95), (95, 96), (96, 97), (97, 98), (98, 99), (99, 100), (100, 101), (101, 102), (102, 103), (103, 104), (104, 105), (105, 106), (106, 107), (107, 108), (108, 109), (109, 110), (110, 111), (111, 112), (112, 113), (113, 114), (114, 115), (115, 116), (116, 117), (117, 118), (118, 119), (119, 120), (120, 
121), (121, 122), (122, 123), (123, 124), (124, 125), (125, 126), (126, 127), (127, 128), (128, 129), (129, 130), (130, 131), (131, 132), (132, 133), (133, 134), (134, 135), (135, 136), (136, 137), (137, 138), (138, 139), (139, 140), (140, 141), (141, 142), (142, 143), (143, 144), (144, 145), (145, 146), (146, 147), (147, 148), (148, 149), (149, 150), (150, 151), (151, 152), (152, 153), (153, 154), (154, 155), (155, 156), (156, 157), (157, 158), (158, 159), (159, 160), (160, 161), (161, 162), (162, 163), (163, 164), (164, 165), (165, 166), (166, 167), (167, 168), (168, 169), (169, 170), (170, 171), (171, 172), (172, 173), (173, 174), (174, 175), (175, 176), (176, 177), (177, 178), (178, 179), (179, 180), (180, 181), (181, 182), (182, 183), (183, 184), (184, 185), (185, 186), (186, 187), (187, 188), (188, 189), (189, 190), (190, 191), (191, 192), (192, 193), (193, 194), (194, 195), (195, 196), (196, 197), (197, 198), (198, 199), (199, 200), (200, 201), (201, 202), (202, 203), (203, 204), (204, 205), (205, 206), (206, 207), (207, 208), (208, 209), (209, 210), (210, 211), (211, 212), (212, 213), (213, 214), (214, 215), (215, 216), (216, 217), (217, 218), (218, 219), (219, 220), (220, 221), (221, 222), (222, 223), (223, 224), (224, 225), (225, 226), (226, 227), (227, 228), (228, 229), (229, 230), (230, 231), (231, 232), (232, 233), (233, 234), (234, 235), (235, 236), (236, 237), (237, 238), (238, 239), (239, 240), (240, 241), (241, 242), (0, 0)], 'input_ids': [1, 1034, 1189, 734, 2003, 241, 284, 131, 553, 271, 28, 125, 280, 2, 131, 1773, 271, 1097, 373, 1427, 1427, 501, 88, 662, 1906, 4, 561, 125, 311, 1168, 311, 692, 46, 430, 4, 84, 2073, 14, 1264, 3967, 5, 1034, 1020, 1829, 268, 4, 373, 539, 8, 154, 5210, 4, 105, 167, 59, 69, 685, 12043, 539, 8, 883, 1020, 4, 29, 720, 95, 90, 427, 67, 262, 5, 384, 266, 14, 101, 59, 789, 416, 237, 12043, 1097, 373, 616, 37, 1519, 93, 61, 15, 4, 255, 535, 7, 1529, 619, 187, 4, 62, 154, 451, 149, 12043, 539, 8, 253, 223, 3679, 323, 523, 
4, 535, 34, 87, 8, 203, 280, 1186, 340, 9, 1097, 373, 5, 262, 203, 623, 704, 12043, 84, 2073, 1137, 358, 334, 702, 5, 262, 203, 4, 334, 702, 405, 360, 653, 129, 178, 7, 568, 28, 15, 125, 280, 518, 9, 1179, 487, 12043, 84, 2073, 1621, 1829, 1034, 1020, 4, 539, 8, 448, 91, 202, 466, 70, 262, 4, 638, 125, 280, 83, 299, 12043, 539, 8, 61, 45, 7, 1537, 176, 4, 84, 2073, 288, 39, 4, 889, 280, 14, 125, 280, 156, 538, 12043, 190, 889, 280, 71, 109, 124, 93, 292, 889, 46, 1248, 4, 518, 48, 883, 125, 12043, 539, 8, 268, 889, 280, 109, 270, 4, 1586, 845, 7, 669, 199, 5, 3964, 3740, 1084, 4, 255, 440, 616, 154, 72, 71, 109, 12043, 49, 61, 283, 3591, 34, 87, 297, 41, 9, 1993, 2602, 518, 52, 706, 109, 2], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'overflow_to_sample': 0, 'start_pos': 14, 'end_pos': 16}\n", + "处理后的训练集大小: 26198\n" + ] + } + ], + "source": [ + "max_length = 256\n", + "doc_stride = 128\n", + "def _process_train(data):\n", + "\n", + " contexts = [data[i][\"context\"] for i in range(len(data))]\n", + " questions = [data[i][\"question\"] for i in range(len(data))]\n", + "\n", + " tokenized_data_list = tokenizer(\n", + " questions,\n", + " contexts,\n", + " stride=doc_stride,\n", + " max_length=max_length,\n", + " 
padding=\"max_length\",\n", + " return_dict=False\n", + " )\n", + "\n", + " for i, tokenized_data in enumerate(tokenized_data_list):\n", + " # 获取 [CLS] 对应的位置\n", + " input_ids = tokenized_data[\"input_ids\"]\n", + " cls_index = input_ids.index(tokenizer.cls_token_id)\n", + "\n", + " # 在 tokenize 的过程中,汉字和 token 在位置上并非一一对应的\n", + " # 而 offset mapping 记录了每个 token 在原文中对应的起始位置\n", + " offsets = tokenized_data[\"offset_mapping\"]\n", + " # token_type_ids 记录了一条数据中哪些是问题,哪些是上下文\n", + " token_type_ids = tokenized_data[\"token_type_ids\"]\n", + "\n", + " # 一条数据可能因为长度过长而在 tokenized_data 中存在多个结果\n", + " # overflow_to_sample 表示了当前 tokenize_example 属于 data 中的哪一条数据\n", + " sample_index = tokenized_data[\"overflow_to_sample\"]\n", + " answers = data[sample_index][\"answers\"]\n", + "\n", + " # answers 和 answer_starts 均为长度为 1 的 list\n", + " # 我们可以计算出答案的结束位置\n", + " start_char = answers[\"answer_start\"][0]\n", + " end_char = start_char + len(answers[\"text\"][0])\n", + "\n", + " token_start_index = 0\n", + " while token_type_ids[token_start_index] != 1:\n", + " token_start_index += 1\n", + "\n", + " token_end_index = len(input_ids) - 1\n", + " while token_type_ids[token_end_index] != 1:\n", + " token_end_index -= 1\n", + " # 分词后一条数据的结尾一定是 [SEP],因此还需要减一\n", + " token_end_index -= 1\n", + "\n", + " if not (offsets[token_start_index][0] <= start_char and\n", + " offsets[token_end_index][1] >= end_char):\n", + " # 如果答案不在这条数据中,则将答案位置标记为 [CLS] 的位置\n", + " tokenized_data_list[i][\"start_pos\"] = cls_index\n", + " tokenized_data_list[i][\"end_pos\"] = cls_index\n", + " else:\n", + " # 否则,我们可以找到答案对应的 token 的起始位置,记录在 start_pos 和 end_pos 中\n", + " while token_start_index < len(offsets) and offsets[\n", + " token_start_index][0] <= start_char:\n", + " token_start_index += 1\n", + " tokenized_data_list[i][\"start_pos\"] = token_start_index - 1\n", + " while offsets[token_end_index][1] >= end_char:\n", + " token_end_index -= 1\n", + " tokenized_data_list[i][\"end_pos\"] = token_end_index + 1\n", 
+ "\n", + " return tokenized_data_list\n", + "\n", + "train_dataset.map(_process_train, batched=True, num_workers=5)\n", + "print(train_dataset[0])\n", + "print(\"处理后的训练集大小:\", len(train_dataset))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 2.2 处理验证集\n", + "\n", + "对于验证集的处理则简单得多,我们只需要保存原数据的 `id` 并将 `offset_mapping` 中不属于上下文的部分设置为 `None` 即可。" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def _process_val(data):\n", + "\n", + " contexts = [data[i][\"context\"] for i in range(len(data))]\n", + " questions = [data[i][\"question\"] for i in range(len(data))]\n", + "\n", + " tokenized_data_list = tokenizer(\n", + " questions,\n", + " contexts,\n", + " stride=doc_stride,\n", + " max_length=max_length,\n", + " return_dict=False\n", + " )\n", + "\n", + " for i, tokenized_data in enumerate(tokenized_data_list):\n", + " token_type_ids = tokenized_data[\"token_type_ids\"]\n", + " # 保存数据对应的 id\n", + " sample_index = tokenized_data[\"overflow_to_sample\"]\n", + " tokenized_data_list[i][\"example_id\"] = data[sample_index][\"id\"]\n", + "\n", + " # 将不属于 context 的 offset 设置为 None\n", + " tokenized_data_list[i][\"offset_mapping\"] = [\n", + " (o if token_type_ids[k] == 1 else None)\n", + " for k, o in enumerate(tokenized_data[\"offset_mapping\"])\n", + " ]\n", + "\n", + " return tokenized_data_list\n", + "\n", + "val_dataset.map(_process_val, batched=True, num_workers=5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 2.3 DataLoader\n", + "\n", + "最后使用 `PaddleDataLoader` 将数据集包裹起来即可。" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "

    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from fastNLP.core import PaddleDataLoader\n", + "\n", + "train_dataloader = PaddleDataLoader(train_dataset, batch_size=32, shuffle=True)\n", + "val_dataloader = PaddleDataLoader(val_dataset, batch_size=16)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. 模型训练:自己定义评测用的 Metric 实现更加自由的任务评测\n", + "\n", + "#### 3.1 损失函数\n", + "\n", + "对于阅读理解任务,我们使用的是 `ErnieForQuestionAnswering` 模型。该模型在接受输入后会返回两个值:`start_logits` 和 `end_logits` ,大小均为 `(batch_size, sequence_length)`,反映了每条数据每个词语为答案起始位置的可能性,因此我们需要自定义一个损失函数来计算 `loss`。 `CrossEntropyLossForSquad` 会分别对答案起始位置的预测值和真实值计算交叉熵,最后返回其平均值作为最终的损失。" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "class CrossEntropyLossForSquad(paddle.nn.Layer):\n", + " def __init__(self):\n", + " super(CrossEntropyLossForSquad, self).__init__()\n", + "\n", + " def forward(self, start_logits, end_logits, start_pos, end_pos):\n", + " start_pos = paddle.unsqueeze(start_pos, axis=-1)\n", + " end_pos = paddle.unsqueeze(end_pos, axis=-1)\n", + " start_loss = paddle.nn.functional.softmax_with_cross_entropy(\n", + " logits=start_logits, label=start_pos)\n", + " start_loss = paddle.mean(start_loss)\n", + " end_loss = paddle.nn.functional.softmax_with_cross_entropy(\n", + " logits=end_logits, label=end_pos)\n", + " end_loss = paddle.mean(end_loss)\n", + "\n", + " loss = (start_loss + end_loss) / 2\n", + " return loss" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 3.2 定义模型\n", + "\n", + "模型的核心则是 `ErnieForQuestionAnswering` 的 `ernie-1.0-base-zh` 预训练模型,同时按照 `FastNLP` 的规定定义 `train_step` 和 `evaluate_step` 函数。这里 `evaluate_step` 函数并没有像文本分类那样直接返回该批次数据的评测结果,这一点我们将在下面为您讲解。" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + 
"text": [ + "\u001b[32m[2022-06-27 19:00:15,825] [ INFO]\u001b[0m - Already cached /remote-home/shxing/.paddlenlp/models/ernie-1.0-base-zh/ernie_v1_chn_base.pdparams\u001b[0m\n", + "W0627 19:00:15.831080 21543 gpu_context.cc:278] Please NOTE: device: 0, GPU Compute Capability: 7.5, Driver API Version: 11.2, Runtime API Version: 11.2\n", + "W0627 19:00:15.843276 21543 gpu_context.cc:306] device: 0, cuDNN Version: 8.1.\n" + ] + } + ], + "source": [ + "from paddlenlp.transformers import ErnieForQuestionAnswering\n", + "\n", + "class QAModel(paddle.nn.Layer):\n", + " def __init__(self, model_checkpoint):\n", + " super(QAModel, self).__init__()\n", + " self.model = ErnieForQuestionAnswering.from_pretrained(model_checkpoint)\n", + " self.loss_func = CrossEntropyLossForSquad()\n", + "\n", + " def forward(self, input_ids, token_type_ids):\n", + " start_logits, end_logits = self.model(input_ids, token_type_ids)\n", + " return start_logits, end_logits\n", + "\n", + " def train_step(self, input_ids, token_type_ids, start_pos, end_pos):\n", + " start_logits, end_logits = self(input_ids, token_type_ids)\n", + " loss = self.loss_func(start_logits, end_logits, start_pos, end_pos)\n", + " return {\"loss\": loss}\n", + "\n", + " def evaluate_step(self, input_ids, token_type_ids):\n", + " start_logits, end_logits = self(input_ids, token_type_ids)\n", + " return {\"start_logits\": start_logits, \"end_logits\": end_logits}\n", + "\n", + "model = QAModel(MODEL_NAME)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 3.3 自定义 Metric 进行数据的评估\n", + "\n", + "`paddlenlp` 为我们提供了评测 `SQuAD` 格式数据集的函数 `compute_prediction` 和 `squad_evaluate`:\n", + "- `compute_prediction` 函数要求传入原数据 `examples` 、处理后的数据 `features` 和 `features` 对应的结果 `predictions`(一个包含所有数据 `start_logits` 和 `end_logits` 的元组)\n", + "- `squad_evaluate` 要求传入原数据 `examples` 和预测结果 `all_predictions`(通常来自于 `compute_prediction`)\n", + "\n", + "在使用这两个函数的时候,我们需要向其中传入数据集,但显然根据 `fastNLP` 的设计,我们无法在 `evaluate_step` 
里实现这一过程,并且 `FastNLP` 也并没有提供计算 `F1` 和 `EM` 的 `Metric`,故我们需要自己定义用于评测的 `Metric`。\n", + "\n", + "在初始化之外,一个 `Metric` 还需要实现三个函数:\n", + "\n", + "1. `reset` - 该函数会在验证数据集的迭代之前被调用,用于清空数据;在我们自定义的 `Metric` 中,我们需要将 `all_start_logits` 和 `all_end_logits` 清空,重新收集每个 `batch` 的结果。\n", + "2. `update` - 该函数会在在每个 `batch` 得到结果后被调用,用于更新 `Metric` 的状态;它的参数即为 `evaluate_step` 返回的内容。我们在这里将得到的 `start_logits` 和 `end_logits` 收集起来。\n", + "3. `get_metric` - 该函数会在数据集被迭代完毕后调用,用于计算评测的结果。现在我们有了整个验证集的 `all_start_logits` 和 `all_end_logits` ,将他们传入 `compute_predictions` 函数得到预测的结果,并继续使用 `squad_evaluate` 函数得到评测的结果。\n", + " - 注:`suqad_evaluate` 函数会自己输出评测结果,为了不让其干扰 `FastNLP` 输出,这里我们使用 `contextlib.redirect_stdout(None)` 将函数的标准输出屏蔽掉。\n", + "\n", + "综上,`SquadEvaluateMetric` 实现的评估过程是:将验证集中所有数据的 `logits` 收集起来,然后统一传入 `compute_prediction` 和 `squad_evaluate` 中进行评估。值得一提的是,`paddlenlp.datasets.load_dataset` 返回的结果是一个 `MapDataset` 类型,其 `data` 成员为加载时的数据,`new_data` 为经过 `map` 函数处理后更新的数据,因此可以分别作为 `examples` 和 `features` 传入。" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "from fastNLP.core import Metric\n", + "from paddlenlp.metrics.squad import squad_evaluate, compute_prediction\n", + "import contextlib\n", + "\n", + "class SquadEvaluateMetric(Metric):\n", + " def __init__(self, examples, features, testing=False):\n", + " super(SquadEvaluateMetric, self).__init__(\"paddle\", False)\n", + " self.examples = examples\n", + " self.features = features\n", + " self.all_start_logits = []\n", + " self.all_end_logits = []\n", + " self.testing = testing\n", + "\n", + " def reset(self):\n", + " self.all_start_logits = []\n", + " self.all_end_logits = []\n", + "\n", + " def update(self, start_logits, end_logits):\n", + " for start, end in zip(start_logits, end_logits):\n", + " self.all_start_logits.append(start.numpy())\n", + " self.all_end_logits.append(end.numpy())\n", + "\n", + " def get_metric(self):\n", + " all_predictions, _, _ = compute_prediction(\n", + " 
self.examples, self.features[:len(self.all_start_logits)],\n", + " (self.all_start_logits, self.all_end_logits),\n", + " False, 20, 30\n", + " )\n", + " with contextlib.redirect_stdout(None):\n", + " result = squad_evaluate(\n", + " examples=self.examples,\n", + " preds=all_predictions,\n", + " is_whitespace_splited=False\n", + " )\n", + "\n", + " if self.testing:\n", + " self.print_predictions(all_predictions)\n", + " return result\n", + "\n", + " def print_predictions(self, preds):\n", + " for i, data in enumerate(self.examples):\n", + " if i >= 5:\n", + " break\n", + " print()\n", + " print(\"原文:\", data[\"context\"])\n", + " print(\"问题:\", data[\"question\"], \\\n", + " \"答案:\", preds[data[\"id\"]], \\\n", + " \"正确答案:\", data[\"answers\"][\"text\"])\n", + "\n", + "metric = SquadEvaluateMetric(\n", + " val_dataloader.dataset.data,\n", + " val_dataloader.dataset.new_data,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 3.4 训练\n", + "\n", + "至此所有的准备工作已经完成,可以使用 `Trainer` 进行训练了。学习率我们依旧采用线性预热策略 `LinearDecayWithWarmup`,优化器为 `AdamW`;回调模块我们选择 `LRSchedCallback` 更新学习率和 `LoadBestModelCallback` 监视评测结果的 `f1` 分数。初始化好 `Trainer` 之后,就将训练的过程交给 `FastNLP` 吧。" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
    [19:04:54] INFO     Running evaluator sanity check for 2 batches.              trainer.py:631\n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[2;36m[19:04:54]\u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Running evaluator sanity check for \u001b[1;36m2\u001b[0m batches. \u001b]8;id=367046;file://../fastNLP/core/controllers/trainer.py\u001b\\\u001b[2mtrainer.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=96810;file://../fastNLP/core/controllers/trainer.py#631\u001b\\\u001b[2m631\u001b[0m\u001b]8;;\u001b\\\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n"
    +      ],
    +      "text/plain": []
    +     },
    +     "metadata": {},
    +     "output_type": "display_data"
    +    },
    +    {
    +     "data": {
    +      "text/html": [
    +       "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    ---------------------------- Eval. results on Epoch:0, Batch:100 ----------------------------\n",
    +       "
    \n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m100\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    {\n",
    +       "  \"exact#squad\": 49.25899788285109,\n",
    +       "  \"f1#squad\": 66.55559127349602,\n",
    +       "  \"total#squad\": 1417,\n",
    +       "  \"HasAns_exact#squad\": 49.25899788285109,\n",
    +       "  \"HasAns_f1#squad\": 66.55559127349602,\n",
    +       "  \"HasAns_total#squad\": 1417\n",
    +       "}\n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"exact#squad\"\u001b[0m: \u001b[1;36m49.25899788285109\u001b[0m,\n", + " \u001b[1;34m\"f1#squad\"\u001b[0m: \u001b[1;36m66.55559127349602\u001b[0m,\n", + " \u001b[1;34m\"total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m,\n", + " \u001b[1;34m\"HasAns_exact#squad\"\u001b[0m: \u001b[1;36m49.25899788285109\u001b[0m,\n", + " \u001b[1;34m\"HasAns_f1#squad\"\u001b[0m: \u001b[1;36m66.55559127349602\u001b[0m,\n", + " \u001b[1;34m\"HasAns_total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    ---------------------------- Eval. results on Epoch:0, Batch:200 ----------------------------\n",
    +       "
    \n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m200\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    {\n",
    +       "  \"exact#squad\": 57.37473535638673,\n",
    +       "  \"f1#squad\": 70.93036525200617,\n",
    +       "  \"total#squad\": 1417,\n",
    +       "  \"HasAns_exact#squad\": 57.37473535638673,\n",
    +       "  \"HasAns_f1#squad\": 70.93036525200617,\n",
    +       "  \"HasAns_total#squad\": 1417\n",
    +       "}\n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"exact#squad\"\u001b[0m: \u001b[1;36m57.37473535638673\u001b[0m,\n", + " \u001b[1;34m\"f1#squad\"\u001b[0m: \u001b[1;36m70.93036525200617\u001b[0m,\n", + " \u001b[1;34m\"total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m,\n", + " \u001b[1;34m\"HasAns_exact#squad\"\u001b[0m: \u001b[1;36m57.37473535638673\u001b[0m,\n", + " \u001b[1;34m\"HasAns_f1#squad\"\u001b[0m: \u001b[1;36m70.93036525200617\u001b[0m,\n", + " \u001b[1;34m\"HasAns_total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    ---------------------------- Eval. results on Epoch:0, Batch:300 ----------------------------\n",
    +       "
    \n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m300\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    {\n",
    +       "  \"exact#squad\": 63.86732533521524,\n",
    +       "  \"f1#squad\": 78.62546663568186,\n",
    +       "  \"total#squad\": 1417,\n",
    +       "  \"HasAns_exact#squad\": 63.86732533521524,\n",
    +       "  \"HasAns_f1#squad\": 78.62546663568186,\n",
    +       "  \"HasAns_total#squad\": 1417\n",
    +       "}\n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"exact#squad\"\u001b[0m: \u001b[1;36m63.86732533521524\u001b[0m,\n", + " \u001b[1;34m\"f1#squad\"\u001b[0m: \u001b[1;36m78.62546663568186\u001b[0m,\n", + " \u001b[1;34m\"total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m,\n", + " \u001b[1;34m\"HasAns_exact#squad\"\u001b[0m: \u001b[1;36m63.86732533521524\u001b[0m,\n", + " \u001b[1;34m\"HasAns_f1#squad\"\u001b[0m: \u001b[1;36m78.62546663568186\u001b[0m,\n", + " \u001b[1;34m\"HasAns_total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    ---------------------------- Eval. results on Epoch:0, Batch:400 ----------------------------\n",
    +       "
    \n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m400\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    {\n",
    +       "  \"exact#squad\": 64.92589978828511,\n",
    +       "  \"f1#squad\": 79.36746074079691,\n",
    +       "  \"total#squad\": 1417,\n",
    +       "  \"HasAns_exact#squad\": 64.92589978828511,\n",
    +       "  \"HasAns_f1#squad\": 79.36746074079691,\n",
    +       "  \"HasAns_total#squad\": 1417\n",
    +       "}\n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"exact#squad\"\u001b[0m: \u001b[1;36m64.92589978828511\u001b[0m,\n", + " \u001b[1;34m\"f1#squad\"\u001b[0m: \u001b[1;36m79.36746074079691\u001b[0m,\n", + " \u001b[1;34m\"total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m,\n", + " \u001b[1;34m\"HasAns_exact#squad\"\u001b[0m: \u001b[1;36m64.92589978828511\u001b[0m,\n", + " \u001b[1;34m\"HasAns_f1#squad\"\u001b[0m: \u001b[1;36m79.36746074079691\u001b[0m,\n", + " \u001b[1;34m\"HasAns_total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    ---------------------------- Eval. results on Epoch:0, Batch:500 ----------------------------\n",
    +       "
    \n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m500\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    {\n",
    +       "  \"exact#squad\": 65.70218772053634,\n",
    +       "  \"f1#squad\": 80.33295482054824,\n",
    +       "  \"total#squad\": 1417,\n",
    +       "  \"HasAns_exact#squad\": 65.70218772053634,\n",
    +       "  \"HasAns_f1#squad\": 80.33295482054824,\n",
    +       "  \"HasAns_total#squad\": 1417\n",
    +       "}\n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"exact#squad\"\u001b[0m: \u001b[1;36m65.70218772053634\u001b[0m,\n", + " \u001b[1;34m\"f1#squad\"\u001b[0m: \u001b[1;36m80.33295482054824\u001b[0m,\n", + " \u001b[1;34m\"total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m,\n", + " \u001b[1;34m\"HasAns_exact#squad\"\u001b[0m: \u001b[1;36m65.70218772053634\u001b[0m,\n", + " \u001b[1;34m\"HasAns_f1#squad\"\u001b[0m: \u001b[1;36m80.33295482054824\u001b[0m,\n", + " \u001b[1;34m\"HasAns_total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    ---------------------------- Eval. results on Epoch:0, Batch:600 ----------------------------\n",
    +       "
    \n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m600\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    {\n",
    +       "  \"exact#squad\": 65.41990119971771,\n",
    +       "  \"f1#squad\": 79.7483487059053,\n",
    +       "  \"total#squad\": 1417,\n",
    +       "  \"HasAns_exact#squad\": 65.41990119971771,\n",
    +       "  \"HasAns_f1#squad\": 79.7483487059053,\n",
    +       "  \"HasAns_total#squad\": 1417\n",
    +       "}\n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"exact#squad\"\u001b[0m: \u001b[1;36m65.41990119971771\u001b[0m,\n", + " \u001b[1;34m\"f1#squad\"\u001b[0m: \u001b[1;36m79.7483487059053\u001b[0m,\n", + " \u001b[1;34m\"total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m,\n", + " \u001b[1;34m\"HasAns_exact#squad\"\u001b[0m: \u001b[1;36m65.41990119971771\u001b[0m,\n", + " \u001b[1;34m\"HasAns_f1#squad\"\u001b[0m: \u001b[1;36m79.7483487059053\u001b[0m,\n", + " \u001b[1;34m\"HasAns_total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    ---------------------------- Eval. results on Epoch:0, Batch:700 ----------------------------\n",
    +       "
    \n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m700\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    {\n",
    +       "  \"exact#squad\": 66.61961891319689,\n",
    +       "  \"f1#squad\": 80.32432238994133,\n",
    +       "  \"total#squad\": 1417,\n",
    +       "  \"HasAns_exact#squad\": 66.61961891319689,\n",
    +       "  \"HasAns_f1#squad\": 80.32432238994133,\n",
    +       "  \"HasAns_total#squad\": 1417\n",
    +       "}\n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"exact#squad\"\u001b[0m: \u001b[1;36m66.61961891319689\u001b[0m,\n", + " \u001b[1;34m\"f1#squad\"\u001b[0m: \u001b[1;36m80.32432238994133\u001b[0m,\n", + " \u001b[1;34m\"total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m,\n", + " \u001b[1;34m\"HasAns_exact#squad\"\u001b[0m: \u001b[1;36m66.61961891319689\u001b[0m,\n", + " \u001b[1;34m\"HasAns_f1#squad\"\u001b[0m: \u001b[1;36m80.32432238994133\u001b[0m,\n", + " \u001b[1;34m\"HasAns_total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    ---------------------------- Eval. results on Epoch:0, Batch:800 ----------------------------\n",
    +       "
    \n" + ], + "text/plain": [ + "---------------------------- Eval. results on Epoch:\u001b[1;36m0\u001b[0m, Batch:\u001b[1;36m800\u001b[0m ----------------------------\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    {\n",
    +       "  \"exact#squad\": 65.84333098094567,\n",
    +       "  \"f1#squad\": 79.23169801265415,\n",
    +       "  \"total#squad\": 1417,\n",
    +       "  \"HasAns_exact#squad\": 65.84333098094567,\n",
    +       "  \"HasAns_f1#squad\": 79.23169801265415,\n",
    +       "  \"HasAns_total#squad\": 1417\n",
    +       "}\n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"exact#squad\"\u001b[0m: \u001b[1;36m65.84333098094567\u001b[0m,\n", + " \u001b[1;34m\"f1#squad\"\u001b[0m: \u001b[1;36m79.23169801265415\u001b[0m,\n", + " \u001b[1;34m\"total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m,\n", + " \u001b[1;34m\"HasAns_exact#squad\"\u001b[0m: \u001b[1;36m65.84333098094567\u001b[0m,\n", + " \u001b[1;34m\"HasAns_f1#squad\"\u001b[0m: \u001b[1;36m79.23169801265415\u001b[0m,\n", + " \u001b[1;34m\"HasAns_total#squad\"\u001b[0m: \u001b[1;36m1417\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n"
    +      ],
    +      "text/plain": []
    +     },
    +     "metadata": {},
    +     "output_type": "display_data"
    +    },
    +    {
    +     "data": {
    +      "text/html": [
    +       "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    [19:20:28] INFO     Loading best model from fnlp-ernie-squad/ load_best_model_callback.py:111\n",
    +       "                    2022-06-27-19_00_15_388554/best_so_far                                   \n",
    +       "                    with f1#squad: 80.33295482054824...                                      \n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[2;36m[19:20:28]\u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Loading best model from fnlp-ernie-squad/ \u001b]8;id=163935;file://../fastNLP/core/callbacks/load_best_model_callback.py\u001b\\\u001b[2mload_best_model_callback.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=31503;file://../fastNLP/core/callbacks/load_best_model_callback.py#111\u001b\\\u001b[2m111\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b[2;36m \u001b[0m \u001b[1;36m2022\u001b[0m-\u001b[1;36m06\u001b[0m-\u001b[1;36m27\u001b[0m-19_00_15_388554/best_so_far \u001b[2m \u001b[0m\n", + "\u001b[2;36m \u001b[0m with f1#squad: \u001b[1;36m80.33295482054824\u001b[0m\u001b[33m...\u001b[0m \u001b[2m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
               INFO     Deleting fnlp-ernie-squad/2022-06-27-19_0 load_best_model_callback.py:131\n",
    +       "                    0_15_388554/best_so_far...                                               \n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0m\u001b[34mINFO \u001b[0m Deleting fnlp-ernie-squad/\u001b[1;36m2022\u001b[0m-\u001b[1;36m06\u001b[0m-\u001b[1;36m27\u001b[0m-19_0 \u001b]8;id=560859;file://../fastNLP/core/callbacks/load_best_model_callback.py\u001b\\\u001b[2mload_best_model_callback.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=573263;file://../fastNLP/core/callbacks/load_best_model_callback.py#131\u001b\\\u001b[2m131\u001b[0m\u001b]8;;\u001b\\\n", + "\u001b[2;36m \u001b[0m 0_15_388554/best_so_far\u001b[33m...\u001b[0m \u001b[2m \u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from fastNLP import Trainer, LRSchedCallback, LoadBestModelCallback\n", + "from paddlenlp.transformers import LinearDecayWithWarmup\n", + "\n", + "n_epochs = 1\n", + "num_training_steps = len(train_dataloader) * n_epochs\n", + "lr_scheduler = LinearDecayWithWarmup(3e-5, num_training_steps, 0.1)\n", + "optimizer = paddle.optimizer.AdamW(\n", + " learning_rate=lr_scheduler,\n", + " parameters=model.parameters(),\n", + ")\n", + "callbacks=[\n", + " LRSchedCallback(lr_scheduler, step_on=\"batch\"),\n", + " LoadBestModelCallback(\"f1#squad\", larger_better=True, save_folder=\"fnlp-ernie-squad\")\n", + "]\n", + "trainer = Trainer(\n", + " model=model,\n", + " train_dataloader=train_dataloader,\n", + " evaluate_dataloaders=val_dataloader,\n", + " device=1,\n", + " optimizers=optimizer,\n", + " n_epochs=n_epochs,\n", + " callbacks=callbacks,\n", + " evaluate_every=100,\n", + " metrics={\"squad\": metric},\n", + ")\n", + "trainer.run()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 3.5 测试\n", + "\n", + "最后,我们可以使用 `Evaluator` 查看我们训练的结果。我们在之前为 `SquadEvaluateMetric` 设置了 `testing` 参数来在测试阶段进行输出,可以看到,训练的结果还是比较不错的。" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    原文: 爬行垫根据中间材料的不同可以分为:XPE爬行垫、EPE爬行垫、EVA爬行垫、PVC爬行垫;其中XPE爬\n",
    +       "行垫、EPE爬行垫都属于PE材料加保鲜膜复合而成,都是无异味的环保材料,但是XPE爬行垫是品质较好的爬\n",
    +       "行垫,韩国进口爬行垫都是这种爬行垫,而EPE爬行垫是国内厂家为了减低成本,使用EPE(珍珠棉)作为原料生\n",
    +       "产的一款爬行垫,该材料弹性差,易碎,开孔发泡防水性弱。EVA爬行垫、PVC爬行垫是用EVA或PVC作为原材料\n",
    +       "与保鲜膜复合的而成的爬行垫,或者把图案转印在原材料上,这两款爬行垫通常有异味,如果是图案转印的爬\n",
    +       "行垫,油墨外露容易脱落。 \n",
    +       "当时我儿子爬的时候,我们也买了垫子,但是始终有味。最后就没用了,铺的就的薄毯子让他爬。\n",
    +       "
    \n" + ], + "text/plain": [ + "原文: 爬行垫根据中间材料的不同可以分为:XPE爬行垫、EPE爬行垫、EVA爬行垫、PVC爬行垫;其中XPE爬\n", + "行垫、EPE爬行垫都属于PE材料加保鲜膜复合而成,都是无异味的环保材料,但是XPE爬行垫是品质较好的爬\n", + "行垫,韩国进口爬行垫都是这种爬行垫,而EPE爬行垫是国内厂家为了减低成本,使用EPE(珍珠棉)作为原料生\n", + "产的一款爬行垫,该材料弹性差,易碎,开孔发泡防水性弱。EVA爬行垫、PVC爬行垫是用EVA或PVC作为原材料\n", + "与保鲜膜复合的而成的爬行垫,或者把图案转印在原材料上,这两款爬行垫通常有异味,如果是图案转印的爬\n", + "行垫,油墨外露容易脱落。 \n", + "当时我儿子爬的时候,我们也买了垫子,但是始终有味。最后就没用了,铺的就的薄毯子让他爬。\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    问题: 爬行垫什么材质的好 答案: EPE(珍珠棉 正确答案: ['XPE']\n",
    +       "
    \n" + ], + "text/plain": [ + "问题: 爬行垫什么材质的好 答案: EPE(珍珠棉 正确答案: ['XPE']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    原文: 真实情况是160-162。她平时谎报的168是因为不离脚穿高水台恨天高(15厘米) 图1她穿着高水台恨\n",
    +       "天高和刘亦菲一样高,(刘亦菲对外报身高172)范冰冰礼服下厚厚的高水台暴露了她的心机,对比一下两者的\n",
    +       "鞋子吧 图2 穿着高水台恨天高才和刘德华谢霆锋持平,如果她真的有168,那么加上鞋高,刘和谢都要有180?\n",
    +       "明显是不可能的。所以刘德华对外报的身高174减去10-15厘米才是范冰冰的真实身高 图3,范冰冰有一次脱\n",
    +       "鞋上场,这个最说明问题了,看看她的身体比例吧。还有目测一下她手上鞋子的鞋跟有多高多厚吧,至少超过\n",
    +       "10厘米。\n",
    +       "
    \n" + ], + "text/plain": [ + "原文: 真实情况是160-162。她平时谎报的168是因为不离脚穿高水台恨天高(15厘米) 图1她穿着高水台恨\n", + "天高和刘亦菲一样高,(刘亦菲对外报身高172)范冰冰礼服下厚厚的高水台暴露了她的心机,对比一下两者的\n", + "鞋子吧 图2 穿着高水台恨天高才和刘德华谢霆锋持平,如果她真的有168,那么加上鞋高,刘和谢都要有180?\n", + "明显是不可能的。所以刘德华对外报的身高174减去10-15厘米才是范冰冰的真实身高 图3,范冰冰有一次脱\n", + "鞋上场,这个最说明问题了,看看她的身体比例吧。还有目测一下她手上鞋子的鞋跟有多高多厚吧,至少超过\n", + "10厘米。\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    问题: 范冰冰多高真实身高 答案: 160-162 正确答案: ['160-162']\n",
    +       "
    \n" + ], + "text/plain": [ + "问题: 范冰冰多高真实身高 答案: 160-162 正确答案: ['160-162']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    原文: 防水作为目前高端手机的标配,特别是苹果也支持防水之后,国产大多数高端旗舰手机都已经支持防\n",
    +       "水。虽然我们真的不会故意把手机放入水中,但是有了防水之后,用户心里会多一重安全感。那么近日最为\n",
    +       "火热的小米6防水吗?小米6的防水级别又是多少呢? 小编查询了很多资料发现,小米6确实是防水的,但是为\n",
    +       "了保持低调,同时为了不被别人说防水等级不够,很多资料都没有标注小米是否防水。根据评测资料显示,小\n",
    +       "米6是支持IP68级的防水,是绝对能够满足日常生活中的防水需求的。\n",
    +       "
    \n" + ], + "text/plain": [ + "原文: 防水作为目前高端手机的标配,特别是苹果也支持防水之后,国产大多数高端旗舰手机都已经支持防\n", + "水。虽然我们真的不会故意把手机放入水中,但是有了防水之后,用户心里会多一重安全感。那么近日最为\n", + "火热的小米6防水吗?小米6的防水级别又是多少呢? 小编查询了很多资料发现,小米6确实是防水的,但是为\n", + "了保持低调,同时为了不被别人说防水等级不够,很多资料都没有标注小米是否防水。根据评测资料显示,小\n", + "米6是支持IP68级的防水,是绝对能够满足日常生活中的防水需求的。\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    问题: 小米6防水等级 答案: IP68级 正确答案: ['IP68级']\n",
    +       "
    \n" + ], + "text/plain": [ + "问题: 小米6防水等级 答案: IP68级 正确答案: ['IP68级']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    原文: 这位朋友你好,女性出现妊娠反应一般是从6-12周左右,也就是女性怀孕1个多月就会开始出现反应,\n",
    +       "第3个月的时候,妊辰反应基本结束。 而大部分女性怀孕初期都会出现恶心、呕吐的感觉,这些症状都是因\n",
    +       "人而异的,除非恶心、呕吐的非常厉害,才需要就医,否则这些都是刚怀孕的的正常症状。1-3个月的时候可\n",
    +       "以观察一下自己的皮肤,一般女性怀孕初期可能会产生皮肤色素沉淀或是腹壁产生妊娠纹,特别是在怀孕的\n",
    +       "后期更加明显。 还有很多女性怀孕初期会出现疲倦、嗜睡的情况。怀孕三个月的时候,膀胱会受到日益胀\n",
    +       "大的子宫的压迫,容量会变小,所以怀孕期间也会有尿频的现象出现。月经停止也是刚怀孕最容易出现的症\n",
    +       "状,只要是平时月经正常的女性,在性行为后超过正常经期两周,就有可能是怀孕了。 如果你想判断自己是\n",
    +       "否怀孕,可以看看自己有没有这些反应。当然这也只是多数人的怀孕表现,也有部分女性怀孕表现并不完全\n",
    +       "是这样,如果你无法确定自己是否怀孕,最好去医院检查一下。\n",
    +       "
    \n" + ], + "text/plain": [ + "原文: 这位朋友你好,女性出现妊娠反应一般是从6-12周左右,也就是女性怀孕1个多月就会开始出现反应,\n", + "第3个月的时候,妊辰反应基本结束。 而大部分女性怀孕初期都会出现恶心、呕吐的感觉,这些症状都是因\n", + "人而异的,除非恶心、呕吐的非常厉害,才需要就医,否则这些都是刚怀孕的的正常症状。1-3个月的时候可\n", + "以观察一下自己的皮肤,一般女性怀孕初期可能会产生皮肤色素沉淀或是腹壁产生妊娠纹,特别是在怀孕的\n", + "后期更加明显。 还有很多女性怀孕初期会出现疲倦、嗜睡的情况。怀孕三个月的时候,膀胱会受到日益胀\n", + "大的子宫的压迫,容量会变小,所以怀孕期间也会有尿频的现象出现。月经停止也是刚怀孕最容易出现的症\n", + "状,只要是平时月经正常的女性,在性行为后超过正常经期两周,就有可能是怀孕了。 如果你想判断自己是\n", + "否怀孕,可以看看自己有没有这些反应。当然这也只是多数人的怀孕表现,也有部分女性怀孕表现并不完全\n", + "是这样,如果你无法确定自己是否怀孕,最好去医院检查一下。\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    问题: 怀孕多久会有反应 答案: 6-12周左右 正确答案: ['6-12周左右', '6-12周', '1个多月']\n",
    +       "
    \n" + ], + "text/plain": [ + "问题: 怀孕多久会有反应 答案: 6-12周左右 正确答案: ['6-12周左右', '6-12周', '1个多月']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n",
    +       "
    \n" + ], + "text/plain": [ + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    原文: 【东奥会计在线——中级会计职称频道推荐】根据《关于提高科技型中小企业研究开发费用税前加计\n",
    +       "扣除比例的通知》的规定,研发费加计扣除比例提高到75%。|财政部、国家税务总局、科技部发布《关于提\n",
    +       "高科技型中小企业研究开发费用税前加计扣除比例的通知》。|通知称,为进一步激励中小企业加大研发投\n",
    +       "入,支持科技创新,就提高科技型中小企业研究开发费用(以下简称研发费用)税前加计扣除比例有关问题发\n",
    +       "布通知。|通知明确,科技型中小企业开展研发活动中实际发生的研发费用,未形成无形资产计入当期损益的\n",
    +       ",在按规定据实扣除的基础上,在2017年1月1日至2019年12月31日期间,再按照实际发生额的75%在税前加计\n",
    +       "扣除;形成无形资产的,在上述期间按照无形资产成本的175%在税前摊销。|科技型中小企业享受研发费用税\n",
    +       "前加计扣除政策的其他政策口径按照《财政部国家税务总局科技部关于完善研究开发费用税前加计扣除政\n",
    +       "策的通知》(财税〔2015〕119号)规定执行。|科技型中小企业条件和管理办法由科技部、财政部和国家税\n",
    +       "务总局另行发布。科技、财政和税务部门应建立信息共享机制,及时共享科技型中小企业的相关信息,加强\n",
    +       "协调配合,保障优惠政策落实到位。|上一篇文章:关于2016年度企业研究开发费用税前加计扣除政策企业所\n",
    +       "得税纳税申报问题的公告 下一篇文章:关于提高科技型中小企业研究开发费用税前加计扣除比例的通知\n",
    +       "
    \n" + ], + "text/plain": [ + "原文: 【东奥会计在线——中级会计职称频道推荐】根据《关于提高科技型中小企业研究开发费用税前加计\n", + "扣除比例的通知》的规定,研发费加计扣除比例提高到75%。|财政部、国家税务总局、科技部发布《关于提\n", + "高科技型中小企业研究开发费用税前加计扣除比例的通知》。|通知称,为进一步激励中小企业加大研发投\n", + "入,支持科技创新,就提高科技型中小企业研究开发费用(以下简称研发费用)税前加计扣除比例有关问题发\n", + "布通知。|通知明确,科技型中小企业开展研发活动中实际发生的研发费用,未形成无形资产计入当期损益的\n", + ",在按规定据实扣除的基础上,在2017年1月1日至2019年12月31日期间,再按照实际发生额的75%在税前加计\n", + "扣除;形成无形资产的,在上述期间按照无形资产成本的175%在税前摊销。|科技型中小企业享受研发费用税\n", + "前加计扣除政策的其他政策口径按照《财政部国家税务总局科技部关于完善研究开发费用税前加计扣除政\n", + "策的通知》(财税〔2015〕119号)规定执行。|科技型中小企业条件和管理办法由科技部、财政部和国家税\n", + "务总局另行发布。科技、财政和税务部门应建立信息共享机制,及时共享科技型中小企业的相关信息,加强\n", + "协调配合,保障优惠政策落实到位。|上一篇文章:关于2016年度企业研究开发费用税前加计扣除政策企业所\n", + "得税纳税申报问题的公告 下一篇文章:关于提高科技型中小企业研究开发费用税前加计扣除比例的通知\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    问题: 研发费用加计扣除比例 答案: 75% 正确答案: ['75%']\n",
    +       "
    \n" + ], + "text/plain": [ + "问题: 研发费用加计扣除比例 答案: 75% 正确答案: ['75%']\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
    \n"
    +      ],
    +      "text/plain": []
    +     },
    +     "metadata": {},
    +     "output_type": "display_data"
    +    },
    +    {
    +     "data": {
    +      "text/html": [
    +       "
    {\n",
    +       "    'exact#squad': 65.70218772053634,\n",
    +       "    'f1#squad': 80.33295482054824,\n",
    +       "    'total#squad': 1417,\n",
    +       "    'HasAns_exact#squad': 65.70218772053634,\n",
    +       "    'HasAns_f1#squad': 80.33295482054824,\n",
    +       "    'HasAns_total#squad': 1417\n",
    +       "}\n",
    +       "
    \n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[32m'exact#squad'\u001b[0m: \u001b[1;36m65.70218772053634\u001b[0m,\n", + " \u001b[32m'f1#squad'\u001b[0m: \u001b[1;36m80.33295482054824\u001b[0m,\n", + " \u001b[32m'total#squad'\u001b[0m: \u001b[1;36m1417\u001b[0m,\n", + " \u001b[32m'HasAns_exact#squad'\u001b[0m: \u001b[1;36m65.70218772053634\u001b[0m,\n", + " \u001b[32m'HasAns_f1#squad'\u001b[0m: \u001b[1;36m80.33295482054824\u001b[0m,\n", + " \u001b[32m'HasAns_total#squad'\u001b[0m: \u001b[1;36m1417\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from fastNLP import Evaluator\n", + "evaluator = Evaluator(\n", + " model=model,\n", + " dataloaders=val_dataloader,\n", + " device=1,\n", + " metrics={\n", + " \"squad\": SquadEvaluateMetric(\n", + " val_dataloader.dataset.data,\n", + " val_dataloader.dataset.new_data,\n", + " testing=True,\n", + " ),\n", + " },\n", + ")\n", + "result = evaluator.run()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.7.13 ('fnlp-paddle')", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.13" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "31f2d9d3efc23c441973d7c4273acfea8b132b6a578f002629b6b44b8f65e720" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 4cdfdc77c7993633aef82fb1c62065b0f2fa8766 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Thu, 30 Jun 2022 06:28:17 +0000 Subject: [PATCH 44/52] =?UTF-8?q?oneflow=20=E5=8A=A8=E6=80=81=E5=9B=BE=20?= =?UTF-8?q?=E5=8D=95=E5=8D=A1=E5=92=8Cddp=E9=83=A8=E5=88=86=E5=8F=8A?= =?UTF-8?q?=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit --- fastNLP/core/__init__.py | 4 + fastNLP/core/collators/collator.py | 10 +- fastNLP/core/collators/padders/get_padder.py | 7 + .../core/collators/padders/numpy_padder.py | 6 +- .../core/collators/padders/oneflow_padder.py | 204 ++++ fastNLP/core/collators/padders/raw_padder.py | 6 +- .../core/collators/padders/torch_padder.py | 8 +- fastNLP/core/dataloaders/__init__.py | 3 + .../oneflow_dataloader/__init__.py | 6 + .../dataloaders/oneflow_dataloader/fdl.py | 353 +++++++ .../core/dataloaders/prepare_dataloader.py | 8 +- fastNLP/core/drivers/__init__.py | 21 +- fastNLP/core/drivers/choose_driver.py | 9 +- .../core/drivers/oneflow_driver/__init__.py | 18 + fastNLP/core/drivers/oneflow_driver/ddp.py | 323 ++++++ .../core/drivers/oneflow_driver/dist_utils.py | 306 ++++++ .../initialize_oneflow_driver.py | 70 ++ .../drivers/oneflow_driver/oneflow_driver.py | 445 ++++++++ .../drivers/oneflow_driver/single_device.py | 114 +++ fastNLP/core/drivers/oneflow_driver/utils.py | 292 ++++++ fastNLP/core/metrics/backend/auto_backend.py | 3 + .../backend/oneflow_backend/__init__.py | 0 .../backend/oneflow_backend/backend.py | 130 +++ fastNLP/core/utils/__init__.py | 5 + fastNLP/core/utils/oneflow_utils.py | 69 ++ fastNLP/envs/imports.py | 1 + fastNLP/envs/set_backend.py | 5 +- fastNLP/envs/set_env_on_import.py | 10 + .../core/collators/padders/test_get_padder.py | 63 +- .../collators/padders/test_oneflow_padder.py | 105 ++ tests/core/collators/test_collator.py | 136 ++- .../core/controllers/_test_trainer_oneflow.py | 96 ++ .../core/controllers/test_trainer_oneflow.py | 70 ++ .../oneflow_dataloader/__init__.py | 0 .../oneflow_dataloader/test_fdl.py | 169 ++++ tests/core/drivers/oneflow_driver/__init__.py | 0 tests/core/drivers/oneflow_driver/dist.py | 78 ++ tests/core/drivers/oneflow_driver/test_ddp.py | 948 ++++++++++++++++++ .../drivers/oneflow_driver/test_dist_utils.py | 157 +++ .../test_initialize_oneflow_driver.py | 76 ++ 
.../oneflow_driver/test_single_device.py | 790 +++++++++++++++ .../core/drivers/oneflow_driver/test_utils.py | 39 + tests/helpers/datasets/oneflow_data.py | 55 + tests/helpers/models/oneflow_model.py | 43 + tests/helpers/utils.py | 40 + tests/pytest.ini | 4 +- 46 files changed, 5242 insertions(+), 63 deletions(-) create mode 100644 fastNLP/core/collators/padders/oneflow_padder.py create mode 100644 fastNLP/core/dataloaders/oneflow_dataloader/__init__.py create mode 100644 fastNLP/core/dataloaders/oneflow_dataloader/fdl.py create mode 100644 fastNLP/core/drivers/oneflow_driver/__init__.py create mode 100644 fastNLP/core/drivers/oneflow_driver/ddp.py create mode 100644 fastNLP/core/drivers/oneflow_driver/dist_utils.py create mode 100644 fastNLP/core/drivers/oneflow_driver/initialize_oneflow_driver.py create mode 100644 fastNLP/core/drivers/oneflow_driver/oneflow_driver.py create mode 100644 fastNLP/core/drivers/oneflow_driver/single_device.py create mode 100644 fastNLP/core/drivers/oneflow_driver/utils.py create mode 100644 fastNLP/core/metrics/backend/oneflow_backend/__init__.py create mode 100644 fastNLP/core/metrics/backend/oneflow_backend/backend.py create mode 100644 fastNLP/core/utils/oneflow_utils.py create mode 100644 tests/core/collators/padders/test_oneflow_padder.py create mode 100644 tests/core/controllers/_test_trainer_oneflow.py create mode 100644 tests/core/controllers/test_trainer_oneflow.py create mode 100644 tests/core/dataloaders/oneflow_dataloader/__init__.py create mode 100644 tests/core/dataloaders/oneflow_dataloader/test_fdl.py create mode 100644 tests/core/drivers/oneflow_driver/__init__.py create mode 100644 tests/core/drivers/oneflow_driver/dist.py create mode 100644 tests/core/drivers/oneflow_driver/test_ddp.py create mode 100644 tests/core/drivers/oneflow_driver/test_dist_utils.py create mode 100644 tests/core/drivers/oneflow_driver/test_initialize_oneflow_driver.py create mode 100644 tests/core/drivers/oneflow_driver/test_single_device.py 
create mode 100644 tests/core/drivers/oneflow_driver/test_utils.py create mode 100644 tests/helpers/datasets/oneflow_data.py create mode 100644 tests/helpers/models/oneflow_model.py diff --git a/fastNLP/core/__init__.py b/fastNLP/core/__init__.py index 6cf73d3b..459c741b 100644 --- a/fastNLP/core/__init__.py +++ b/fastNLP/core/__init__.py @@ -46,9 +46,11 @@ __all__ = [ 'TorchDataLoader', 'PaddleDataLoader', 'JittorDataLoader', + 'OneflowDataLoader', 'prepare_jittor_dataloader', 'prepare_paddle_dataloader', 'prepare_torch_dataloader', + 'prepare_oneflow_dataloader', "prepare_dataloader", # dataset @@ -63,6 +65,8 @@ __all__ = [ "PaddleFleetDriver", "JittorSingleDriver", "JittorMPIDriver", + "OneflowSingleDriver", + "OneflowDDPDriver", # log "logger", diff --git a/fastNLP/core/collators/collator.py b/fastNLP/core/collators/collator.py index dab5028c..5fbdacb9 100644 --- a/fastNLP/core/collators/collator.py +++ b/fastNLP/core/collators/collator.py @@ -18,7 +18,7 @@ from .packer_unpacker import SequencePackerUnpacker, SinglePackerUnpacker, Mappi NestedMappingPackerUnpacker sequence_idx_str = re.compile(r'^_\d+$') # 形如_0, _1 -SUPPORTED_BACKENDS = ['torch', 'jittor', 'paddle', 'numpy', 'raw', 'auto', None] +SUPPORTED_BACKENDS = ['torch', 'jittor', 'paddle', 'oneflow', 'numpy', 'raw', 'auto', None] # 由于 jittor DataLoader 存在自动的 to_jittor 的转换,所以只需要 collate 成为 numpy 就行 AUTO_BACKEND_MAPPING = {'jittor': 'numpy'} @@ -103,7 +103,7 @@ class Collator: Collator 在第一次进行 pad 的时候自动根据设置以及数据情况,为每个 field 获取一个 padder ,在之后的每次调用中,都将使用对应 的 Padder 给对应的 field 。 - :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ['torch','jittor','paddle','numpy','raw', auto, None]。 + :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ['torch','jittor','paddle','oneflow','numpy','raw', auto, None]。 若为 'auto' ,则在进行 pad 的时候会根据调用的环境决定其 backend 。该参数对不能进行 pad 的数据没用影响,不能 pad 的数据返回一定是 list 。 """ @@ -200,8 +200,8 @@ class Collator: field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 无意义。 :param 
dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'auto'],分别代表,输出为 list, numpy.ndarray, - torch.Tensor, paddle.Tensor, jittor.Var 类型。若 pad_val 为 None ,该值无意义 。 + :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto'],分别代表,输出为 list, numpy.ndarray, + torch.Tensor, paddle.Tensor, jittor.Var oneflow.Tensor 类型。若 pad_val 为 None ,该值无意义 。 :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch 形式,输出将被直接作为结果输出。 @@ -275,7 +275,7 @@ class Collator: """ 设置可以 pad 的 field 默认 pad 为什么类型的 tensor - :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ['torch','jittor','paddle','numpy','raw', 'auto', None], + :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ['torch','jittor','paddle','oneflow','numpy','raw', 'auto', None], 若为 auto ,则在进行 pad 的时候会自动根据调用的环境决定其 backend 。 :return: """ diff --git a/fastNLP/core/collators/padders/get_padder.py b/fastNLP/core/collators/padders/get_padder.py index b0a82849..6416a978 100644 --- a/fastNLP/core/collators/padders/get_padder.py +++ b/fastNLP/core/collators/padders/get_padder.py @@ -10,6 +10,7 @@ from .torch_padder import TorchNumberPadder, TorchSequencePadder, TorchTensorPad from .raw_padder import RawNumberPadder, RawSequencePadder, RawTensorPadder from .paddle_padder import PaddleTensorPadder, PaddleSequencePadder, PaddleNumberPadder from .jittor_padder import JittorTensorPadder, JittorSequencePadder, JittorNumberPadder +from .oneflow_padder import OneflowTensorPadder, OneflowSequencePadder, OneflowNumberPadder from .exceptions import * @@ -91,6 +92,8 @@ def get_padder(batch_field:Sequence[Any], pad_val, dtype, backend, field_name)-> return PaddleNumberPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) elif backend == 'jittor': return JittorNumberPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) + elif backend == 
'oneflow': + return OneflowNumberPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) else: raise ValueError(f"backend={backend} is not supported for list(Field:{field_name}).") @@ -105,6 +108,8 @@ def get_padder(batch_field:Sequence[Any], pad_val, dtype, backend, field_name)-> return PaddleSequencePadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) elif backend == 'jittor': return JittorSequencePadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) + elif backend == 'oneflow': + return OneflowSequencePadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) else: raise ValueError(f"backend={backend} is not supported for nested list(Field:{field_name}).") @@ -121,6 +126,8 @@ def get_padder(batch_field:Sequence[Any], pad_val, dtype, backend, field_name)-> return PaddleTensorPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) elif backend == 'jittor': return JittorTensorPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) + elif backend == 'oneflow': + return OneflowTensorPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype) else: raise ValueError(f"backend={backend} is not supported for tensors(Field:{field_name}).") diff --git a/fastNLP/core/collators/padders/numpy_padder.py b/fastNLP/core/collators/padders/numpy_padder.py index b6edba04..499fdb8b 100644 --- a/fastNLP/core/collators/padders/numpy_padder.py +++ b/fastNLP/core/collators/padders/numpy_padder.py @@ -18,9 +18,9 @@ def _get_dtype(ele_dtype, dtype, class_name): """ 用于检测数据的 dtype 类型, 根据内部和外部数据判断。 - :param ele_dtype 内部数据的类型 - :param dtype 数据外部类型 - :param class_name 类的名称 + :param ele_dtype: 内部数据的类型 + :param dtype: 数据外部类型 + :param class_name: 类的名称 """ if ele_dtype is not None and not is_number_or_numpy_number(ele_dtype): raise EleDtypeUnsupportedError(f"`{class_name}` only supports padding python numbers " diff --git a/fastNLP/core/collators/padders/oneflow_padder.py b/fastNLP/core/collators/padders/oneflow_padder.py new file mode 100644 index 00000000..c218bcca --- /dev/null +++ 
__all__ = [
    'OneflowNumberPadder',
    'OneflowSequencePadder',
    'OneflowTensorPadder'
]
from inspect import isclass
import numpy as np

from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW

if _NEED_IMPORT_ONEFLOW:
    import oneflow
    # numpy scalar class -> oneflow dtype. np.float64 is deliberately mapped
    # to oneflow.float32: numpy defaults to float64 almost everywhere, so
    # keeping float64 would silently double the memory of most float fields.
    numpy_to_oneflow_dtype_dict = {
        np.bool_: oneflow.bool,
        np.uint8: oneflow.uint8,
        np.int8: oneflow.int8,
        np.int32: oneflow.int32,
        np.int64: oneflow.int64,
        np.float16: oneflow.float16,
        np.float32: oneflow.float32,
        np.float64: oneflow.float32,
    }
    # python number type -> oneflow dtype. ``float`` maps to float32 because
    # ``oneflow.tensor([1], dtype=float)`` would otherwise produce float64.
    number_to_oneflow_dtype_dict = {
        float: oneflow.float32,
        int: oneflow.int64,
        bool: oneflow.bool
    }

from .padder import Padder
from .utils import is_number_or_numpy_number, is_number, is_numpy_number_dtype, get_shape, is_numpy_generic_class
from .exceptions import *


def is_oneflow_tensor(dtype):
    """
    Check whether ``dtype`` is an ``oneflow.dtype`` instance (not a class).

    :param dtype: the dtype object to check
    :return: ``True`` if ``dtype`` is an ``oneflow.dtype`` instance
    """
    return not isclass(dtype) and isinstance(dtype, oneflow.dtype)


def _get_dtype(ele_dtype, dtype, class_name):
    """
    Resolve the dtype a padder should output, validating both the element
    dtype found in the data and the dtype requested by the user.

    :param ele_dtype: dtype of the elements inside the field
    :param dtype: dtype requested from the outside; takes precedence over ``ele_dtype``
    :param class_name: name of the calling padder class, used in error messages
    :return: the resolved dtype (may be ``None`` when nothing can be inferred)
    :raises EleDtypeUnsupportedError: if ``ele_dtype`` is neither a python/numpy
        number type nor an ``oneflow.dtype``
    :raises DtypeUnsupportedError: if ``dtype`` is neither a python number type
        nor an ``oneflow.dtype``
    """
    if not (ele_dtype is None or is_number_or_numpy_number(ele_dtype) or is_oneflow_tensor(ele_dtype)):
        raise EleDtypeUnsupportedError(f"`{class_name}` only supports padding python numbers "
                                       f"or numpy numbers or oneflow.Tensor but get `{ele_dtype}`.")

    if dtype is not None:
        if not (is_oneflow_tensor(dtype) or is_number(dtype)):
            raise DtypeUnsupportedError(f"The dtype of `{class_name}` only supports python numbers "
                                        f"or oneflow.dtype but get `{dtype}`.")
        dtype = number_to_oneflow_dtype_dict.get(dtype, dtype)
    elif ele_dtype is not None:
        if is_number(ele_dtype) or is_oneflow_tensor(ele_dtype):
            dtype = number_to_oneflow_dtype_dict.get(ele_dtype, ele_dtype)
        elif is_numpy_number_dtype(ele_dtype):  # np.dtype instance: map via its scalar class
            dtype = numpy_to_oneflow_dtype_dict.get(ele_dtype.type)
        elif is_numpy_generic_class(ele_dtype):
            dtype = numpy_to_oneflow_dtype_dict.get(ele_dtype)

    return dtype


class OneflowNumberPadder(Padder):
    """
    Convert batches of plain numbers such as ``[1, 2, 3]`` into
    ``oneflow.Tensor([1, 2, 3])``.

    :param pad_val: unused — single numbers never need padding
    :param ele_dtype: element dtype, used to validate that the field can be
        converted to an oneflow tensor
    :param dtype: dtype of the returned tensor, e.g. ``oneflow.long``,
        ``oneflow.float32``, ``int``, ``float``
    """
    def __init__(self, pad_val=0, ele_dtype=None, dtype=None):
        dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__)
        super().__init__(pad_val=pad_val, dtype=dtype)

    @staticmethod
    def pad(batch_field, pad_val=0, dtype=None):
        return oneflow.tensor(batch_field, dtype=dtype)


class OneflowSequencePadder(Padder):
    """
    Pad nested sequences, e.g. ``[[1], [1, 2]]`` becomes
    ``oneflow.Tensor([[1, 0], [1, 2]])``; arbitrarily nested input is supported.

    :param pad_val: value used for padding
    :param ele_dtype: element dtype, used to validate that the field can be
        converted to an oneflow tensor
    :param dtype: dtype of the returned tensor, e.g. ``oneflow.long``,
        ``oneflow.float32``, ``int``, ``float``
    """
    def __init__(self, pad_val=0, ele_dtype=None, dtype=None):
        dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__)
        super().__init__(pad_val=pad_val, dtype=dtype)

    @staticmethod
    def pad(batch_field, pad_val=0, dtype=None):
        return get_padded_oneflow_tensor(batch_field, dtype=dtype, pad_val=pad_val)


class OneflowTensorPadder(Padder):
    """
    Pad batches whose elements are already tensors, e.g.
    ``[oneflow.tensor([3, 2]), oneflow.tensor([1])]``. Elements that are not
    ``oneflow.Tensor`` (e.g. numpy arrays) must provide a ``tolist()`` method.

    >>> OneflowTensorPadder.pad([np.array([3, 4]), np.array([1])], pad_val=-100)
    [[   3.    4.]
     [   1. -100.]]
    >>> OneflowTensorPadder.pad([oneflow.LongTensor([3, 4]), oneflow.LongTensor([1])], pad_val=-100)
    tensor([[   3,    4],
            [   1, -100]])

    :param pad_val: value used for padding
    :param ele_dtype: element dtype, used to validate that the field can be
        converted to an oneflow tensor
    :param dtype: dtype of the returned tensor, e.g. ``oneflow.long``,
        ``oneflow.float32``, ``int``, ``float``
    """
    def __init__(self, pad_val=0, ele_dtype=None, dtype=None):
        dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__)
        super().__init__(pad_val=pad_val, dtype=dtype)

    @staticmethod
    def pad(batch_field, pad_val=0, dtype=None):
        device = None
        try:
            if not isinstance(batch_field[0], oneflow.Tensor):
                # non-tensor elements must expose tolist() (e.g. numpy arrays)
                batch_field = [oneflow.tensor(field.tolist(), dtype=dtype) for field in batch_field]
            else:
                if dtype is not None:  # bug fix: do not call Tensor.to(None)
                    batch_field = [field.to(dtype) for field in batch_field]
                device = batch_field[0].device
            if dtype is None:
                dtype = batch_field[0].dtype
        except AttributeError:
            raise RuntimeError(f"If the field is not a oneflow.Tensor (it is {type(batch_field[0])}), "
                               f"it must have tolist() method.")

        shapes = [field.shape for field in batch_field]
        if len(batch_field) < 2:
            max_shape = [len(batch_field)] + list(shapes[0])
        else:
            # element-wise maximum over all shapes
            max_shape = [len(batch_field)] + [max(*_) for _ in zip(*shapes)]

        tensor = oneflow.full(max_shape, value=pad_val, dtype=dtype, device=device)
        for i, field in enumerate(batch_field):
            slices = (i,) + tuple(slice(0, s) for s in shapes[i])
            tensor[slices] = field
        return tensor


def fill_tensor(batch_field, padded_batch, dtype):
    """
    Copy the (ragged) contents of ``batch_field`` into the pre-allocated
    ``padded_batch`` tensor.

    :param batch_field: nested lists holding the values to copy
    :param padded_batch: target tensor, already filled with the pad value
    :param dtype: dtype used when converting the copied slices
    :return: the filled tensor
    :raises RuntimeError: if ``padded_batch`` has 0 or more than 4 dimensions
    """
    if padded_batch.ndim == 2:
        for i, content_i in enumerate(batch_field):
            padded_batch[i, :len(content_i)] = oneflow.tensor(content_i, dtype=dtype)
    elif padded_batch.ndim == 3:
        for i, content_i in enumerate(batch_field):
            for j, content_ii in enumerate(content_i):
                padded_batch[i, j, :len(content_ii)] = oneflow.tensor(content_ii, dtype=dtype)
    elif padded_batch.ndim == 4:
        # 4d fields are usually images with uniform shapes, so a direct
        # conversion normally succeeds; fall back to per-slice copy otherwise
        try:
            padded_batch = oneflow.tensor(batch_field)
        except Exception:
            for i, content_i in enumerate(batch_field):
                for j, content_ii in enumerate(content_i):
                    for k, content_iii in enumerate(content_ii):
                        padded_batch[i, j, k, :len(content_iii)] = oneflow.tensor(content_iii, dtype=dtype)
    elif padded_batch.ndim == 1:
        padded_batch[:] = oneflow.tensor(batch_field, dtype=dtype)
    else:
        # bug fix: the old message said "more than 3 dimensions" although
        # 4-dimensional input is handled above.
        raise RuntimeError("fastNLP does not support padding for more than 4 dimensions. If you need this, please "
                           "report.")
    return padded_batch


def get_padded_oneflow_tensor(batch_field, dtype=None, pad_val=0):
    """
    Pad a (possibly nested) list into a single tensor, e.g.
    ``[[1, 2], [3]] -> oneflow.LongTensor([[1, 2], [3, 0]])``.

    Supports 1d (typically sentence lengths) / 2d (token sequences) /
    3d (character sequences) / 4d (images); the input must be well-formed.

    :param batch_field: the object to pad
    :param dtype: dtype of the result
    :param pad_val: value used for padding
    :return: the padded tensor
    """
    shapes = get_shape(batch_field)
    tensor = oneflow.full(shapes, dtype=dtype, value=pad_val)
    return fill_tensor(batch_field, tensor, dtype=dtype)
b/fastNLP/core/collators/padders/torch_padder.py @@ -38,7 +38,7 @@ def is_torch_tensor(dtype): """ 判断是否为 torch 的 tensor - :param dtype 数据的 dtype 类型 + :param dtype: 数据的 dtype 类型 """ if not isclass(dtype) and isinstance(dtype, torch.dtype): return True @@ -49,9 +49,9 @@ def _get_dtype(ele_dtype, dtype, class_name): """ 用于检测数据的 dtype 类型, 根据内部和外部数据判断。 - :param ele_dtype 内部数据的类型 - :param dtype 数据外部类型 - :param class_name 类的名称 + :param ele_dtype: 内部数据的类型 + :param dtype: 数据外部类型 + :param class_name: 类的名称 """ if not (ele_dtype is None or (is_number_or_numpy_number(ele_dtype) or is_torch_tensor(ele_dtype))): raise EleDtypeUnsupportedError(f"`{class_name}` only supports padding python numbers " diff --git a/fastNLP/core/dataloaders/__init__.py b/fastNLP/core/dataloaders/__init__.py index b18e371c..06d3f5a8 100644 --- a/fastNLP/core/dataloaders/__init__.py +++ b/fastNLP/core/dataloaders/__init__.py @@ -3,9 +3,11 @@ __all__ = [ 'TorchDataLoader', 'PaddleDataLoader', 'JittorDataLoader', + 'OneflowDataLoader', 'prepare_jittor_dataloader', 'prepare_paddle_dataloader', 'prepare_torch_dataloader', + 'prepare_oneflow_dataloader', "prepare_dataloader", @@ -15,5 +17,6 @@ __all__ = [ from .jittor_dataloader import JittorDataLoader, prepare_jittor_dataloader from .torch_dataloader import TorchDataLoader, prepare_torch_dataloader, MixDataLoader from .paddle_dataloader import PaddleDataLoader, prepare_paddle_dataloader +from .oneflow_dataloader import OneflowDataLoader, prepare_oneflow_dataloader from .prepare_dataloader import prepare_dataloader from .utils import OverfitDataLoader \ No newline at end of file diff --git a/fastNLP/core/dataloaders/oneflow_dataloader/__init__.py b/fastNLP/core/dataloaders/oneflow_dataloader/__init__.py new file mode 100644 index 00000000..d17ce91c --- /dev/null +++ b/fastNLP/core/dataloaders/oneflow_dataloader/__init__.py @@ -0,0 +1,6 @@ +__all__ = [ + "OneflowDataLoader", + "prepare_oneflow_dataloader", +] + +from .fdl import OneflowDataLoader, 
__all__ = [
    'OneflowDataLoader',
    'prepare_oneflow_dataloader'
]

from typing import Optional, Callable, Sequence, Union, Tuple, Dict, Mapping, List, Any
from abc import ABC
from copy import deepcopy

from fastNLP.core.dataset import DataSet
from fastNLP.core.collators import Collator
from fastNLP.core.dataloaders.utils import indice_collate_wrapper
from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW
from fastNLP.core.samplers import ReproducibleBatchSampler, ReproducibleSampler, UnrepeatedSampler, RandomSampler
from ..utils import _match_param
from ..utils import HasLenGetitemType

if _NEED_IMPORT_ONEFLOW:
    from oneflow.utils.data import DataLoader, Sampler, Dataset
else:
    from fastNLP.core.utils.dummy_class import DummyClass as DataLoader


class _FDataSet:
    """
    Thin wrapper used by ``OneflowDataLoader``: its ``__getitem__``
    additionally returns the index of each sample, so the dataloader can
    expose the indices of the current batch.

    .. note::

        The wrapped ``dataset`` must support ``__getattribute__`` so that its
        own methods remain reachable through this proxy.
    """

    def __init__(self, dataset) -> None:
        self.dataset = dataset

    def __getitem__(self, item: Union[int, list]) -> Tuple:
        # return (index, sample) so indices can be collected per batch
        return item, self.dataset[item]

    def __getattr__(self, item):
        # delegate every attribute not found on the wrapper to the dataset
        return self.dataset.__getattribute__(item)

    def __len__(self) -> int:
        return len(self.dataset)


class OneflowDataLoader(DataLoader):
    """
    ``DataLoader`` for the ``oneflow`` backend. With ``collate_fn='auto'`` a
    :class:`~fastNLP.core.collators.Collator` is used which detects, for every
    field of the dataset, whether it can be padded; pad-able fields are padded
    to a common length, the others are simply collected into a batch.

    ``collate_fn`` accepts exactly three kinds of values:

    * ``'auto'`` -- use :class:`~fastNLP.core.collators.Collator`;
      ``set_pad`` and ``set_ignore`` can then tune padding per field.
    * ``None`` -- fall back to the default ``collate_fn`` of oneflow's own
      ``DataLoader`` (which cannot handle :class:`~fastNLP.core.dataset.DataSet`).
    * a ``Callable`` -- receives one ``batch`` (a list whose entries are single
      dataset samples) and returns the collated object.
    """

    def __init__(self, dataset, batch_size: int = 16,
                 shuffle: bool = False, sampler = None, batch_sampler = None,
                 num_workers: int = 0, collate_fn: Union[Callable, str, None] = 'auto',
                 pin_memory: bool = False, drop_last: bool = False,
                 timeout: float = 0, worker_init_fn: Optional[Callable] = None,
                 multiprocessing_context=None, generator=None, prefetch_factor: int = 2,
                 persistent_workers: bool = False, **kwargs) -> None:
        """
        :param dataset: any object implementing ``__getitem__`` and ``__len__``
        :param batch_size: batch size; only effective when ``batch_sampler`` is None
        :param shuffle: whether to shuffle; ignored when ``sampler`` is given
        :param sampler: object with ``__len__``/``__iter__`` yielding one dataset
            index per step; when given, ``shuffle`` has no effect
        :param batch_sampler: object with ``__len__``/``__iter__`` yielding a list
            of indices per step; when given, ``batch_size``, ``sampler`` and
            ``shuffle`` have no effect
        :param num_workers: number of worker subprocesses; ``0`` disables workers
        :param collate_fn: ``None`` / ``'auto'`` / ``Callable``, see the class docstring
        :param pin_memory: copy tensors into CUDA pinned memory before returning them
        :param drop_last: drop the final batch when it is smaller than ``batch_size``
        :param timeout: timeout for fetching a batch from the worker queue
        :param worker_init_fn: called in every worker subprocess on startup
        :param multiprocessing_context: multiprocessing context for the workers
        :param generator: RNG used for random sampling and per-worker base seeds
        :param prefetch_factor: samples prefetched per worker (2 means
            ``2 * num_workers`` samples are prefetched overall)
        :param persistent_workers: keep worker processes alive between epochs
        :raises ValueError: if ``dataset`` is a fastNLP ``DataSet`` while
            ``collate_fn`` is ``None``, or ``collate_fn`` is an unknown string
        """
        if isinstance(dataset, DataSet) and collate_fn is None:
            raise ValueError("When use FastNLP DataSet, collate_fn must be not None")

        if not isinstance(dataset, _FDataSet):
            dataset = _FDataSet(dataset)

        if num_workers > 0 and multiprocessing_context is None:
            multiprocessing_context = 'fork'  # default to 'fork' for worker startup

        if batch_sampler is not None:
            # batch_sampler takes full control of batching
            batch_size = 1
            shuffle = False
            sampler = None
        elif sampler is None:
            sampler = RandomSampler(dataset, shuffle=shuffle)
            shuffle = False

        if isinstance(collate_fn, str):
            if collate_fn == 'auto':
                if isinstance(dataset.dataset, DataSet):  # fastNLP DataSet: reuse its collator
                    collate_fn = deepcopy(dataset.dataset.collator)
                    collate_fn.set_backend(backend="oneflow")
                else:
                    collate_fn = Collator(backend="oneflow")
            else:
                raise ValueError(f"collate_fn: {collate_fn} must be 'auto'")

        dl_kwargs = _match_param(OneflowDataLoader.__init__, DataLoader.__init__, fn_name=DataLoader.__name__)
        if dl_kwargs is None:
            super().__init__(dataset=dataset, batch_size=batch_size, shuffle=shuffle, sampler=sampler,
                             batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn,
                             pin_memory=pin_memory, drop_last=drop_last, timeout=timeout,
                             worker_init_fn=worker_init_fn,
                             multiprocessing_context=multiprocessing_context, generator=generator,
                             prefetch_factor=prefetch_factor,
                             persistent_workers=persistent_workers)
        else:
            super().__init__(**dl_kwargs)

        self.cur_batch_indices = None

    def __iter__(self):
        # wrap collate_fn so each batch also reports its sample indices
        # NOTE(review): __iter__ re-wraps on every epoch — assumes
        # indice_collate_wrapper tolerates being applied repeatedly; confirm.
        self.collate_fn = indice_collate_wrapper(self.collate_fn)
        for indices, data in super().__iter__():
            self.cur_batch_indices = indices
            yield data

    def set_pad(self, field_name: Union[str, tuple], pad_val: Union[int, float, None] = 0, dtype=None, backend=None,
                pad_fn: Callable = None) -> Collator:
        """
        Customize how a specific field is padded.

        :param field_name: name of the field. When ``__getitem__`` returns a
            dict, use the key (a tuple such as ``('a', 'b')`` addresses nested
            dicts, e.g. the ``1`` inside ``{'a': {'b': 1}}``); when it returns a
            sequence, use ``'_0'``, ``'_1'``, ...; when it returns a single
            value, use ``"_single"``. An unknown field raises an error.
        :param pad_val: pad value for this field. ``None`` disables padding for
            it; fields that are not pad-able need not be set to ``None``
            explicitly. Meaningless when ``backend`` is ``None``.
        :param dtype: dtype of the padded field
        :param backend: one of ``['raw', 'numpy', 'torch', 'paddle', 'jittor',
            'oneflow', 'auto']``, producing list / numpy.ndarray / torch.Tensor /
            paddle.Tensor / jittor.Var / oneflow.Tensor respectively.
            Meaningless when ``pad_val`` is ``None``.
        :param pad_fn: custom pad function for this field; overrides
            ``pad_val``, ``dtype`` and ``backend``. It receives the field's
            batch (the Collator un-batches the samples and regroups them per
            field) and its return value is used as the padded result.
        :return: the :class:`Collator` in use
        :raises ValueError: if ``collate_fn`` is not a fastNLP ``Collator``
        """
        collator = self._get_collator()
        if isinstance(collator, Collator):
            collator.set_pad(field_name=field_name, pad_val=pad_val, dtype=dtype, pad_fn=pad_fn, backend=backend)
            return collator
        else:
            raise ValueError(f"Only when the collate_fn is a fastNLP Collator, set_pad() is allowed.")

    def _get_collator(self):
        """
        Return the ``Collator`` behind ``collate_fn`` if there is one, else ``None``.
        """
        collator = None
        if hasattr(self.collate_fn, '__wrapped__') and isinstance(self.collate_fn.__wrapped__, Collator):
            collator = self.collate_fn.__wrapped__
        elif isinstance(self.collate_fn, Collator):
            collator = self.collate_fn
        return collator

    def set_ignore(self, *field_names) -> Collator:
        """
        Exclude fields from the batch output.

        Example::

            collator.set_ignore('field1', 'field2')

        :param field_names: names of the fields to ignore, addressed the same
            way as in :meth:`set_pad` (dict keys, tuples for nested dicts,
            ``'_0'``/``'_1'`` for sequences).
        :return: the :class:`Collator` in use
        :raises ValueError: if ``collate_fn`` is not a fastNLP ``Collator``
        """
        collator = self._get_collator()
        if isinstance(collator, Collator):
            collator.set_ignore(*field_names)
            return collator
        else:
            raise ValueError(f"Only when the collate_fn is a fastNLP Collator, set_ignore() is allowed.")

    def get_batch_indices(self) -> List[int]:
        """
        Return the dataset indices of the samples in the current batch.
        """
        return self.cur_batch_indices


def prepare_oneflow_dataloader(ds_or_db,
                               batch_size: int = 16,
                               shuffle: Optional[bool] = None,
                               sampler: Union["Sampler[int]", ReproducibleSampler, UnrepeatedSampler] = None,
                               batch_sampler: Union["Sampler[Sequence[int]]", ReproducibleBatchSampler] = None,
                               num_workers: int = 0, collate_fn: Union[Callable, str, None] = 'auto',
                               pin_memory: bool = False, drop_last: bool = False,
                               timeout: float = 0, worker_init_fn: Optional[Callable] = None,
                               multiprocessing_context=None, generator=None, prefetch_factor: int = 2,
                               persistent_workers: bool = False,
                               non_train_sampler: Union["Sampler[int]", ReproducibleSampler, UnrepeatedSampler] = None,
                               non_train_batch_size: Optional[int] = None) \
        -> Union[OneflowDataLoader, Dict[str, OneflowDataLoader]]:
    """
    Turn one or several datasets into :class:`OneflowDataLoader` objects.
    The return shape depends on the type of ``ds_or_db``:

    * a single dataset (anything with ``__getitem__``/``__len__``): one
      ``OneflowDataLoader`` built from all parameters except
      ``non_train_batch_size`` and ``non_train_sampler``.
    * a :class:`~fastNLP.io.DataBundle`: a ``Dict[name, OneflowDataLoader]``;
      datasets whose name contains ``'train'`` use ``batch_size``/``sampler``
      and shuffle by default, the others use ``non_train_batch_size`` /
      ``non_train_sampler`` (falling back to the train values when unset).
    * a ``Dict[str, DataSet]``: same as the DataBundle case.

    :param ds_or_db: dataset, ``DataBundle`` or ``Dict[str, DataSet]``
    :param batch_size: batch size for 'train' datasets; effective when
        ``batch_sampler`` is None
    :param non_train_batch_size: batch size for non-'train' datasets
    :param shuffle: whether to shuffle; ``None`` means shuffle only the
        dataset(s) recognized as 'train'
    :param sampler: index sampler for 'train' datasets; disables ``shuffle``
    :param non_train_sampler: index sampler for non-'train' datasets
    :param batch_sampler: yields a list of indices per step; disables
        ``batch_size``, ``sampler`` and ``shuffle``
    :param num_workers: number of worker subprocesses; ``0`` disables workers
    :param collate_fn: ``None`` / ``'auto'`` / ``Callable``, see
        :class:`OneflowDataLoader`
    :param pin_memory: copy tensors into CUDA pinned memory before returning them
    :param drop_last: drop the final batch when it is smaller than ``batch_size``
    :param timeout: timeout for fetching a batch from the worker queue
    :param worker_init_fn: called in every worker subprocess on startup
    :param multiprocessing_context: multiprocessing context for the workers
    :param generator: RNG used for random sampling and per-worker base seeds
    :param prefetch_factor: samples prefetched per worker
    :param persistent_workers: keep worker processes alive between epochs
    :raises ValueError: if ``ds_or_db`` is none of the supported types
    """

    from fastNLP.io import DataBundle

    def _to_dl(ds, train: bool) -> OneflowDataLoader:
        # one place for the (previously duplicated) train / non-train wiring
        if train:
            bs, sp = batch_size, sampler
            shf = True if shuffle is None else shuffle
        else:
            bs = non_train_batch_size if non_train_batch_size else batch_size
            sp = non_train_sampler if non_train_sampler else sampler
            shf = False if shuffle is None else shuffle
        return OneflowDataLoader(dataset=ds, batch_size=bs, shuffle=shf, sampler=sp,
                                 batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn,
                                 pin_memory=pin_memory, drop_last=drop_last, timeout=timeout,
                                 worker_init_fn=worker_init_fn, multiprocessing_context=multiprocessing_context,
                                 generator=generator, prefetch_factor=prefetch_factor,
                                 persistent_workers=persistent_workers)

    if isinstance(ds_or_db, DataBundle):
        return {name: _to_dl(ds, 'train' in name) for name, ds in ds_or_db.iter_datasets()}

    elif isinstance(ds_or_db, Mapping):
        return {name: _to_dl(ds, 'train' in name) for name, ds in ds_or_db.items()}

    elif isinstance(ds_or_db, HasLenGetitemType):
        # single dataset: non_train_* parameters are intentionally ignored here
        return OneflowDataLoader(dataset=ds_or_db, batch_size=batch_size,
                                 shuffle=False if shuffle is None else shuffle, sampler=sampler,
                                 batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=collate_fn,
                                 pin_memory=pin_memory, drop_last=drop_last, timeout=timeout,
                                 worker_init_fn=worker_init_fn, multiprocessing_context=multiprocessing_context,
                                 generator=generator, prefetch_factor=prefetch_factor,
                                 persistent_workers=persistent_workers)

    else:
        raise ValueError(f"ds_or_db: {ds_or_db} must be fastnlp dataset or data_bundle or mapping!")
"paddle", "jittor"]`` 四种类型。 + :param backend: 当前支持 ``["auto", "torch", "paddle", "jittor", "oneflow"]`` 四种类型。 * 为 ``auto`` 时,首先(1) 根据环境变量 "FASTNLP_BACKEND" 进行判断;如果没有设置则,(2)通过当前 ``sys.modules`` 中已经 import 的 ``backend`` 进行判定。如果以上均无法判定,则报错。如果找到了 @@ -45,6 +46,7 @@ def prepare_dataloader(dataset, batch_size: int = 16, shuffle: bool = None, drop * 为 ``torch`` 时,使用 :func:`~fastNLP.prepare_torch_dataloader` 。 * 为 ``paddle`` 时,使用 :func:`~fastNLP.prepare_paddle_dataloader` 。 * 为 ``jittor`` 时,使用 :func:`~fastNLP.prepare_jittor_dataloader` 。 + * 为 ``oneflow`` 时,使用 :func:`~fastNLP.prepare_oneflow_dataloader` 。 :return """ @@ -61,6 +63,10 @@ def prepare_dataloader(dataset, batch_size: int = 16, shuffle: bool = None, drop prepare_jittor_dataloader(ds_or_db=dataset, sampler=None, collate_fn=collate_fn, num_workers=num_workers, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last) + elif backend == 'oneflow': + return prepare_oneflow_dataloader(ds_or_db=dataset, batch_sampler=None, collate_fn=collate_fn, + num_workers=num_workers, shuffle=shuffle, sampler=None, + batch_size=batch_size) else: raise ValueError(f"Currently we do not support backend:{backend}.") diff --git a/fastNLP/core/drivers/__init__.py b/fastNLP/core/drivers/__init__.py index f9be3180..7bf91d35 100644 --- a/fastNLP/core/drivers/__init__.py +++ b/fastNLP/core/drivers/__init__.py @@ -1,22 +1,27 @@ __all__ = [ 'Driver', 'TorchDriver', - "TorchSingleDriver", - "TorchDDPDriver", - "PaddleDriver", - "PaddleSingleDriver", - "PaddleFleetDriver", - "JittorDriver", - "JittorSingleDriver", - "JittorMPIDriver", + 'TorchSingleDriver', + 'TorchDDPDriver', + 'PaddleDriver', + 'PaddleSingleDriver', + 'PaddleFleetDriver', + 'JittorDriver', + 'JittorSingleDriver', + 'JittorMPIDriver', + 'OneflowDriver', + 'OneflowSingleDriver', + 'OneflowDDPDriver', 'torch_seed_everything', 'paddle_seed_everything', + 'oneflow_seed_everything', 'optimizer_state_to_device' ] from .torch_driver import TorchDriver, TorchSingleDriver, 
TorchDDPDriver, torch_seed_everything, optimizer_state_to_device from .jittor_driver import JittorDriver, JittorMPIDriver, JittorSingleDriver from .paddle_driver import PaddleDriver, PaddleFleetDriver, PaddleSingleDriver, paddle_seed_everything +from .oneflow_driver import OneflowDriver, OneflowSingleDriver, OneflowDDPDriver, oneflow_seed_everything from .driver import Driver diff --git a/fastNLP/core/drivers/choose_driver.py b/fastNLP/core/drivers/choose_driver.py index 75df97c4..8ad7e880 100644 --- a/fastNLP/core/drivers/choose_driver.py +++ b/fastNLP/core/drivers/choose_driver.py @@ -1,7 +1,7 @@ from typing import Union, Optional, List from .driver import Driver -from ..utils import is_torch_module, is_paddle_module, is_jittor_module +from ..utils import is_torch_module, is_paddle_module, is_jittor_module, is_oneflow_module def choose_driver(model, driver: Union[str, Driver], device: Optional[Union[int, List[int], str]], **kwargs) -> Driver: @@ -25,6 +25,8 @@ def choose_driver(model, driver: Union[str, Driver], device: Optional[Union[int, driver = "paddle" elif is_jittor_module(model): driver = "jittor" + elif is_oneflow_module(model): + driver = "oneflow" else: raise ValueError(f"Cannot choose driver automatically based on model, please set `driver` specifically.") @@ -37,6 +39,9 @@ def choose_driver(model, driver: Union[str, Driver], device: Optional[Union[int, elif driver in {"paddle"}: from fastNLP.core.drivers.paddle_driver.initialize_paddle_driver import initialize_paddle_driver return initialize_paddle_driver(driver, device, model, **kwargs) + elif driver in {"oneflow"}: + from fastNLP.core.drivers.oneflow_driver.initialize_oneflow_driver import initialize_oneflow_driver + return initialize_oneflow_driver(driver, device, model, **kwargs) else: raise ValueError("Parameter `driver` can only be one of these values: ['torch', 'fairscale', " - "'jittor', 'paddle'].") \ No newline at end of file + "'jittor', 'paddle', 'oneflow'].") \ No newline at end of file 
diff --git a/fastNLP/core/drivers/oneflow_driver/__init__.py b/fastNLP/core/drivers/oneflow_driver/__init__.py new file mode 100644 index 00000000..12beffc0 --- /dev/null +++ b/fastNLP/core/drivers/oneflow_driver/__init__.py @@ -0,0 +1,18 @@ +__all__ = [ + "OneflowDDPDriver", + "OneflowSingleDriver", + "OneflowDriver", + "oneflow_seed_everything", + "optimizer_state_to_device" +] + +from .ddp import OneflowDDPDriver +from .single_device import OneflowSingleDriver +from .oneflow_driver import OneflowDriver +from .utils import oneflow_seed_everything, optimizer_state_to_device + + + + + + diff --git a/fastNLP/core/drivers/oneflow_driver/ddp.py b/fastNLP/core/drivers/oneflow_driver/ddp.py new file mode 100644 index 00000000..fb992bc8 --- /dev/null +++ b/fastNLP/core/drivers/oneflow_driver/ddp.py @@ -0,0 +1,323 @@ +import os +from typing import List, Optional, Union, Dict + +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW + +if _NEED_IMPORT_ONEFLOW: + import oneflow + import oneflow.comm as comm + import oneflow.env as dist_env + from oneflow.nn.parallel import DistributedDataParallel + from oneflow.utils.data import BatchSampler + +__all__ = [ + "OneflowDDPDriver" +] + +from .oneflow_driver import OneflowDriver +from fastNLP.core.drivers.oneflow_driver.utils import ( + replace_sampler, + replace_batch_sampler +) +from fastNLP.core.utils import check_user_specific_params +from fastNLP.core.samplers import ReproducibleSampler, RandomSampler, UnrepeatedSequentialSampler, \ + ReproducibleBatchSampler, \ + re_instantiate_sampler, UnrepeatedSampler, conversion_between_reproducible_and_unrepeated_sampler +from fastNLP.envs import FASTNLP_GLOBAL_SEED, FASTNLP_NO_SYNC +from fastNLP.core.log import logger +from fastNLP.core.drivers.oneflow_driver.dist_utils import fastnlp_oneflow_all_gather, fastnlp_oneflow_broadcast_object +from .utils import _check_dataloader_args_for_distributed + + +class OneflowDDPDriver(OneflowDriver): + r""" + ``OneflowDDPDriver`` 实现了动态图下使用 
``DistributedDataParallel`` 进行的数据并行分布式训练。 + + .. note:: + + 您在绝大多数情况下不需要自己使用到该类,通过向 ``Trainer`` 传入正确的参数,您可以方便快速地部署您的分布式训练; + + ``OneflowDDPDriver`` 目前支持两种启动方式: + + 1. 用户不做任何处理,通过运行 ``python -m oneflow.distributed.launch --nproc_per_node 2 train.py`` 启动; + 2. 用户将模型通过 ``DistributedDataParallel`` 处理后,通过运行 ``python -m oneflow.distributed.launch --nproc_per_node 2 train.py`` 启动; + + 注意多机的启动强制要求用户在每一台机器上使用 ``python -m oneflow.distributed.launch`` 启动;因此我们不会在 ``OneflowDDPDriver`` 中保存 + 任何当前有多少台机器的信息; + + :param model: 传入给 ``Trainer`` 的 ``model`` 参数; + :param parallel_device: 该参数无效,**FastNLP** 会自动获取当前进程的设备; + :param fp16: 是否开启 fp16 训练;目前该参数无效; + :param oneflow_kwargs: + * *ddp_kwargs* -- 用于 ``DistributedDataParallel`` 的其它参数,详情可查阅 **oneflow** 的官方文档; + """ + + def __init__( + self, + model, + parallel_device: Optional["oneflow.device"], + fp16: bool = False, + oneflow_kwargs: Dict = {}, + **kwargs + ): + + super(OneflowDDPDriver, self).__init__(model, fp16=fp16, oneflow_kwargs=oneflow_kwargs, **kwargs) + + # oneflow 会自己初始化通信组,因此 parallel_device 实际上不起作用,可以通过 current_device 获取设备 + self.model_device = oneflow.device("cuda", oneflow.cuda.current_device()) + self._data_device = self.model_device + + self.global_rank = int(os.environ["RANK"]) + self.world_size = int(os.environ["WORLD_SIZE"]) + + self._ddp_kwargs = self._oneflow_kwargs.get("ddp_kwargs", {}) + check_user_specific_params(self._ddp_kwargs, DistributedDataParallel.__init__, DistributedDataParallel.__name__) + if len(self.model._buffers) != 0 and self._ddp_kwargs.get("broadcast_buffers", None) is None: + logger.info("Notice your model has buffers and you are using `OneflowDDPDriver`, but you do not set " + "'broadcast_buffers' in your trainer. 
Cause in most situations, this parameter can be set" + " to 'False' to avoid redundant data communication between different processes.") + + self.output_from_new_proc = kwargs.get("output_from_new_proc", "only_error") + assert isinstance(self.output_from_new_proc, str), "Parameter `output_from_new_proc` can only be `str` type." + if self.output_from_new_proc not in {"all", "ignore", "only_error"}: + os.makedirs(name=self.output_from_new_proc, exist_ok=True) + self.output_from_new_proc = os.path.abspath(self.output_from_new_proc) + + self._has_setup = False # 设置这一参数是因为 evaluator 中也会进行 setup 操作,但是显然是不需要的也不应该的; + self._has_ddpwrapped = False# hasattr(model, ) + + def setup(self): + r""" + 将模型用 ``DistributedDataParallel`` 进行处理; + """ + if self._has_setup: + return + self._has_setup = True + + self.configure_ddp() + self.barrier() + # 初始化 self._pids,从而使得每一个进程都能接受到 rank0 的 send 操作; + # self._pids = [oneflow.tensor(0, dtype=oneflow.int).to(self.data_device) for _ in range(dist_env.get_world_size())] + # comm.all_gather(self._pids, oneflow.tensor(os.getpid(), dtype=oneflow.int).to(self.data_device)) + # local_world_size = int(os.environ.get("LOCAL_WORLD_SIZE")) if "LOCAL_WORLD_SIZE" in os.environ else None + # if local_world_size is None: + # local_world_size = oneflow.tensor(int(os.environ.get("LOCAL_RANK")), dtype=oneflow.int).to(self.data_device) + # comm.all_reduce(local_world_size, op=dist_env.ReduceOp.MAX) + # local_world_size = local_world_size.tolist() + 1 + + # node_rank = self.global_rank // local_world_size + # self._pids = self._pids[node_rank * local_world_size: (node_rank + 1) * local_world_size] + # self._pids = self.tensor_to_numeric(self._pids) + + def configure_ddp(self): + if not hasattr(self.model, "_ddp_state_for_reversed_params"): + self.model.to(self.model_device) + self.model = DistributedDataParallel( + # 注意这里的 self.model_device 是 `oneflow.device` type,因此 self.model_device.index; + self.model, + **self._ddp_kwargs + ) + self._has_ddpwrapped = True 
+ + @property + def master_address(self) -> str: + return os.environ.get("MASTER_ADDR") + + @property + def master_port(self) -> str: + return os.environ.get("MASTER_PORT") + + @property + def world_size(self) -> int: + return self._world_size + + @world_size.setter + def world_size(self, size: int): + self._world_size = size + + @property + def global_rank(self) -> int: + return self._global_rank + + @global_rank.setter + def global_rank(self, rank: int) -> None: + self._global_rank = rank + + @property + def local_rank(self) -> int: # 这个不会受到 all_rank_call_context 的影响 + return int(os.environ.get("LOCAL_RANK", 0)) + + @property + def data_device(self): + return self._data_device + + def set_dist_repro_dataloader(self, dataloader, + dist: Optional[Union[str, ReproducibleSampler, ReproducibleBatchSampler]] = None, + reproducible: bool = False): + # 如果 dist 为 ReproducibleBatchSampler, ReproducibleSampler 说明是在断点重训时 driver.load_checkpoint 函数调用; + # 注意这里不需要调用 dist_sampler.set_distributed;因为如果用户使用的是 OneflowDDPDriver,那么其在 Trainer 初始化的时候就已经调用了该函数; + if isinstance(dist, ReproducibleBatchSampler): + dist.set_distributed( + num_replicas=self.world_size, + rank=self.global_rank, + pad=True + ) + return replace_batch_sampler(dataloader, dist) + if isinstance(dist, ReproducibleSampler): + dist.set_distributed( + num_replicas=self.world_size, + rank=self.global_rank, + pad=True + ) + return replace_sampler(dataloader, dist) + + # 如果 dist 为 str 或者 None,说明是在 trainer 初试化时调用; + # trainer, evaluator + if dist is None: + if reproducible: + raise RuntimeError("It is not allowed to save checkpoint if the sampler is not allowed to be replaced.") + else: + args = self.get_dataloader_args(dataloader) + if isinstance(args.batch_sampler, ReproducibleBatchSampler): + return replace_batch_sampler(dataloader, re_instantiate_sampler(args.batch_sampler)) + if isinstance(args.sampler, ReproducibleSampler): + return replace_sampler(dataloader, re_instantiate_sampler(args.sampler)) + return dataloader 
+ # trainer + elif dist == "dist": + args = self.get_dataloader_args(dataloader) + # 如果用户的 trainer.use_dist_sampler 为 True,那么此时其是否进行断点重训,不影响这里的行为; + if isinstance(args.batch_sampler, ReproducibleBatchSampler): + batch_sampler = re_instantiate_sampler(args.batch_sampler) + batch_sampler.set_distributed( + num_replicas=self.world_size, + rank=self.global_rank, + pad=True + ) + return replace_batch_sampler(dataloader, batch_sampler) + elif isinstance(args.sampler, ReproducibleSampler): + sampler = re_instantiate_sampler(args.sampler) + sampler.set_distributed( + num_replicas=self.world_size, + rank=self.global_rank, + pad=True + ) + return replace_sampler(dataloader, sampler) + else: + _check_dataloader_args_for_distributed(args, controller="Trainer") + sampler = RandomSampler( + dataset=args.dataset, + shuffle=args.shuffle, + seed=int(os.environ.get(FASTNLP_GLOBAL_SEED, 0)) + ) + sampler.set_distributed( + num_replicas=self.world_size, + rank=self.global_rank, + pad=True + ) + return replace_sampler(dataloader, sampler) + # evaluator + elif dist == "unrepeatdist": + args = self.get_dataloader_args(dataloader) + if isinstance(args.sampler, ReproducibleSampler): + sampler = conversion_between_reproducible_and_unrepeated_sampler(args.sampler) + elif not isinstance(args.sampler, UnrepeatedSampler): + _check_dataloader_args_for_distributed(args, controller="Evaluator") + sampler = UnrepeatedSequentialSampler( + dataset=args.dataset + ) + else: + sampler = re_instantiate_sampler(args.sampler) + sampler.set_distributed( + num_replicas=self.world_size, + rank=self.global_rank + ) + batch_sampler = BatchSampler(sampler, args.batch_size, drop_last=False) + return replace_batch_sampler(dataloader, batch_sampler) + else: + raise ValueError( + "Parameter `dist_sampler` can only be one of three values: ('dist', 'unrepeatdist', None).") + + def is_global_zero(self): + r""" + :return: 返回当前的进程是否在全局上是进程 0 ; + """ + return self.global_rank == 0 + + def get_model_no_sync_context(self): 
+ r""" + :return: 返回一个 ``context`` 上下文环境,用于关闭各个进程之间的同步;该功能暂时无效,返回一个空的上下文环境; + """ + # TODO 暂时没有在 oneflow 中找到类似的功能; + from fastNLP.core.utils import nullcontext + return nullcontext + return self.model.no_sync + + def unwrap_model(self): + r""" + :return: 返回原始模型; + """ + return self.model + + def get_local_rank(self) -> int: + r""" + :return: 返回当前进程局部的进程编号; + """ + return self.local_rank + + def barrier(self): + r""" + 通过使用该函数来使得各个进程之间同步操作; + """ + if int(os.environ.get(FASTNLP_NO_SYNC, 0)) < 1: # 当 FASTNLP_NO_SYNC 小于 1 时实际执行 + comm.barrier() + + def is_distributed(self): + r""" + :return: 返回当前使用的 driver 是否是分布式的 driver,对于 ``OneflowDDPDriver`` 来说,该函数一定返回 ``True``; + """ + return True + + def broadcast_object(self, obj, src: int = 0, **kwargs): + r""" + 从 src 端将 obj 对象(可能是 tensor ,可能是 object )发送到 dst 处。如果是非 tensor 的对象会尝试使用 pickle 进行打包进行 + 传输,然后再 dst 处再加载回来。仅在分布式的 driver 中有实际意义。 + + :param obj: obj,可能是 Tensor 或 嵌套类型的数据 + :param int src: source 的 global rank 。 + :param int dst: target 的 global rank,可以是多个目标 rank + :param group: 所属的 group + :return: 如果当前不是分布式 driver 直接返回输入的 obj 。如果当前 rank 是接收端(其 global rank 包含在了 dst 中),则返回 + 接收到的参数;如果是 source 端则返回发射的内容;既不是发送端、又不是接收端,则返回 None 。 + """ + if int(os.environ.get(FASTNLP_NO_SYNC, 0)) == 2: # 如果 FASTNLP_NO_SYNC == 2 直接返回。 + return + return fastnlp_oneflow_broadcast_object(obj, src, device=self.data_device) + + def all_gather(self, obj) -> List: + r""" + 将 obj 互相传送到其它所有的 rank 上,其中 obj 可能是 Tensor,也可能是嵌套结构的 object 。如果不是基础类型的数据,尝试通过 + pickle 进行序列化,接收到之后再反序列化。 + + example:: + + obj = { + 'a': [1, 1], + 'b': [[1, 2], [1, 2]], + 'c': { + 'd': [1, 2] + } + } + -> + [ + {'a': 1, 'b':[1, 2], 'c':{'d': 1}}, + {'a': 1, 'b':[1, 2], 'c':{'d': 2}} + ] + + :param obj: 需要传输的对象,在每个rank上都应该保持相同的结构。 + :param group: + :return: + """ + if int(os.environ.get(FASTNLP_NO_SYNC, 0)) == 2: # 如果 FASTNLP_NO_SYNC 表示不执行 + return [obj] + return fastnlp_oneflow_all_gather(obj) diff --git a/fastNLP/core/drivers/oneflow_driver/dist_utils.py 
b/fastNLP/core/drivers/oneflow_driver/dist_utils.py new file mode 100644 index 00000000..e84df213 --- /dev/null +++ b/fastNLP/core/drivers/oneflow_driver/dist_utils.py @@ -0,0 +1,306 @@ +import io +import pickle +import os +from typing import Any, List + +from fastNLP.core.utils import apply_to_collection, get_oneflow_device +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW +from fastNLP.envs.env import FASTNLP_NO_SYNC +if _NEED_IMPORT_ONEFLOW: + import oneflow + import oneflow.comm as comm + import oneflow.env as dist_env + +PROTOCOL_VERSION = 1 + +def _validate_output_list_for_rank(my_rank, dst, gather_list): + if dst == my_rank: + if not gather_list: + raise ValueError( + "Argument ``gather_list`` must be specified on destination rank." + ) + elif gather_list: + raise ValueError( + "Argument ``gather_list`` must NOT be specified " + "on non-destination ranks." + ) + + obj = {"protocol_version": PROTOCOL_VERSION, "data": obj} + pickled_bytes = pickle.dumps(obj) + +def fastnlp_oneflow_gather_object(obj, dst=0): + """ + 从其它 rank gather 东西到 dst rank 。 + + Example:: + >>> # Assumes world_size of 3. + >>> gather_objects = ["foo", 12, {1: 2}] # any picklable object + >>> output = [None for _ in gather_objects] + >>> fastnlp_oneflow_gather_object( + gather_objects[dist.get_rank()], + output if dist.get_rank() == 0 else None, + dst=0 + ) + >>> # On rank 0 + >>> output + ['foo', 12, {1: 2}] + + :param obj: 需要发送的 obj 对象,需要是可以 pickable 的对象 + :param dst: 目标的 rank 。 + :return: 在 dst 上面返回 world_size 的 list,依次为 rank 0;rank 1...上 obj + """ + if int(os.environ.get(FASTNLP_NO_SYNC, '0')) == 2: + return [obj] + + if dist_env.get_rank() == dst: + object_gather_list = [None for _ in range(dist_env.get_world_size())] + else: + object_gather_list = None + + # Ensure object_gather_list is specified appopriately. 
+ my_rank = dist_env.get_rank() + _validate_output_list_for_rank(my_rank, dst, object_gather_list) + # 防止 unpickle 的时候出现在了发送的 gpu 上。 + obj = apply_to_collection(obj, oneflow.Tensor, _to_device, device=oneflow.device("cpu")) + input_tensor, local_size = _object_to_tensor(obj) + current_device = oneflow.device("cuda") + input_tensor = input_tensor.to(current_device) + local_size = local_size.to(current_device) + # Gather all local sizes. This is so that we can find the max size, and index + # until the correct size when deserializing the tensors. + group_size = dist_env.get_world_size() + object_sizes_tensor = oneflow.zeros(group_size, dtype=oneflow.long, device=current_device) + object_size_list = [ + object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size) + ] + # Allgather tensor sizes. An all-gather is needed here despite this being a + # gather, since each rank needs to broadcast a tensor of the same (maximal) + # size. + comm.all_gather(object_size_list, local_size) + max_object_size = int(max(object_size_list).item()) # type: ignore[type-var] + # Resize tensor to max size across all ranks. + input_tensor = input_tensor.reshape(max_object_size) + # Avoid populating output tensors if the result won't be gathered on this rank. + if my_rank == dst: + coalesced_output_tensor = oneflow.empty( + max_object_size * group_size, dtype=oneflow.uint8, device=current_device + ) + # Output tensors are nonoverlapping views of coalesced_output_tensor + output_tensors = [ + coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)] + for i in range(group_size) + ] + # All ranks call gather with equal-sized tensors. 
+ comm.gather( + input_tensor, + gather_list=output_tensors if my_rank == dst else None, + dst=dst, + ) + if my_rank != dst: + return + for i, tensor in enumerate(output_tensors): + tensor = tensor.type(oneflow.uint8) # type: ignore[call-overload] + tensor_size = object_size_list[i] + object_gather_list[i] = _tensor_to_object(tensor, tensor_size) + + +def _object_to_tensor(obj, device=None): + f = io.BytesIO() + obj = {"protocol_version": PROTOCOL_VERSION, "data": obj} + pickled_bytes = pickle.dumps(obj) + + byte_tensor = oneflow.ByteTensor(list(pickled_bytes)) + local_size = oneflow.LongTensor([byte_tensor.numel()]) + if device is not None: + byte_tensor = byte_tensor.to(device) + local_size = local_size.to(device) + return byte_tensor, local_size + +def _tensor_to_object(tensor, tensor_size): + buf = tensor.detach().cpu().numpy().tobytes()[:tensor_size] + res = pickle.loads(buf) + assert res["protocol_version"] == PROTOCOL_VERSION + return res["data"] + +def send_recv_object(obj, src, cur_rank, device): + r""" + oneflow 中的单点对多点的分发函数; + + 例如将进程 0 上的对象 object 分发到其它进程上; + + Example:: + + cur_rank = int(os.environ.get('LOCAL_RANK', 0)) + + # 拿到 local_device + + send_recv_object(object, 0, cur_rank, local_device) + + :param obj: 一个可以序列化的 python 对象; + :param src: 从哪一个 rank 上发送到其它 rank; + :param cur_rank: 当前的进程的 rank 序号; + :param device: 当前的进程所在的设备; + :param group: 通信组,默认为 None; + :param tag: 将发送与远程接收匹配的标记; + :return: + """ + # src rank send to all other ranks + size = oneflow.LongTensor([0]).to(device) + + if cur_rank == src: + world_size = dist_env.get_world_size() + tensor, size = _object_to_tensor(obj) + tensor = tensor.to(device) + size = size.to(device) + + # 首先同步 obj 的 size 的信息; + comm.broadcast(size, src) + for subrank in range(world_size): + if subrank != src: + comm.send(tensor=tensor, dst=subrank) + else: + comm.broadcast(size, src) + tensor = oneflow.ByteTensor([0] * size).to(device) + comm.recv(tensor=tensor, src=src) + + return 
_tensor_to_object(tensor.cpu(), size) + + +def _to_device(tensor, device): + return tensor.contiguous().to(device) + + +def fastnlp_oneflow_all_gather(obj: Any, device=None) ->List: + """ + 实现任何类型的数据都使用该接口可以进行 all_gather 操作。对于非 tensor 类型的数据,通过 pickle 序列化再反序列化的方式进行传输。 + + example:: + + obj = { + 'a': [1, 1], + 'b': [[1, 2], [1, 2]], + 'c': { + 'd': [1, 2] + } + } + -> + [ + {'a': 1, 'b':[1, 2], 'c':{'d': 1}}, + {'a': 1, 'b':[1, 2], 'c':{'d': 2}} + ] + + :param obj: 任意结构的数据,如果为 tensor ,需要保证每个显卡上的 tensor 的形状是一样的。如果传入的是非 tensor 对象都将直接进行 + 序列化之后进行传输。 + :param device: 当前该参数无意义。 + :param group: + :return: 返回的结果是 [obj0, obj1, ...],其中 obj_i 即为第 i 个 rank 上的 obj 。 + """ + if int(os.environ.get(FASTNLP_NO_SYNC, "0")) == 2: + return [obj] + + if isinstance(obj, oneflow.Tensor): + objs = [oneflow.zeros_like(obj) for _ in range(dist_env.get_world_size())] + comm.all_gather(objs, obj) + else: + objs = [None for _ in range(dist_env.get_world_size())] + # 防止 unpickle 的时候弄到发送的 gpu 上了 + obj = apply_to_collection(obj, oneflow.Tensor, _to_device, device=oneflow.device("cpu")) + all_gather_object(objs, obj) + return objs + + +def fastnlp_oneflow_broadcast_object(obj, src, device=None): + """ + 将 src 上的 obj 对象广播到其它 rank 上。 + + :param obj: 需要发送的对象 + :param src: 从哪里发出。 + :param device: + :param group: 属于哪个通信 group + :return: + """ + if int(os.environ.get(FASTNLP_NO_SYNC, "0")) == 2: + if src == dist_env.get_rank(): + return obj + else: + return None + + cur_rank = dist_env.get_rank() + if cur_rank == src: + # 如果有 tensor 全部移动到 cpu 上,方便 pickle , 不然 unpickle 的时候可能会 pickle 到发送过来的卡那里 + obj = apply_to_collection(obj, oneflow.Tensor, _to_device, device=oneflow.device("cpu")) + if device is None: + device = oneflow.cuda.current_device() + device = get_oneflow_device(device) + + if cur_rank == src: + tensor, size = _object_to_tensor(obj, device=device) + else: + size = oneflow.LongTensor([0]).to(device) + + comm.broadcast(size, src=src) + if cur_rank != src: + tensor = oneflow.empty( + 
size.int().item(), # type: ignore[arg-type] + dtype=oneflow.uint8, + device=device + ) + comm.broadcast(tensor, src=src) + + return _tensor_to_object(tensor, tensor_size=size.item()) + +def all_gather_object(object_list, obj): + """ + + Example:: + >>> # Note: Process group initialization omitted on each rank. + >>> # Assumes world_size of 3. + >>> gather_objects = ["foo", 12, {1: 2}] # any picklable object + >>> output = [None for _ in gather_objects] + >>> all_gather_object(output, gather_objects[dist.get_rank()]) + >>> output + ['foo', 12, {1: 2}] + + :param object_list: + :param obj: + :param group: + :return: + """ + if int(os.environ.get(FASTNLP_NO_SYNC, "0")) == 2: + return [obj] + + current_device = get_oneflow_device(oneflow.cuda.current_device()) + + input_tensor, local_size = _object_to_tensor(obj, device=current_device) + + # Gather all local sizes. This is so that we can find the max size, and index + # until the correct size when deserializing the tensors. + group_size = dist_env.get_world_size() + object_sizes_tensor = oneflow.zeros( + group_size, dtype=oneflow.long, device=current_device + ) + object_size_list = [ + object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size) + ] + # Allgather tensor sizes + comm.all_gather(object_size_list, local_size) + max_object_size = int(max(object_size_list).item()) # type: ignore[type-var] + # Resize tensor to max size across all ranks. + input_tensor = input_tensor.reshape(max_object_size) + coalesced_output_tensor = oneflow.empty( + max_object_size * group_size, dtype=oneflow.uint8, device=current_device + ) + # Output tensors are nonoverlapping views of coalesced_output_tensor + output_tensors = [ + coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)] + for i in range(group_size) + ] + comm.all_gather(output_tensors, input_tensor) + # Deserialize outputs back to object. 
+ for i, tensor in enumerate(output_tensors): + tensor = tensor.type(oneflow.uint8) + if tensor.device != oneflow.device("cpu"): + tensor = tensor.cpu() + tensor_size = object_size_list[i] + object_list[i] = _tensor_to_object(tensor, tensor_size) + return object_list diff --git a/fastNLP/core/drivers/oneflow_driver/initialize_oneflow_driver.py b/fastNLP/core/drivers/oneflow_driver/initialize_oneflow_driver.py new file mode 100644 index 00000000..2dab1729 --- /dev/null +++ b/fastNLP/core/drivers/oneflow_driver/initialize_oneflow_driver.py @@ -0,0 +1,70 @@ +import os +from typing import Optional, Union, List, Sequence +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW +if _NEED_IMPORT_ONEFLOW: + import oneflow + +from .oneflow_driver import OneflowDriver +from .single_device import OneflowSingleDriver +from .ddp import OneflowDDPDriver +from fastNLP.core.log import logger +from fastNLP.envs import FASTNLP_BACKEND_LAUNCH + +__all__ = [] + + +def initialize_oneflow_driver(driver: str, device: Optional[Union[str, "oneflow.device", int, List[int]]], + model: "oneflow.nn.Module", **kwargs) -> OneflowDriver: + r""" + 用来根据参数 ``driver` 和 ``device`` 来确定并且初始化一个具体的 ``Driver`` 实例然后返回回去; + + :param driver: 该参数的值应为以下之一:``["oneflow"]``; + :param device: 该参数的格式与 ``Trainer`` 对参数 ``device`` 的要求一致; + :param model: 训练或者评测的具体的模型; + + :return: 返回一个 :class:`~fastNLP.core.OneflowSingleDriver` 或 :class:`~fastNLP.core.OneflowDDPDriver` 实例; + """ + # world_size 和 rank + if FASTNLP_BACKEND_LAUNCH in os.environ: + if device is not None: + logger.rank_zero_warning("Parameter `device` would be ignored when you are using `oneflow.distributed.launch` to pull " + "up your script. 
", once=True) + return OneflowDDPDriver(model, None, **kwargs) + + if driver not in {"oneflow"}: + raise ValueError("Parameter `driver` can only be one of these values: ['oneflow'].") + + _could_use_device_num = oneflow.cuda.device_count() + if isinstance(device, str): + device = oneflow.device(device) + elif isinstance(device, int): + if device < 0: + if device != -1: + raise ValueError("Parameter `device` can only be '-1' when it is smaller than 0.") + device = [oneflow.device(f"cuda:{w}") for w in range(_could_use_device_num)] + elif device >= _could_use_device_num: + print(device, _could_use_device_num) + raise ValueError("The gpu device that parameter `device` specifies is not existed.") + else: + device = oneflow.device(f"cuda:{device}") + elif isinstance(device, Sequence): + device = list(set(device)) + for each in device: + if not isinstance(each, int): + raise ValueError("When parameter `device` is 'Sequence' type, the value in it should be 'int' type.") + elif each < 0: + raise ValueError("When parameter `device` is 'Sequence' type, the value in it should be bigger than 0.") + elif each >= _could_use_device_num: + raise ValueError(f"When parameter `device` is 'Sequence' type, the value in it should not be bigger than" + f" the available gpu number:{_could_use_device_num}.") + device = [oneflow.device(f"cuda:{w}") for w in device] + elif device is not None and not isinstance(device, oneflow.device): + raise ValueError("Parameter `device` is wrong type, please check our documentation for the right use.") + + if driver == "oneflow": # single, ddp, 直接启动。 + if not isinstance(device, List): + return OneflowSingleDriver(model, device, **kwargs) + else: + raise RuntimeError("If you want to run distributed training, please use " + "'python -m oneflow.distributed.launch xxx.py'.") + return OneflowDDPDriver(model, device, **kwargs) \ No newline at end of file diff --git a/fastNLP/core/drivers/oneflow_driver/oneflow_driver.py 
b/fastNLP/core/drivers/oneflow_driver/oneflow_driver.py new file mode 100644 index 00000000..17777358 --- /dev/null +++ b/fastNLP/core/drivers/oneflow_driver/oneflow_driver.py @@ -0,0 +1,445 @@ +import os +from typing import Union, Dict, Optional, Callable, Tuple +from functools import partial +import numpy as np +import random +from dataclasses import dataclass +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW +from pathlib import Path +if _NEED_IMPORT_ONEFLOW: + import oneflow + from oneflow.utils.data import DataLoader, Sampler, BatchSampler, Dataset + from oneflow.optim import Optimizer + from oneflow.utils.data import RandomSampler as OneflowRandomSampler + _reduces = { + "sum": oneflow.sum, + "min": oneflow.min, + "max": oneflow.max, + "mean": oneflow.mean + } + + +__all__ = [ + "OneflowDriver" +] + +from .utils import optimizer_state_to_device, DummyGradScaler +from fastNLP.core.drivers.driver import Driver +from fastNLP.core.utils.utils import _get_fun_msg, nullcontext +from fastNLP.core.utils import apply_to_collection, oneflow_move_data_to_device, auto_param_call +from fastNLP.envs import rank_zero_call +from fastNLP.envs import FASTNLP_GLOBAL_RANK, FASTNLP_MODEL_FILENAME, FASTNLP_CHECKPOINT_FILENAME +from fastNLP.core.log import logger +from fastNLP.core.samplers import ReproducibleBatchSampler, ReproducibleSampler, ReproduceBatchSampler, RandomSampler +from fastNLP.core.dataloaders import OverfitDataLoader + + +class OneflowDriver(Driver): + r""" + 专属于 ``oneflow`` 的 ``driver``,是 ``OneflowSingleDriver`` 和 ``OneflowDDPDriver`` 的父类; + + .. warning:: + + 您不应当直接初始化该类,然后传入给 ``Trainer``,换句话说,您应当使用该类的子类 ``OneflowSingleDriver`` 和 ``OneflowDDPDriver``,而不是 + 该类本身; + + .. 
note:: + + 您可以在使用 ``OneflowSingleDriver`` 和 ``OneflowDDPDriver`` 时使用 ``OneflowDriver`` 提供的接口; + + """ + def __init__(self, model, fp16: Optional[bool] = False, oneflow_kwargs: Dict = {}, **kwargs): + super(OneflowDriver, self).__init__(model) + + """ 进行 fp16 的设置 """ + self._oneflow_kwargs = oneflow_kwargs + + self.fp16 = fp16 + if fp16: + logger.warn("OneflowDriver of eager mode dose not support fp16 now.``") + # self.auto_cast, _grad_scaler = _build_fp16_env(dummy=not self.fp16) + # self.grad_scaler = _grad_scaler(**self._oneflow_kwargs.get("gradscaler_kwargs", {})) + self.auto_cast = nullcontext + self.grad_scaler = DummyGradScaler() + self.set_grad_to_none = self._oneflow_kwargs.get("set_grad_to_none") + + self.wo_auto_param_call = kwargs.get("model_wo_auto_param_call", False) + + def zero_grad(self): + for optimizer in self.optimizers: + optimizer.zero_grad(self.set_grad_to_none) + + def backward(self, loss): + loss.backward() + # self.grad_scaler.scale(loss).backward() + + def step(self): + for optimizer in self.optimizers: + self.grad_scaler.step(optimizer) + self.grad_scaler.update() + + def check_dataloader_legality(self, dataloader): + if not isinstance(dataloader, DataLoader) and not isinstance(dataloader, OverfitDataLoader): + raise TypeError(f"{DataLoader} is expected, instead of `{type(dataloader)}`") + if len(dataloader) == 0: + logger.rank_zero_warning("Your dataloader is empty, which is not recommended because it " + "may cause some unexpected exceptions.", once=True) + + @staticmethod + def _check_optimizer_legality(optimizers): + for each_optimizer in optimizers: + if not isinstance(each_optimizer, Optimizer): + raise TypeError(f"Each optimizer of parameter `optimizers` should be 'Optimizer' type, " + f"not {type(each_optimizer)}.") + + @staticmethod + def tensor_to_numeric(tensor, reduce: str = None): + r""" + 将 ``oneflow.Tensor`` 转换成 python 中的数值类型; + + :param tensor: ``oneflow.Tensor``; + :param reduce: 当 tensor 
是一个多数值的张量时,应当使用何种归一化操作来转换成单一数值,应当为以下类型之一:``['max', 'min', 'sum', 'mean']``; + :return: 返回一个单一数值,其数值类型是 python 中的基本的数值类型,例如 ``int,float`` 等; + """ + + if tensor is None: + return None + + def _translate(_data): + if _data.numel() == 1: + return _data.item() + if reduce is None: + return _data.tolist() + return _reduces[reduce](_data).item() + + return apply_to_collection( + data=tensor, + dtype=oneflow.Tensor, + function=_translate + ) + + def set_model_mode(self, mode: str): + r""" + 设置模型的状态是 ``train`` 还是 ``eval``; + :param mode: ``'train'`` 或 ``'eval'``; + """ + assert mode in {"train", "eval"} + getattr(self.model, mode)() + + @rank_zero_call + def save_model(self, filepath: Union[str, Path], only_state_dict: bool = True, **kwargs): + """ + 保存当前 driver 的模型到 folder 下。 + + :param filepath: 保存到哪个文件夹; + :param only_state_dict: 是否只保存权重;如果使用 ``DistributedDataParallel`` 启动分布式训练的话,该参数只能为 ``True``; + :return: + """ + model = self.unwrap_model() + if not only_state_dict and self.is_distributed(): + logger.warn("`Cannot save ddp model directly, we will save its state_dict for you.") + only_state_dict = True + + if only_state_dict: + states = {name: param.cpu().detach().clone() for name, param in model.state_dict().items()} + oneflow.save(states, filepath) + else: + if self.model_device is not None: + if not self.is_distributed(): + self.move_model_to_device(model, oneflow.device("cpu")) + oneflow.save(model, filepath) + if not self.is_distributed(): + self.move_model_to_device(model, self.model_device) + else: + oneflow.save(model, filepath) + + def load_model(self, filepath: Union[Path, str], only_state_dict: bool = True, **kwargs): + """ + 从 folder 中加载权重并赋值到当前 driver 的模型上。 + + :param filepath: 加载权重或模型的路径 + :param load_state_dict: 保存的内容是否只是权重。 + :param kwargs: + :return: + """ + model = self.unwrap_model() + res = oneflow.load(filepath) + if isinstance(res, dict) and only_state_dict is False: + logger.rank_zero_warning(f"It seems like that {filepath} only contains state, 
you may need to use " + f"`only_state_dict=True`") + elif not isinstance(res, dict) and only_state_dict is True: + logger.rank_zero_warning(f"It seems like that {filepath} is not state, you may need to use " + f"`only_state_dict=False`") + if not isinstance(res, dict): + res = res.state_dict() + model.load_state_dict(res) + + @rank_zero_call + def save_checkpoint(self, folder: Path, states: Dict, dataloader, only_state_dict: bool = True, should_save_model: bool = True, **kwargs): + # 传入的 dataloader 参数是 trainer 的 dataloader 属性,因为 driver 的所有 dataloader 我们是不会去改变它的,而是通过改变 + # trainer.dataloader 来改变 dataloader 的状态,从而适配训练或者评测环境; + + # 1. sampler 的状态; + num_consumed_batches = states.pop("num_consumed_batches") + states["sampler_states"] = self.get_sampler_state(dataloader, num_consumed_batches) + + # 2. 保存模型的状态; + if should_save_model: + if not os.path.exists(folder): + os.mkdir(folder) + model_path = folder.joinpath(FASTNLP_MODEL_FILENAME) + self.save_model(model_path, only_state_dict=only_state_dict) + + # 3. 保存 optimizers 的状态; + states["optimizers_state_dict"] = self.get_optimizer_state() + logger.debug("Save optimizer state dict.") + + # # 4. 保存fp16的状态 + # if not isinstance(self.grad_scaler, DummyGradScaler): + # grad_scaler_state_dict = self.grad_scaler.state_dict() + # states['grad_scaler_state_dict'] = grad_scaler_state_dict + + oneflow.save(states, Path(folder).joinpath(FASTNLP_CHECKPOINT_FILENAME)) + + def get_sampler_state(self, dataloader, num_consumed_batches): + dataloader_args = self.get_dataloader_args(dataloader) + if isinstance(dataloader_args.batch_sampler, ReproducibleBatchSampler): + sampler = dataloader_args.batch_sampler + elif dataloader_args.sampler: + sampler = dataloader_args.sampler + else: + raise RuntimeError("This condition is not supposed to appear. 
Please report a bug to us.") + + if hasattr(sampler, "state_dict") and callable(sampler.state_dict): + sampler_states = sampler.state_dict() + if dataloader_args.batch_size is not None: + sampler_states["num_consumed_samples"] = sampler.num_replicas * dataloader_args.batch_size \ + * num_consumed_batches + else: + logger.rank_zero_warning("fastNLP cannot get batch_size, we have to save based on sampler's " + "`num_consumed_samples`, it may cause missing some samples when reload.") + else: + raise RuntimeError("The sampler has no `state_dict()` method, fastNLP cannot save the training " + "state.") + + return sampler_states + + def load_sampler_state(self, dataloader, sampler_states): + states = {} + dataloader_args = self.get_dataloader_args(dataloader) + if isinstance(dataloader_args.batch_sampler, ReproducibleBatchSampler): + sampler = dataloader_args.batch_sampler + elif isinstance(dataloader_args.sampler, ReproducibleSampler): + sampler = dataloader_args.sampler + elif isinstance(dataloader_args.sampler, OneflowRandomSampler): + sampler = RandomSampler(dataloader_args.sampler.data_source) + logger.debug("Replace oneflow RandomSampler into fastNLP RandomSampler.") + elif self.is_distributed(): + raise RuntimeError("It is not allowed to use checkpoint retraining when you do not use our" + "`ReproducibleSampler`.") + else: + sampler = ReproduceBatchSampler( + batch_sampler=dataloader_args.batch_sampler if dataloader_args.batch_sampler is not None else dataloader_args.sampler, + batch_size=dataloader_args.batch_size, + drop_last=dataloader_args.drop_last + ) + sampler.load_state_dict(sampler_states) + states["dataloader"] = self.set_dist_repro_dataloader(dataloader, sampler) + + # 修改 trainer_state.batch_idx_in_epoch + # sampler 是类似 RandomSampler 的sampler,不是 batch_sampler; + if not isinstance(sampler, ReproducibleBatchSampler): + if dataloader_args.drop_last: + batch_idx_in_epoch = len( + sampler) // dataloader_args.batch_size - sampler.num_left_samples // 
dataloader_args.batch_size + else: + batch_idx_in_epoch = (len(sampler) + dataloader_args.batch_size - 1) // dataloader_args.batch_size - \ + (sampler.num_left_samples + dataloader_args.batch_size - 1) // dataloader_args.batch_size + # sampler 是 batch_sampler; + else: + batch_idx_in_epoch = sampler.batch_idx_in_epoch + + states["batch_idx_in_epoch"] = batch_idx_in_epoch + return states + + def get_optimizer_state(self): + optimizers_state_dict = {} + for i in range(len(self.optimizers)): + optimizer: oneflow.optim.Optimizer = self.optimizers[i] + optimizer_state = optimizer.state_dict() + optimizer_state["state"] = optimizer_state_to_device(optimizer_state["state"], oneflow.device("cpu")) + optimizers_state_dict[f"optimizer{i}"] = optimizer_state # 注意这里没有使用 deepcopy,测试是不需要的; + return optimizers_state_dict + + def load_optimizer_state(self, states): + assert len(states) == len(self.optimizers), f"The number of optimizers is:{len(self.optimizers)}, while in " \ + f"checkpoint it is:{len(states)}" + for i in range(len(self.optimizers)): + optimizer: oneflow.optim.Optimizer = self.optimizers[i] + optimizer.load_state_dict(states[f"optimizer{i}"]) + logger.debug("Load optimizer state dict.") + + def load_checkpoint(self, folder: Path, dataloader, only_state_dict: bool = True, should_load_model: bool = True, **kwargs) -> Dict: + states = oneflow.load(folder.joinpath(FASTNLP_CHECKPOINT_FILENAME)) + + # 1. 加载 optimizers 的状态; + optimizers_state_dict = states.pop("optimizers_state_dict") + self.load_optimizer_state(optimizers_state_dict) + + # 2. 加载模型状态; + if should_load_model: + self.load_model(filepath=folder.joinpath(FASTNLP_MODEL_FILENAME), only_state_dict=only_state_dict) + + # # 3. 
加载 fp16 的状态 + # if "grad_scaler_state_dict" in states: + # grad_scaler_state_dict = states.pop("grad_scaler_state_dict") + # if not isinstance(self.grad_scaler, DummyGradScaler): + # self.grad_scaler.load_state_dict(grad_scaler_state_dict) + # logger.debug("Load grad_scaler state dict...") + # elif not isinstance(self.grad_scaler, DummyGradScaler): + # logger.rank_zero_warning(f"Checkpoint {folder} is not trained with fp16=True, while resume to a fp16=True training, " + # f"the training process may be unstable.") + + # 4. 恢复 sampler 的状态; + sampler_states = states.pop("sampler_states") + states_ret = self.load_sampler_state(dataloader, sampler_states) + states.update(states_ret) + + return states + + def get_evaluate_context(self): + r""" + :return: 返回 ``oneflow.no_grad`` 这个 context; + """ + return oneflow.no_grad + + def model_call(self, batch, fn: Callable, signature_fn: Optional[Callable]) -> Dict: + if isinstance(batch, Dict) and not self.wo_auto_param_call: + return auto_param_call(fn, batch, signature_fn=signature_fn) + else: + return fn(batch) + + def get_model_call_fn(self, fn: str) -> Tuple: + if hasattr(self.model, fn): + fn = getattr(self.model, fn) + if not callable(fn): + raise RuntimeError(f"The `{fn}` attribute is not `Callable`.") + logger.debug(f"Use {_get_fun_msg(fn, with_fp=False)}...") + return fn, None + elif fn in {"train_step", "evaluate_step"}: + logger.debug(f"Use {_get_fun_msg(self.model.forward, with_fp=False)}...") + return self.model, self.model.forward + else: + raise RuntimeError(f"There is no `{fn}` method in your {type(self.model)}.") + + @staticmethod + def move_model_to_device(model: "oneflow.nn.Module", device: "oneflow.device"): + r""" + 将模型迁移到对应的设备上; + """ + if device is not None: + model.to(device) + + def move_data_to_device(self, batch): + """ + 将一个 batch 的数据迁移到对应的设备上; + + :param batch: 一个 batch 的数据,可以是 ``list、dict`` 等; + :return: + """ + return oneflow_move_data_to_device(batch, self.data_device) + + @staticmethod + def 
@staticmethod
def worker_init_function(worker_id: int, rank: Optional[int] = None) -> None:  # pragma: no cover
    """Seed numpy / oneflow / stdlib ``random`` inside a dataloader worker.

    Derives per-worker seeds from the process seed so every worker (and every
    rank, when ``rank`` is given) draws an independent random stream.
    """
    global_rank = rank if rank is not None else int(os.environ.get(FASTNLP_GLOBAL_RANK, 0))
    process_seed = oneflow.initial_seed()

    # back out the base seed shared by all workers of this process;
    base_seed = process_seed - worker_id
    ss = np.random.SeedSequence([base_seed, worker_id, global_rank])

    # numpy's legacy seed() takes a 128-bit state as four uint32 words;
    np.random.seed(ss.generate_state(4))

    oneflow_ss, stdlib_ss = ss.spawn(2)
    oneflow.manual_seed(oneflow_ss.generate_state(1, dtype=np.uint64)[0])

    # stdlib random accepts arbitrarily large ints: combine two uint64 words;
    stdlib_seed = (stdlib_ss.generate_state(2, dtype=np.uint64).astype(object) * [1 << 64, 1]).sum()
    random.seed(stdlib_seed)


def set_deterministic_dataloader(self, dataloader: "DataLoader"):
    """Install :meth:`worker_init_function` when the dataloader has no ``worker_init_fn``."""
    if dataloader.worker_init_fn is None:
        dataloader.worker_init_fn = partial(self.worker_init_function,
                                            rank=int(os.environ.get(FASTNLP_GLOBAL_RANK, 0)))


def set_sampler_epoch(self, dataloader: "DataLoader", cur_epoch_idx: int):
    """Propagate the epoch index to the sampler.

    Keeps ``shuffle=True`` correct under ddp: every process must seed its
    sampler's shuffle with the same epoch-derived value.
    """
    if callable(getattr(dataloader.sampler, "set_epoch", None)):
        dataloader.sampler.set_epoch(cur_epoch_idx)


@staticmethod
def get_dataloader_args(dataloader: "DataLoader"):
    """
    Inspect ``dataloader`` and report its dataset, (batch_)sampler, batch_size,
    shuffle and drop_last settings.
    """

    @dataclass
    class Res:
        dataset: Optional[Dataset] = None
        batch_sampler: Optional[BatchSampler] = None
        sampler: Optional[Sampler] = None
        batch_size: Optional[int] = None
        shuffle: Optional[bool] = None
        drop_last: Optional[bool] = None

    res = Res()

    # a oneflow DataLoader always has a dataset attribute;
    res.dataset = dataloader.dataset

    if dataloader.batch_sampler is None:
        # the dataloader iterates sample by sample;
        res.sampler = dataloader.sampler
        res.batch_size = 1
        res.shuffle = True if isinstance(dataloader.sampler, RandomSampler) else False
        res.drop_last = False
    else:
        res.batch_sampler = dataloader.batch_sampler
        if hasattr(dataloader.batch_sampler, "batch_size"):
            res.batch_size = getattr(dataloader.batch_sampler, "batch_size")
        else:
            # user-defined batch_sampler without a `batch_size` attribute: peek at one batch;
            dataloader_iter = iter(dataloader)
            pre_sample = next(dataloader_iter)
            res.batch_size = pre_sample.shape[0]

        if hasattr(dataloader.batch_sampler, "sampler"):
            res.sampler = dataloader.batch_sampler.sampler
            if hasattr(dataloader.batch_sampler.sampler, "shuffle"):
                res.shuffle = dataloader.batch_sampler.sampler.shuffle
            elif isinstance(dataloader.batch_sampler.sampler, OneflowRandomSampler):
                res.shuffle = True
            else:
                res.shuffle = False
        elif hasattr(dataloader.batch_sampler, "batch_sampler"):
            # ReproduceBatchSampler wraps the original batch_sampler;
            batch_sampler = dataloader.batch_sampler.batch_sampler
            res.sampler = batch_sampler.sampler
            if hasattr(batch_sampler.sampler, "shuffle"):
                # BUGFIX: read `shuffle` from the wrapped sampler (the attribute the
                # guard above actually checked); the outer ReproduceBatchSampler has
                # no `sampler` attribute, so the old code raised AttributeError here.
                res.shuffle = batch_sampler.sampler.shuffle
            elif isinstance(batch_sampler.sampler, OneflowRandomSampler):
                res.shuffle = True
            else:
                res.shuffle = False
        else:
            # custom batch_sampler without a `sampler` attribute: the DataLoader created
            # a default sampler itself, so expose that one;
            res.sampler = dataloader.sampler
            res.shuffle = False

        if hasattr(dataloader.batch_sampler, "drop_last"):
            res.drop_last = getattr(dataloader.batch_sampler, "drop_last")
        else:
            # custom batch_sampler without a `drop_last` attribute;
            res.drop_last = False

    return res
class OneflowSingleDriver(OneflowDriver):
    r"""
    Driver that runs ``oneflow`` eager code on cpu or on a single gpu.

    :param model: the ``model`` passed to ``Trainer``;
    :param device: the ``oneflow.device`` this process uses;
    :param fp16: whether to enable fp16; currently ignored in single-device eager mode;
    :param oneflow_kwargs: extra oneflow-specific options;
    """

    def __init__(self, model, device: "oneflow.device", fp16: bool = False, oneflow_kwargs: Dict = None, **kwargs):
        # BUGFIX: default was a mutable `{}` shared across instances; use None.
        oneflow_kwargs = {} if oneflow_kwargs is None else oneflow_kwargs
        cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
        if cuda_visible_devices == "":
            device = oneflow.device("cpu")
            # BUGFIX: the two adjacent literals used to concatenate into "gonna touse";
            logger.info("You have set `CUDA_VISIBLE_DEVICES` to '' in system environment variable, and we are "
                        "going to use `cpu` instead of `gpu` device.")

        super(OneflowSingleDriver, self).__init__(model, fp16=fp16, **kwargs)

        if device is None:
            logger.debug("device is not set, fastNLP will try to automatically get it.")
            try:
                device = next(model.parameters()).device
                assert isinstance(device, oneflow.device)
            except Exception as e:
                # narrowed from a bare `except:`; user-facing message unchanged;
                raise ValueError("fastNLP cannot get device automatically, please set device explicitly.") from e

        self.model_device = device

        self.local_rank = 0
        self.global_rank = 0
        self.world_size = 1

    def setup(self):
        r"""Move the model onto ``self.model_device`` (if one is set)."""
        if self.model_device is not None:
            self.model.to(self.model_device)

    def set_dist_repro_dataloader(self, dataloader,
                                  dist: Union[str, ReproducibleBatchSampler, ReproducibleSampler] = None,
                                  reproducible: bool = False):
        """Return a dataloader whose sampling state can be checkpointed / restored."""
        # a concrete Reproducible* instance means driver.load_checkpoint is restoring state;
        if isinstance(dist, ReproducibleBatchSampler):
            return replace_batch_sampler(dataloader, dist)
        elif isinstance(dist, ReproducibleSampler):
            return replace_sampler(dataloader, dist)

        # dist being a str or None means we were called during Trainer initialization;
        args = self.get_dataloader_args(dataloader)
        if isinstance(args.batch_sampler, ReproducibleBatchSampler):
            batch_sampler = re_instantiate_sampler(args.batch_sampler)
            return replace_batch_sampler(dataloader, batch_sampler)
        elif isinstance(args.sampler, ReproducibleSampler):
            sampler = re_instantiate_sampler(args.sampler)
            return replace_sampler(dataloader, sampler)

        if reproducible:
            if type(args.batch_sampler) is OneflowBatchSampler:
                if type(args.sampler) is OneflowSequentialSampler:
                    # swap in a fastNLP sampler that does not shuffle;
                    sampler = RandomSampler(args.sampler.data_source, shuffle=False)
                    logger.debug("Replace oneflow SequentialSampler into fastNLP RandomSampler.")
                    return replace_sampler(dataloader, sampler)
            batch_sampler = ReproduceBatchSampler(
                batch_sampler=args.batch_sampler,
                batch_size=args.batch_size,
                drop_last=args.drop_last
            )
            return replace_batch_sampler(dataloader, batch_sampler)
        else:
            return dataloader

    def unwrap_model(self):
        r"""
        :return: the underlying model;
        """
        return self.model

    @property
    def data_device(self):
        r"""
        :return: the device data and model live on;
        """
        return self.model_device

    def is_distributed(self):
        r"""
        :return: whether this driver is distributed; always ``False`` for ``OneflowSingleDriver``;
        """
        return False
def oneflow_seed_everything(seed: int = None, add_global_rank_to_seed: bool = True) -> int:
    r"""
    Seed the **oneflow**, **numpy** and **python.random** pseudo random generators.

    :param seed: integer seed for the global random state; when ``None`` a seed is
        derived from the timestamp.
    :param add_global_rank_to_seed: whether different ranks use different random
        streams in distributed training; when ``True``, **fastNLP** adds the
        current ``global_rank`` to the seed.
    """
    seed_max = np.iinfo(np.uint32).max
    seed_min = np.iinfo(np.uint32).min

    if seed is None:
        # under a backend launch, all workers must agree on the seed;
        seed = 42 if os.getenv(FASTNLP_BACKEND_LAUNCH) == "1" else get_global_seed()
        logger.info(f"'FASTNLP_GLOBAL_SEED' is set to {seed} automatically.")
    if not isinstance(seed, int):
        seed = int(seed)

    if not (seed_min <= seed <= seed_max):
        logger.rank_zero_warning("Your seed value is too big or too small for numpy, we will choose a random seed for you.")
        seed %= seed_max

    os.environ[FASTNLP_GLOBAL_SEED] = f"{seed}"
    if add_global_rank_to_seed:
        seed += get_global_rank()

    random.seed(seed)
    np.random.seed(seed)
    oneflow.manual_seed(seed)
    oneflow.cuda.manual_seed_all(seed)
    return seed


class ForwardState(IntEnum):
    """Which user-facing phase a forward pass belongs to."""
    TRAIN = 0
    VALIDATE = 1
    TEST = 2
    PREDICT = 3


class _DDPWrappingModel(Module):
    """
    Wrapper used during DDP training to route user-defined ``train_step`` /
    ``evaluate_step`` functions through the wrapped module's ``forward``.

    DDP only synchronizes gradients correctly when calls go through
    ``DistributedDataParallel``'s ``forward``, yet fastNLP asks users to provide
    per-mode step functions; and unwrapping ``model.module`` mid-training makes
    parameters diverge across gpus.  Hence this indirection — see
    pytorch_lightning's ddp design for a longer discussion.
    """

    def __init__(self, model: Module):
        super(_DDPWrappingModel, self).__init__()
        self.model = model

    def forward(self, batch, **kwargs) -> Dict:
        """Dispatch to the real step function smuggled in through ``kwargs``.

        (pytorch_lightning unwraps the model first here; not needed for us so far.)
        """
        fn = kwargs.pop("fastnlp_fn")
        signature_fn = kwargs.pop("fastnlp_signature_fn")
        wo_auto_param_call = kwargs.pop("wo_auto_param_call")

        if isinstance(batch, Dict) and not wo_auto_param_call:
            return auto_param_call(fn, batch, signature_fn=signature_fn)
        else:
            return fn(batch)
另一方面,我们要求用户在使用我们的框架时,需要针对不用的模式实现不同的处理函数,例如 'train_step', 'evaluate_step' 等; + 然而,当使用 DistributedDataParallel 包裹 model 后,模型看不见其除了 forward 之外的方法;并且当我们尝试在训练过程中主动提取 + `model = model.module`,这同样会导致错误,会使得每一个gpu上的模型参数不同; + + 因此出于以上考虑,我们实现了这一函数; + 对于更详细的解释,可以参考 'pytorch_lightning' 的 ddp 的设计; + """ + + def __init__(self, model: Module): + super(_DDPWrappingModel, self).__init__() + self.model = model + + def forward(self, batch, **kwargs) -> Dict: + """ + pytorch lightning 实现了先 unwrapping_model 的操作,但是感觉对于我们来说没有什么必须要,先写个注释放这里,之后有需求了再看; + """ + fn = kwargs.pop("fastnlp_fn") + signature_fn = kwargs.pop("fastnlp_signature_fn") + wo_auto_param_call = kwargs.pop("wo_auto_param_call") + + if isinstance(batch, Dict) and not wo_auto_param_call: + return auto_param_call(fn, batch, signature_fn=signature_fn) + else: + return fn(batch) + + +class DummyGradScaler: + + def __init__(self, *args, **kwargs): + pass + + def get_scale(self): + return 1.0 + + def is_enabled(self): + return False + + def scale(self, outputs): + return outputs + + def step(self, optimizer, *args, **kwargs): + optimizer.step(*args, **kwargs) + + def update(self, new_scale=None): + pass + + def unscale_(self, optimizer): + pass + + def load_state_dict(self, state_dict): + pass + + def state_dict(self): + return {} + + +def _build_fp16_env(dummy=False): + return + if dummy: + autocast = contextlib.ExitStack + GradScaler = DummyGradScaler + else: + if not oneflow.cuda.is_available(): + raise RuntimeError("Oneflow is not installed in gpu version, please use device='cpu'.") + if oneflow.cuda.get_device_capability(0)[0] < 7: + logger.rank_zero_warning( + "NOTE: your device does NOT support faster training with fp16, " + "please switch to FP32 which is likely to be faster" + ) + try: + from oneflow.amp import GradScaler + from oneflow.cuda.amp import autocast, GradScaler + except ImportError: + raise RuntimeError("torch version too low (less than 1.6)") + return autocast, GradScaler + + +def replace_sampler(dataloader: 
def replace_sampler(dataloader: "DataLoader", sampler):
    r"""
    Rebuild ``dataloader`` with ``sampler`` installed.

    We re-instantiate ``type(dataloader)`` — not ``DataLoader`` itself — because
    the user may have subclassed it; for the same reason we inspect
    ``inspect.signature(dataloader.__init__)`` rather than ``DataLoader.__init__``.

    A customized dataloader must guarantee two things:

    1. its ``__init__`` accepts ``**kwargs`` so the sampler can be forwarded to the
       underlying ``DataLoader``;
    2. every ``__init__`` parameter is stored as an attribute of the same name
       (``self.one_arg_name = one_arg_name``) — attribute inspection is the only
       way to recover the constructor arguments.
    """

    # public instance attributes = best guess at the original constructor args;
    instance_attrs = {k: v for k, v in vars(dataloader).items() if not k.startswith('_')}

    # 'multiprocessing_context' may be a user-defined attribute vars() misses;
    if getattr(dataloader, 'multiprocessing_context', None) is not None:
        instance_attrs["multiprocessing_context"] = dataloader.multiprocessing_context

    # signature of the (possibly user-defined) __init__;
    init_params = dict(inspect.signature(dataloader.__init__).parameters)

    # the subclass may rely on **kwargs to feed oneflow's DataLoader.__init__
    # (i.e. a plain super().__init__(**kwargs)); merge DataLoader's parameters in
    # WITHOUT overriding the subclass's own ones;
    has_variadic_kwargs = any(v.kind is v.VAR_KEYWORD for k, v in init_params.items())
    if has_variadic_kwargs and isinstance(dataloader, DataLoader):
        for key, value in dict(inspect.signature(DataLoader.__init__).parameters).items():
            if key not in init_params and key != 'self':
                init_params[key] = value

    # keep only arguments whose current value differs from the signature default;
    non_default_params = {name for name, p in init_params.items() if
                          name in instance_attrs and p.default != instance_attrs[name]}
    # add `dataset` as it might have been replaced with `*args`
    non_default_params.add("dataset")

    reconstruct_args = {k: v for k, v in instance_attrs.items() if k in non_default_params}
    if isinstance(dataloader, DataLoader):
        reconstruct_args.update({"sampler": sampler, "shuffle": False, "batch_sampler": None})

    batch_sampler = getattr(dataloader, "batch_sampler")
    if batch_sampler is not None and isinstance(batch_sampler, ReproducibleBatchSampler):
        raise RuntimeError("It should not be running here, please report a bug to us.")

    required_args = {
        p.name
        for p in init_params.values()
        if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
        and p.default is p.empty
        and p.name not in reconstruct_args
    }

    # mandatory constructor arguments that could not be recovered from attributes;
    if required_args:
        required_args = sorted(required_args)
        dataloader_self_name = dataloader.__class__.__name__
        raise Exception(
            f"Need to inject arguments {required_args} into the __init__ of `{dataloader_self_name}`. "
            f"But they are not found in the attribute of `{dataloader_self_name}`, fastNLP cannot determine its "
            f"value when try to reinitialize `{dataloader_self_name}`, please add `{required_args}` to be "
            f"`{dataloader_self_name}`'s attribute."
        )

    # a subclassed DataLoader without **kwargs cannot accept arbitrary keywords;
    if not has_variadic_kwargs:
        missing_kwargs = reconstruct_args.keys() - init_params.keys()
        if missing_kwargs:
            missing_kwargs = sorted(missing_kwargs)
            dataloader_self_name = dataloader.__class__.__name__
            raise Exception(
                f"The parameter:{missing_kwargs} needed to reinitialize `{dataloader_self_name}` is not found."
            )
        # without **kwargs, pass only the parameters the signature actually declares;
        if not isinstance(dataloader, DataLoader):
            reconstruct_args = {key: value for key, value in reconstruct_args.items() if key in init_params}

    return type(dataloader)(**reconstruct_args)


def replace_batch_sampler(dataloader, new_batch_sampler):
    r"""Rebuild ``dataloader`` with ``new_batch_sampler`` installed."""
    # these keys conflict with an explicitly supplied batch_sampler, drop them;
    excluded = {"batch_size", "sampler", "drop_last", "batch_sampler", "dataset_kind"}
    params = {k: getattr(dataloader, k)
              for k in dataloader.__dict__ if not k.startswith("_") and k not in excluded}
    params["batch_sampler"] = new_batch_sampler

    if not isinstance(dataloader, DataLoader):
        # for a custom dataloader without **kwargs, pass only declared parameters;
        init_params = dict(inspect.signature(dataloader.__init__).parameters)
        has_variadic_kwargs = any(v.kind is v.VAR_KEYWORD for k, v in init_params.items())
        if not has_variadic_kwargs:
            params = {key: value for key, value in params.items() if key in init_params}

    return type(dataloader)(**params)


def optimizer_state_to_device(state, device):
    r"""
    Recursively copy an optimizer ``state_dict`` onto ``device``.

    :param state: ``optimizer.state_dict()`` (or a nested piece of it);
    :param device: the destination device;
    :return: a new state dict living on ``device``;
    """
    moved = {}
    for name, param in state.items():
        if isinstance(param, dict):
            moved[name] = optimizer_state_to_device(param, device)
        elif isinstance(param, oneflow.Tensor):
            moved[name] = param.to(device).clone()
        else:
            moved[name] = param
    return moved


def _check_dataloader_args_for_distributed(args, controller='Trainer'):
    """Reject customized samplers in distributed runs — fastNLP must be able to swap them."""
    if type(args.batch_sampler) is not oneflowBatchSampler or (type(args.sampler) not in {oneflowRandomSampler,
                                                                                          oneflowSequentialSampler}):
        mode = 'training' if controller == 'Trainer' else 'evaluation'
        substitution = 'fastNLP.RandomSampler' if controller == 'Trainer' else 'fastNLP.UnrepeatedSequentialSampler'
        raise TypeError(f"Using customized ``batch_sampler`` or ``sampler`` for distributed {mode} may cause "
                        f"unpredictable problems, because fastNLP will substitute the dataloader's sampler into "
                        f"``{substitution}``. The customized sampler should set for distributed running "
                        f"before initializing ``{controller}`` , and then set the "
                        f"parameter ``use_dist_sampler`` of ``{controller}`` to ``False``.")
class OneflowBackend(Backend):
    """Metric backend for **oneflow** tensors."""

    def __init__(self):
        super().__init__()
        self._specified = True

    def aggregate(self, tensor, method: str):
        """
        Gather ``tensor`` from every rank, then reduce along dim ``0`` with ``method``.

        :param tensor: the tensor to aggregate
        :param method: one of ``['sum', 'mean', 'max', 'min']`` — the reduction
            applied along dim ``0`` of the stacked per-rank results.
            (FIX: the original docstring listed a non-existent ``'mix'`` method;
            the code implements ``'min'``.)
        """
        if isinstance(tensor, oneflow.Tensor):
            # TODO no official way found yet to ask oneflow whether dist is initialized
            if is_in_oneflow_dist():
                if method is None:
                    raise AggregateMethodError(should_have_aggregate_method=True)
                tensor = self.all_gather_object(tensor)
                if isinstance(tensor[0], oneflow.Tensor):
                    tensor = oneflow.stack(tensor)
                # reduce the gathered results;
                if method == 'sum':
                    tensor = oneflow.sum(tensor, dim=0)
                elif method == 'mean':
                    tensor = oneflow.mean(tensor, dim=0)
                elif method == 'max':
                    tensor, _ = oneflow.max(tensor, dim=0)
                elif method == 'min':
                    tensor, _ = oneflow.min(tensor, dim=0)
                else:
                    raise AggregateMethodError(should_have_aggregate_method=False)

        return tensor

    def create_tensor(self, value: float):
        """
        Create a 1-element tensor filled with ``value``.

        :param value: the initial value of the tensor
        """
        tensor = oneflow.ones(1).fill_(value)
        return tensor

    def fill_value(self, tensor, value: float):
        """
        Fill ``tensor`` in place with ``value`` and return it.

        :param tensor: the tensor to fill
        :param value: the fill value
        """
        tensor.fill_(value)
        return tensor

    def get_scalar(self, tensor) -> float:
        """
        Return the python scalar stored in ``tensor``.

        :param tensor: a 1-element tensor
        """
        return tensor.item()

    def tensor2numpy(self, tensor) -> np.array:
        """
        Convert ``tensor`` to a numpy value (used during metric computation).

        :param tensor: tensor / ndarray / plain number
        """
        if isinstance(tensor, oneflow.Tensor):
            return tensor.cpu().detach().numpy()
        elif isinstance(tensor, np.ndarray):
            return tensor
        elif isinstance(tensor, (float, int)):
            return tensor
        else:
            raise ValueError(f"tensor: {tensor} can not convert to ndarray!")

    @staticmethod
    def is_distributed() -> bool:
        """
        Whether we are running under ddp.

        :return:
        """
        return is_in_oneflow_dist()

    def move_tensor_to_device(self, tensor, device):
        """
        Move ``tensor`` to ``device``.

        :param tensor: the tensor to move
        :param device: a device name such as "cpu" or "cuda:0"
        """
        return tensor.to(device)

    def all_gather_object(self, obj, group=None) -> List:
        """
        Gather ``obj`` from every rank onto every rank; the returned list is
        ordered by rank.  Outside a distributed run the list just wraps ``obj``.

        :param obj:
        :param group:
        """
        if self.is_distributed():
            obj_list = fastnlp_oneflow_all_gather(obj)
            return obj_list
        return [obj]
__all__ = [
    # BUGFIX: a missing comma after the first entry fused the first two names into
    # one string ('get_oneflow_deviceoneflow_move_data_to_device'), breaking
    # `from fastNLP.core.utils.oneflow_utils import *` for both helpers.
    'get_oneflow_device',
    'oneflow_move_data_to_device',
    'is_oneflow_module',
    'is_in_oneflow_dist',
]


def get_oneflow_device(device):
    """
    Build and return a :class:`oneflow.device` instance.

    :param device: a device string or a gpu index
    :return: :class:`oneflow.device`
    :raises RuntimeError: when ``device`` is none of the accepted kinds
    """
    if isinstance(device, oneflow.device):
        return device
    if isinstance(device, int):
        return oneflow.device("cuda", device)
    if isinstance(device, str):
        return oneflow.device(device)
    raise RuntimeError(f"Cannot get `oneflow.device` from {device}.")


def oneflow_move_data_to_device(batch: Any, device: Optional[Union[str, "oneflow.device"]] = None) -> Any:
    r"""
    Move the data collection ``batch`` to ``device`` (for **oneflow**). Any object
    defining a ``to(device)`` method is moved; everything else in the collection is
    left untouched.

    :param batch: the data to migrate;
    :param device: the destination device; ``None`` is a no-op;
    :return: the collection on the new device;
    """
    if device is None:
        return batch

    # imported lazily so this helper module stays importable in isolation;
    from .utils import apply_to_collection

    def batch_to(data: Any) -> Any:
        data_output = data.to(device)
        if data_output is not None:
            return data_output
        # user wrongly implemented the `TransferableDataType` and forgot to return `self`.
        return data

    return apply_to_collection(batch, dtype=oneflow.Tensor, function=batch_to)


def is_oneflow_module(model) -> bool:
    """
    Whether ``model`` is a :class:`oneflow.nn.Module`.

    :param model: the object to test;
    :return: ``False`` whenever the check cannot be performed (e.g. oneflow missing);
    """
    try:
        return isinstance(model, oneflow.nn.Module)
    except BaseException:
        return False


def is_in_oneflow_dist() -> bool:
    """
    Whether this process was launched by **oneflow** distributed.
    """
    # oneflow's launcher exports GLOG_log_dir for every worker;
    return "GLOG_log_dir" in os.environ
def set_env_on_import_oneflow():
    """Detect an **oneflow** distributed launch and normalize fastNLP's env vars."""
    # oneflow's launcher exports GLOG_log_dir (and RANK) for every worker;
    if 'GLOG_log_dir' in os.environ:
        os.environ[FASTNLP_GLOBAL_RANK] = os.environ['RANK']
        if int(os.environ.get(FASTNLP_REMOVE_LOCAL_RANK, 1)):
            remove_local_rank_in_argv()

    # mark that this process came from a backend launcher (unless fastNLP itself
    # is driving the distributed check);
    if 'GLOG_log_dir' in os.environ and FASTNLP_DISTRIBUTED_CHECK not in os.environ:
        os.environ[FASTNLP_BACKEND_LAUNCH] = '1'
@pytest.mark.oneflow
def test_oneflow_padder():
    """End-to-end checks of get_padder with the 'oneflow' backend."""
    if not _NEED_IMPORT_ONEFLOW:
        pytest.skip("No oneflow.")
    import oneflow
    backend = 'oneflow'
    target_type = oneflow.Tensor

    # plain ints pad to a LongTensor;
    batch_field = [1, 2, 3]
    padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test')
    padded = padder(batch_field)
    assert isinstance(padded, target_type)
    assert (padded == oneflow.LongTensor(batch_field)).sum() == len(batch_field)

    # ragged int lists are padded up with pad_val;
    batch_field = [[1], [2, 2], [3, 3, 3]]
    padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test')
    padded = padder(batch_field)
    assert isinstance(padded, target_type)
    assert padded.shape == (3, 3)
    assert (padded == oneflow.zeros(padded.shape)).sum() == 3

    # 2d tensors with different first dims;
    batch_field = [oneflow.ones((3, 3)), oneflow.ones((2, 3)), oneflow.ones((1, 3))]
    padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test')
    padded = padder(batch_field)
    assert isinstance(padded, target_type)
    assert padded.shape == (3, 3, 3)
    assert (padded == oneflow.zeros(padded.shape)).sum() == 9

    # an empty second dim is fine too;
    batch_field = [oneflow.ones((3, 3)), oneflow.ones((2, 3)), oneflow.ones((1, 0))]
    padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test')
    padded = padder(batch_field)
    assert isinstance(padded, target_type)
    assert padded.shape == (3, 3, 3)
    assert (padded == oneflow.zeros(padded.shape)).sum() == 12

    # inconsistent ranks must be rejected;
    batch_field = [oneflow.ones((3, 3)), oneflow.ones((2, 3)), oneflow.ones((1,))]
    with pytest.raises(InconsistencyError):
        padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test')

    # numpy.ndarray input is accepted as well;
    batch_field = [np.ones((3, 3)), np.ones((2, 3)), np.ones((1, 0))]
    padder = get_padder(batch_field, pad_val=0, backend=backend, dtype=int, field_name='test')
    padded = padder(batch_field)
    assert isinstance(padded, target_type)
    assert padded.shape == (3, 3, 3)
    assert (padded == oneflow.zeros(padded.shape)).sum() == 12

    # oneflow tensors can also be padded into numpy output;
    batch_field = [oneflow.ones((3, 3)), oneflow.ones((2, 3)), oneflow.ones((1, 0))]
    padder = get_padder(batch_field, pad_val=0, backend='numpy', dtype=int, field_name='test')
    padded = padder(batch_field)
    assert isinstance(padded, np.ndarray)
    assert np.shape(padded) == (3, 3, 3)
    assert (padded == np.zeros(np.shape(padded))).sum() == 12
assert (t_a == oneflow.LongTensor(a)).sum() == 3 + + +@pytest.mark.oneflow +class TestOneflowSequencePadder: + def test_run(self): + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=int, dtype=int) + a = [[1, 2, 3], [3]] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (2, 3) + b = oneflow.LongTensor([[1, 2, 3], [3, -1, -1]]) + assert (a == b).sum().item() == shape[0]*shape[1] + + def test_dtype_check(self): + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=np.zeros(3, dtype=np.int8).dtype, dtype=int) + with pytest.raises(DtypeError): + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=str, dtype=int) + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=oneflow.long, dtype=int) + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=np.int8, dtype=None) + a = padder([[1], [2, 322]]) + assert (a>67).sum()==0 # 因为int8的范围为-67 - 66 + padder = OneflowSequencePadder(pad_val=-1, ele_dtype=np.zeros(2).dtype, dtype=None) + + +@pytest.mark.oneflow +class TestOneflowTensorPadder: + def test_run(self): + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=oneflow.zeros(3).dtype, dtype=int) + a = [oneflow.zeros(3), oneflow.zeros(2), oneflow.zeros(0)] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (3, 3) + b = oneflow.LongTensor([[0, 0, 0], [0, 0, -1], [-1, -1, -1]]) + assert (a == b).sum().item() == shape[0]*shape[1] + + a = [oneflow.zeros((3, 2)), oneflow.zeros((2, 2)), oneflow.zeros((1, 2))] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (3, 3, 2) + b = oneflow.LongTensor([[[0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [-1, -1]], + [[0, 0], [-1, -1], [-1, -1]]]) + assert (a == b).sum().item() == shape[0]*shape[1]*shape[2] + + a = [oneflow.zeros((3, 2)), oneflow.zeros((2, 2)), oneflow.zeros((1, 1))] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (3, 3, 2) + b = 
oneflow.LongTensor([[[0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [-1, -1]], + [[0, -1], [-1, -1], [-1, -1]]]) + assert (a == b).sum().item() == shape[0]*shape[1]*shape[2] + + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=oneflow.zeros(3).dtype, dtype=int) + a = [oneflow.zeros((3, 2)), oneflow.zeros((2, 2)), oneflow.zeros((1, 0))] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (3, 3, 2) + b = oneflow.LongTensor([[[0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [-1, -1]], + [[-1, -1], [-1, -1], [-1, -1]]]) + assert (a == b).sum().item() == shape[0]*shape[1]*shape[2] + + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=oneflow.zeros(3).dtype, dtype=None) + a = [np.zeros((3, 2)), np.zeros((2, 2)), np.zeros((1, 0))] + a = padder(a) + shape = a.shape + assert isinstance(a, oneflow.Tensor) + assert tuple(shape) == (3, 3, 2) + b = oneflow.FloatTensor([[[0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [-1, -1]], + [[-1, -1], [-1, -1], [-1, -1]]]) + assert (a == b).sum().item() == shape[0]*shape[1]*shape[2] + + def test_dtype_check(self): + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=np.zeros(3, dtype=np.int8).dtype, dtype=int) + with pytest.raises(DtypeError): + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=str, dtype=int) + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=oneflow.long, dtype=int) + padder = OneflowTensorPadder(pad_val=-1, ele_dtype=int, dtype=oneflow.long) + diff --git a/tests/core/collators/test_collator.py b/tests/core/collators/test_collator.py index 8443ef92..d00cbe05 100644 --- a/tests/core/collators/test_collator.py +++ b/tests/core/collators/test_collator.py @@ -2,7 +2,7 @@ import numpy as np import pytest -from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_PADDLE, _NEED_IMPORT_JITTOR +from fastNLP.envs.imports import _NEED_IMPORT_TORCH, _NEED_IMPORT_PADDLE, _NEED_IMPORT_JITTOR, _NEED_IMPORT_ONEFLOW from fastNLP.core.collators.collator import Collator from ...helpers.utils 
import Capturing @@ -14,6 +14,10 @@ def _assert_equal(d1, d2): if 'float64' in str(d2.dtype): print(d2.dtype) assert (d1 == d2).all().item() + if 'oneflow' in str(type(d1)): + if 'float64' in str(d2.dtype): + print(d2.dtype) + assert (d1 == d2).all().item() else: assert all(d1 == d2) except TypeError: @@ -43,9 +47,9 @@ def findListDiff(d1, d2): class TestCollator: - @pytest.mark.torch - def test_run(self): - dict_batch = [{ + @staticmethod + def setup_class(cls): + cls.dict_batch = [{ 'str': '1', 'lst_str': ['1'], 'int': 1, @@ -75,17 +79,21 @@ class TestCollator: } ] - list_batch = [['1', ['1'], 1, [1], [[1]], 1.1, [1.1], True, np.ones(1), {'1': '1'}, {'1'}], - ['2', ['2', '2'], 2, [2, 2], [[1], [1, 2]], 2.1, [2.1], False, np.ones(2), {'2': '2'}, {'2'}]] + cls.list_batch = [['1', ['1'], 1, [1], [[1]], 1.1, [1.1], True, np.ones(1), {'1': '1'}, {'1'}], + ['2', ['2', '2'], 2, [2, 2], [[1], [1, 2]], 2.1, [2.1], False, np.ones(2), {'2': '2'}, {'2'}]] + + def test_run_traw(self): raw_pad_batch = {'str': ['1', '2'], 'lst_str': [['1'], ['2', '2']], 'int': [1, 2], 'lst_int': [[1, 0], [1, 2]], 'nest_lst_int': [[[1, 0], [0, 0]], [[1, 0], [1, 2]]], 'float': [1.1, 2.1], 'lst_float': [[1.1], [2.1]], 'bool': [True, False], 'numpy': [np.array([1.]), np.array([0.])], 'dict': {'1': ['1', '2']}, 'set': [{'1'}, {'2'}], 'nested_dict': {'a': [1, 2], 'b': [[1, 2], [1, 2]]}} collator = Collator(backend='raw') - assert raw_pad_batch == collator(dict_batch) + assert raw_pad_batch == collator(self.dict_batch) collator = Collator(backend='raw') raw_pad_lst = [['1', '2'], [['1'], ['2', '2']], [1, 2], [[1, 0], [2, 2]], [[[1, 0], [0, 0]], [[1, 0], [1, 2]]], [1.1, 2.1], [[1.1], [2.1]], [True, False], [[1, 0], [1, 1]], [{'1': '1'}, {'2': '2'}], [{'1'}, {'2'}]] - findListDiff(raw_pad_lst, collator(list_batch)) + findListDiff(raw_pad_lst, collator(self.list_batch)) + + def test_run_numpy(self): collator = Collator(backend='numpy') numpy_pad_batch = {'str': ['1', '2'], 'lst_str': [['1'], ['2', '2']], 
'int': np.array([1, 2]), 'lst_int': np.array([[1, 0], [1, 2]]), @@ -94,36 +102,60 @@ class TestCollator: 'dict': {'1': ['1', '2']}, 'set': [{'1'}, {'2'}], 'nested_dict': {'a': np.array([1, 2]), 'b': np.array([[1, 2], [1, 2]])}} - findDictDiff(numpy_pad_batch, collator(dict_batch)) + findDictDiff(numpy_pad_batch, collator(self.dict_batch)) collator = Collator(backend='numpy') numpy_pad_lst = [['1', '2'], [['1'], ['2', '2']], np.array([1, 2]), np.array([[1, 0], [2, 2]]), np.array([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), np.array([1.1, 2.1]), np.array([[1.1], [2.1]]), np.array([True, False]), np.array([[1, 0], [1, 1]]), [{'1': '1'}, {'2': '2'}], [{'1'}, {'2'}]] - findListDiff(numpy_pad_lst, collator(list_batch)) - - if _NEED_IMPORT_TORCH: - import torch - collator = Collator(backend='torch') - numpy_pad_batch = {'str': ['1', '2'], 'lst_str': [['1'], ['2', '2']], 'int': torch.LongTensor([1, 2]), - 'lst_int': torch.LongTensor([[1, 0], [1, 2]]), - 'nest_lst_int': torch.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), - 'float': torch.FloatTensor([1.1, 2.1]), - 'lst_float': torch.FloatTensor([[1.1], [2.1]]), 'bool': torch.BoolTensor([True, False]), - 'numpy': torch.FloatTensor([[1], [0]]), - 'dict': {'1': ['1', '2']}, 'set': [{'1'}, {'2'}], 'nested_dict': {'a': torch.LongTensor([1, 2]), - 'b': torch.LongTensor( - [[1, 2], [1, 2]])}} - - findDictDiff(numpy_pad_batch, collator(dict_batch)) - collator = Collator(backend='torch') - torch_pad_lst = [['1', '2'], [['1'], ['2', '2']], torch.LongTensor([1, 2]), torch.LongTensor([[1, 0], [2, 2]]), - torch.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), - torch.FloatTensor([1.1, 2.1]), torch.FloatTensor([[1.1], [2.1]]), torch.BoolTensor([True, False]), - torch.LongTensor([[1, 0], [1, 1]]), [{'1': '1'}, {'2': '2'}], - [{'1'}, {'2'}]] - findListDiff(torch_pad_lst, collator(list_batch)) + findListDiff(numpy_pad_lst, collator(self.list_batch)) + + @pytest.mark.torch + def test_run_torch(self): + import torch + collator = 
Collator(backend='torch') + numpy_pad_batch = {'str': ['1', '2'], 'lst_str': [['1'], ['2', '2']], 'int': torch.LongTensor([1, 2]), + 'lst_int': torch.LongTensor([[1, 0], [1, 2]]), + 'nest_lst_int': torch.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), + 'float': torch.FloatTensor([1.1, 2.1]), + 'lst_float': torch.FloatTensor([[1.1], [2.1]]), 'bool': torch.BoolTensor([True, False]), + 'numpy': torch.FloatTensor([[1], [0]]), + 'dict': {'1': ['1', '2']}, 'set': [{'1'}, {'2'}], 'nested_dict': {'a': torch.LongTensor([1, 2]), + 'b': torch.LongTensor( + [[1, 2], [1, 2]])}} + + findDictDiff(numpy_pad_batch, collator(self.dict_batch)) + collator = Collator(backend='torch') + torch_pad_lst = [['1', '2'], [['1'], ['2', '2']], torch.LongTensor([1, 2]), torch.LongTensor([[1, 0], [2, 2]]), + torch.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), + torch.FloatTensor([1.1, 2.1]), torch.FloatTensor([[1.1], [2.1]]), torch.BoolTensor([True, False]), + torch.LongTensor([[1, 0], [1, 1]]), [{'1': '1'}, {'2': '2'}], + [{'1'}, {'2'}]] + findListDiff(torch_pad_lst, collator(self.list_batch)) + + @pytest.mark.oneflow + def test_run_oneflow(self): + import oneflow + collator = Collator(backend='oneflow') + numpy_pad_batch = {'str': ['1', '2'], 'lst_str': [['1'], ['2', '2']], 'int': oneflow.LongTensor([1, 2]), + 'lst_int': oneflow.LongTensor([[1, 0], [1, 2]]), + 'nest_lst_int': oneflow.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), + 'float': oneflow.FloatTensor([1.1, 2.1]), + 'lst_float': oneflow.FloatTensor([[1.1], [2.1]]), 'bool': oneflow.BoolTensor([True, False]), + 'numpy': oneflow.FloatTensor([[1], [0]]), + 'dict': {'1': ['1', '2']}, 'set': [{'1'}, {'2'}], 'nested_dict': {'a': oneflow.LongTensor([1, 2]), + 'b': oneflow.LongTensor( + [[1, 2], [1, 2]])}} + + findDictDiff(numpy_pad_batch, collator(self.dict_batch)) + collator = Collator(backend='oneflow') + oneflow_pad_lst = [['1', '2'], [['1'], ['2', '2']], oneflow.LongTensor([1, 2]), oneflow.LongTensor([[1, 0], [2, 2]]), + 
oneflow.LongTensor([[[1, 0], [0, 0]], [[1, 0], [1, 2]]]), + oneflow.FloatTensor([1.1, 2.1]), oneflow.FloatTensor([[1.1], [2.1]]), oneflow.BoolTensor([True, False]), + oneflow.LongTensor([[1, 0], [1, 1]]), [{'1': '1'}, {'2': '2'}], + [{'1'}, {'2'}]] + findListDiff(oneflow_pad_lst, collator(self.list_batch)) def test_pad(self): dict_batch = [{ @@ -366,6 +398,46 @@ def test_torch_dl(): with pytest.raises(KeyError): dl.set_pad('i', pad_val=None) +@pytest.mark.oneflow +def test_oneflow_dl(): + from fastNLP import OneflowDataLoader + from fastNLP import DataSet + import numpy as np + import oneflow + + ds = DataSet({ + 'x': [1, 2], 'y': [[1,2], [3]], 'z':[np.ones((1, 2)), np.ones((2, 3))], + 'i': [{'j': [1, 2]}, {'j': [3]}], 'j': ['a', 'b'] + }) + + dl = OneflowDataLoader(ds, batch_size=2) + batch = next(iter(dl)) + assert 'x' in batch and 'y' in batch and 'z' in batch and 'i' in batch and 'j' in batch + assert batch['z'].dtype == oneflow.float32 + assert isinstance(batch['j'], list) + assert batch['i']['j'].dtype, oneflow.long + + dl.set_ignore('x') + batch = next(iter(dl)) + assert 'x' not in batch and 'y' in batch and 'z' in batch + + dl.set_pad('y', pad_val=None) + batch = next(iter(dl)) + assert 'x' not in batch and 'y' in batch and 'z' in batch + assert isinstance(batch['y'], list) + assert len(batch['y'][0])!=len(batch['y'][1]) # 没有 pad + + dl.set_pad(('i', 'j'), pad_val=None) + batch = next(iter(dl)) + assert 'x' not in batch and 'y' in batch and 'z' in batch + assert isinstance(batch['y'], list) + assert len(batch['y'][0])!=len(batch['y'][1]) # 没有 pad + assert isinstance(batch['i']['j'], list) + assert len(batch['i']['j'][0])!=len(batch['i']['j'][1]) # 没有 pad + + with pytest.raises(KeyError): + dl.set_pad('i', pad_val=None) + def test_compare_tuple(): from fastNLP.core.collators.collator import _compare_tuple diff --git a/tests/core/controllers/_test_trainer_oneflow.py b/tests/core/controllers/_test_trainer_oneflow.py new file mode 100644 index 
00000000..385aded0 --- /dev/null +++ b/tests/core/controllers/_test_trainer_oneflow.py @@ -0,0 +1,96 @@ +""" +测试 oneflow 动态图的多卡训练:: + + >>> # 不使用 DistributedDataParallel 包裹的情况 + >>> python -m oneflow.distributed.launch --nproc_per_node 2 _test_trainer_oneflow.py + >>> # 使用 DistributedDataParallel 包裹的情况 + >>> python -m oneflow.distributed.launch --nproc_per_node 2 _test_trainer_oneflow.py -w +""" +import sys +sys.path.append("../../../") +import os +from dataclasses import dataclass + +from fastNLP.core.controllers.trainer import Trainer +from fastNLP.core.metrics.accuracy import Accuracy +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW + +if _NEED_IMPORT_ONEFLOW: + import oneflow + from oneflow.nn.parallel import DistributedDataParallel + from oneflow.optim import Adam + from oneflow.utils.data import DataLoader + +from tests.helpers.models.oneflow_model import OneflowNormalModel_Classification_1 +from tests.helpers.datasets.oneflow_data import OneflowArgMaxDataset + +@dataclass +class TrainOneflowConfig: + num_labels: int = 3 + feature_dimension: int = 3 + + batch_size: int = 2 + shuffle: bool = True + evaluate_every = 2 + +def test_trainer_oneflow( + callbacks, + wrapped=False, + n_epochs=2, +): + model = OneflowNormalModel_Classification_1( + num_labels=TrainOneflowConfig.num_labels, + feature_dimension=TrainOneflowConfig.feature_dimension + ) + optimizers = Adam(params=model.parameters(), lr=0.0001) + train_dataloader = DataLoader( + dataset=OneflowArgMaxDataset(20, TrainOneflowConfig.feature_dimension), + batch_size=TrainOneflowConfig.batch_size, + shuffle=True + ) + val_dataloader = DataLoader( + dataset=OneflowArgMaxDataset(12, TrainOneflowConfig.feature_dimension), + batch_size=TrainOneflowConfig.batch_size, + shuffle=True + ) + train_dataloader = train_dataloader + evaluate_dataloaders = val_dataloader + evaluate_every = TrainOneflowConfig.evaluate_every + metrics = {"acc": Accuracy()} + + if wrapped: + model.to(int(os.environ["LOCAL_RANK"])) + model 
= DistributedDataParallel(model) + + + trainer = Trainer( + model=model, + driver="oneflow", + device=0, + optimizers=optimizers, + train_dataloader=train_dataloader, + evaluate_dataloaders=evaluate_dataloaders, + evaluate_every=evaluate_every, + input_mapping=None, + output_mapping=None, + metrics=metrics, + + n_epochs=n_epochs, + callbacks=callbacks, + ) + trainer.run() + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument( + "-w", + "--wrapped", + default=False, + action="store_true", + help="Use DistributedDataParallal to wrap model first.", + ) + args = parser.parse_args() + + callbacks = [] + test_trainer_oneflow(callbacks, args.wrapped) diff --git a/tests/core/controllers/test_trainer_oneflow.py b/tests/core/controllers/test_trainer_oneflow.py new file mode 100644 index 00000000..e5e2433a --- /dev/null +++ b/tests/core/controllers/test_trainer_oneflow.py @@ -0,0 +1,70 @@ +import os +import pytest +from dataclasses import dataclass + +from fastNLP.core.controllers.trainer import Trainer +from fastNLP.core.metrics.accuracy import Accuracy +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW + +if _NEED_IMPORT_ONEFLOW: + from oneflow.optim import Adam + from oneflow.utils.data import DataLoader + + +from tests.helpers.models.oneflow_model import OneflowNormalModel_Classification_1 +from tests.helpers.datasets.oneflow_data import OneflowArgMaxDataset +from tests.helpers.utils import magic_argv_env_context + +@dataclass +class TrainOneflowConfig: + num_labels: int = 3 + feature_dimension: int = 3 + + batch_size: int = 2 + shuffle: bool = True + evaluate_every = 2 + +@pytest.mark.parametrize("device", ["cpu", 1]) +@pytest.mark.parametrize("callbacks", [[]]) +@pytest.mark.oneflow +@magic_argv_env_context +def test_trainer_oneflow( + device, + callbacks, + n_epochs=2, +): + model = OneflowNormalModel_Classification_1( + num_labels=TrainOneflowConfig.num_labels, + 
feature_dimension=TrainOneflowConfig.feature_dimension + ) + optimizers = Adam(params=model.parameters(), lr=0.0001) + train_dataloader = DataLoader( + dataset=OneflowArgMaxDataset(20, TrainOneflowConfig.feature_dimension), + batch_size=TrainOneflowConfig.batch_size, + shuffle=True + ) + val_dataloader = DataLoader( + dataset=OneflowArgMaxDataset(12, TrainOneflowConfig.feature_dimension), + batch_size=TrainOneflowConfig.batch_size, + shuffle=True + ) + train_dataloader = train_dataloader + evaluate_dataloaders = val_dataloader + evaluate_every = TrainOneflowConfig.evaluate_every + metrics = {"acc": Accuracy()} + trainer = Trainer( + model=model, + driver="oneflow", + device=device, + optimizers=optimizers, + train_dataloader=train_dataloader, + evaluate_dataloaders=evaluate_dataloaders, + evaluate_every=evaluate_every, + input_mapping=None, + output_mapping=None, + metrics=metrics, + + n_epochs=n_epochs, + callbacks=callbacks, + ) + trainer.run() diff --git a/tests/core/dataloaders/oneflow_dataloader/__init__.py b/tests/core/dataloaders/oneflow_dataloader/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/core/dataloaders/oneflow_dataloader/test_fdl.py b/tests/core/dataloaders/oneflow_dataloader/test_fdl.py new file mode 100644 index 00000000..f6a80d7c --- /dev/null +++ b/tests/core/dataloaders/oneflow_dataloader/test_fdl.py @@ -0,0 +1,169 @@ +import pytest + +from fastNLP.core.dataloaders.oneflow_dataloader import OneflowDataLoader, prepare_oneflow_dataloader +from fastNLP.core.dataset import DataSet +from fastNLP.io.data_bundle import DataBundle +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW +from tests.helpers.utils import Capturing, recover_logger +from fastNLP import logger +import numpy as np + +if _NEED_IMPORT_ONEFLOW: + import oneflow + + +@pytest.mark.oneflow +class TestFdl: + + def test_init_v1(self): + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + fdl = OneflowDataLoader(ds, 
batch_size=3, shuffle=True, drop_last=True) + # for batch in fdl: + # print(batch) + fdl1 = OneflowDataLoader(ds, batch_size=3, shuffle=True, drop_last=True) + # for batch in fdl1: + # print(batch) + + def test_set_padding(self): + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + fdl = OneflowDataLoader(ds, batch_size=3) + fdl.set_pad("x", -1) + for batch in fdl: + assert batch['x'].shape == oneflow.Size([3, 4]) + + def test_get_batch_indices(self): + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + fdl = OneflowDataLoader(ds, batch_size=3, shuffle=True) + for batch in fdl: + assert len(fdl.get_batch_indices()) == 3 + + def test_other_dataset(self): + import numpy as np + class _DataSet: + + def __init__(self): + pass + + def __getitem__(self, item): + return np.random.randn(5), [[1, 2], [2, 3, 4]] + + def __len__(self): + return 10 + + def __getattribute__(self, item): + return object.__getattribute__(self, item) + + dataset = _DataSet() + dl = OneflowDataLoader(dataset, batch_size=2, shuffle=True) + # dl.set_inputs('data', 'labels') + # dl.set_pad_val('labels', val=None) + for batch in dl: + assert batch[0].shape == oneflow.Size([2, 5]) + assert batch[1].shape == oneflow.Size([2, 2, 3]) + + def test_default_collate_fn(self): + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + with pytest.raises(ValueError): + fdl = OneflowDataLoader(ds, batch_size=3, collate_fn=None) + import numpy as np + class _DataSet: + + def __init__(self): + pass + + def __getitem__(self, item): + return np.random.randn(5), [[1, 2], [2, 3, 4]] + + def __len__(self): + return 10 + + fdl = OneflowDataLoader(_DataSet(), batch_size=3, collate_fn=None, drop_last=True) + for batch in fdl: + assert batch[0].shape == oneflow.Size([3, 5]) + + def test_my_collate_fn(self): + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + def collate_fn(batch): + res = {'x': [], 'y': 
[]} + for ins in batch: + res['x'].append(ins['x']) + res['y'].append(ins['y']) + return res + fdl = OneflowDataLoader(ds, collate_fn=collate_fn, batch_size=3, drop_last=True) + for batch in fdl: + assert batch['x'] == [[1, 2], [2, 3, 4], [4, 5, 6, 7]] + assert batch['y'] == [1, 0, 1] + + def test_prepare_oneflow_dataloader(self): + # 测试 fastNLP 的 dataset + ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + dl = prepare_oneflow_dataloader(ds, batch_size=8, shuffle=True, num_workers=2) + assert isinstance(dl, OneflowDataLoader) + + ds1 = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10}) + dbl = DataBundle(datasets={'train': ds, 'val': ds1}) + dl_bundle = prepare_oneflow_dataloader(dbl) + assert isinstance(dl_bundle['train'], OneflowDataLoader) + assert isinstance(dl_bundle['val'], OneflowDataLoader) + + ds_dict = {'train_1': ds, 'val': ds1} + dl_dict = prepare_oneflow_dataloader(ds_dict) + assert isinstance(dl_dict['train_1'], OneflowDataLoader) + assert isinstance(dl_dict['val'], OneflowDataLoader) + + # 测试其他 dataset + class _DataSet: + + def __init__(self): + pass + + def __getitem__(self, item): + return np.random.randn(5), [[1, 2], [2, 3, 4]] + + def __len__(self): + return 10 + + def __getattribute__(self, item): + return object.__getattribute__(self, item) + + ds2 = _DataSet() + dl1 = prepare_oneflow_dataloader(ds2, batch_size=8, shuffle=True, num_workers=2) + assert isinstance(dl1, OneflowDataLoader) + + ds3 = _DataSet() + dbl1 = DataBundle(datasets={'train': ds2, 'val': ds3}) + dl_bundle1 = prepare_oneflow_dataloader(dbl1) + assert isinstance(dl_bundle1['train'], OneflowDataLoader) + assert isinstance(dl_bundle1['val'], OneflowDataLoader) + + ds_dict1 = {'train_1': ds2, 'val': ds3} + dl_dict1 = prepare_oneflow_dataloader(ds_dict1) + assert isinstance(dl_dict1['train_1'], OneflowDataLoader) + assert isinstance(dl_dict1['val'], OneflowDataLoader) + + ds = [[1, [1]], [2, [2, 2]]] + dl = 
prepare_oneflow_dataloader(ds, batch_size=2) + for batch in dl: + assert (batch[0] == oneflow.LongTensor([1, 2])).sum()==2 + assert (batch[1] == oneflow.LongTensor([[1, 0], [2, 2]])).sum()==4 + + # sequence = [ds, ds1] + # seq_ds = prepare_oneflow_dataloader(sequence) + # assert isinstance(seq_ds[0], OneflowDataLoader) + # assert isinstance(seq_ds[1], OneflowDataLoader) + + def test_get_backend(self): + from fastNLP.core.collators import Collator + from oneflow.utils.data import DataLoader, Dataset + + class MyDatset(DataSet): + def __len__(self): + return 1000 + + def __getitem__(self, item): + return [[1, 0], [1], [1, 2, 4]], [1, 0] + + collate_batch = Collator(backend='auto') + dl = DataLoader(MyDatset(), collate_fn=collate_batch) + for batch in dl: + print(batch) diff --git a/tests/core/drivers/oneflow_driver/__init__.py b/tests/core/drivers/oneflow_driver/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/core/drivers/oneflow_driver/dist.py b/tests/core/drivers/oneflow_driver/dist.py new file mode 100644 index 00000000..894fcc3c --- /dev/null +++ b/tests/core/drivers/oneflow_driver/dist.py @@ -0,0 +1,78 @@ +import oneflow +from oneflow import nn +from oneflow.utils.data import DataLoader, Dataset +from oneflow.nn.parallel import DistributedDataParallel as ddp +import os +# print(oneflow.ones(3,4).device) +# print(oneflow.rand(3,4).device) +# exit(0) +# PLACEMENT = oneflow.placement("cuda", [0,1]) +# S0 = oneflow.sbp.split(0) +# B = oneflow.sbp.broadcast +print(oneflow.cuda.current_device()) +exit(0) +class OneflowArgMaxDataset(Dataset): + def __init__(self, feature_dimension=10, data_num=1000, seed=0): + self.num_labels = feature_dimension + self.feature_dimension = feature_dimension + self.data_num = data_num + self.seed = seed + + g = oneflow.Generator() + g.manual_seed(1000) + self.x = oneflow.randint(low=-100, high=100, size=[data_num, feature_dimension], generator=g).float() + self.y = oneflow.max(self.x, dim=-1)[1] + + def 
__len__(self): + return self.data_num + + def __getitem__(self, item): + return self.x[item], self.y[item] + +class Model(nn.Module): + def __init__(self, num_labels, feature_dimension): + super(Model, self).__init__() + self.num_labels = num_labels + + self.linear1 = nn.Linear(in_features=feature_dimension, out_features=10) + self.ac1 = nn.ReLU() + self.linear2 = nn.Linear(in_features=10, out_features=10) + self.ac2 = nn.ReLU() + self.output = nn.Linear(in_features=10, out_features=num_labels) + + def forward(self, x): + x = self.ac1(self.linear1(x)) + x = self.ac2(self.linear2(x)) + x = self.output(x) + return x + +dataset = OneflowArgMaxDataset(10, 100) +model = Model(10, 10) +loss_func = nn.CrossEntropyLoss() +optimizer = oneflow.optim.Adam(model.parameters(), 0.001) +dataloader = oneflow.utils.data.DataLoader(dataset, batch_size=32) + +device = "cuda" +model.to(device) +# model = ddp(model) +loss_func.to(device) + +# model = model.to_global(PLACEMENT, B) + +for i in range(2): + for i, (x, y) in enumerate(dataloader): + if i % 2 != oneflow.env.get_rank(): + continue + x = x.to(device) + y = y.to(device) + # x = x.to_global(PLACEMENT, S0) + # y = y.to_global(PLACEMENT, S0) + output = model(x) + loss = loss_func(output, y) + optimizer.zero_grad() + loss.backward() + optimizer.step() +oneflow.save(model, "ttt") +print("end.") +# python -m oneflow.distributed.launch --nproc_per_node 2 dist.py + diff --git a/tests/core/drivers/oneflow_driver/test_ddp.py b/tests/core/drivers/oneflow_driver/test_ddp.py new file mode 100644 index 00000000..8fa92924 --- /dev/null +++ b/tests/core/drivers/oneflow_driver/test_ddp.py @@ -0,0 +1,948 @@ +import os +import sys +sys.path.append("../../../../") +import pytest +from pathlib import Path + +from fastNLP.core.drivers.oneflow_driver.ddp import OneflowDDPDriver +from fastNLP import prepare_oneflow_dataloader +from fastNLP.core.samplers import ( + RandomSampler, + UnrepeatedSampler, + BucketedBatchSampler, + UnrepeatedRandomSampler, + 
UnrepeatedSequentialSampler, +) +from tests.helpers.models.oneflow_model import OneflowNormalModel_Classification_1 +from tests.helpers.datasets.oneflow_data import OneflowNormalDataset, OneflowNormalXYDataset +from tests.helpers.utils import recover_logger +from fastNLP.envs.distributed import rank_zero_rm +from fastNLP import logger +from fastNLP.core.drivers.oneflow_driver.dist_utils import fastnlp_oneflow_all_gather +from fastNLP.envs.imports import _NEED_IMPORT_ONEFLOW +if _NEED_IMPORT_ONEFLOW: + import oneflow + import oneflow.comm as comm + import oneflow.env as dist_env + from oneflow.utils.data import DataLoader, BatchSampler + +def generate_driver(labels, features, device=[0,1], fp16=False, output_from_new_proc="all"): + oneflow_model = OneflowNormalModel_Classification_1(labels, features) + oneflow_opt = oneflow.optim.Adam(params=oneflow_model.parameters(), lr=0.01) + device = [oneflow.device("cuda", i) for i in device] + driver = OneflowDDPDriver( + model=oneflow_model, + parallel_device=device, + fp16=fp16, + output_from_new_proc=output_from_new_proc + ) + driver.set_optimizers(oneflow_opt) + driver.setup() + + return driver + +def dataloader_with_bucketedbatchsampler(dataset, length, batch_size, shuffle, drop_last): + """ + 建立一个 batch_sampler 为 BucketedBatchSampler 的 dataloader + """ + dataloader = DataLoader( + dataset=dataset, + batch_sampler=BucketedBatchSampler( + dataset, + length, + batch_size, + shuffle=shuffle, + drop_last=drop_last, + ), + ) + + return dataloader + +def dataloader_with_randomsampler(dataset, batch_size, shuffle, drop_last, seed=0, unrepeated=False): + """ + 建立一个 sampler 为 RandomSampler 的 dataloader + """ + if unrepeated: + sampler = UnrepeatedRandomSampler(dataset, shuffle, seed) + else: + sampler = RandomSampler(dataset, shuffle, seed=seed) + dataloader = DataLoader( + dataset, + sampler=sampler, + drop_last=drop_last, + batch_size=batch_size + ) + return dataloader + 
+############################################################################ +# +# 测试 OneflowDDPDriver 的一些函数 +# +############################################################################ + +@pytest.mark.oneflow +class TestDDPDriverFunction: + """ + 测试 OneflowDDPDriver 一些简单函数的测试类,基本都是测试能否运行、是否存在 import 错误等问题 + """ + + def test_simple_functions(self): + """ + 简单测试多个函数 + """ + driver = generate_driver(10, 10) + + """ + 测试 move_data_to_device 函数。 + """ + + driver.move_data_to_device(oneflow.rand((32, 64))) + comm.barrier() + + """ + 测试 is_distributed 函数 + """ + assert driver.is_distributed() == True + comm.barrier() + + """ + 测试 get_no_sync_context 函数 + """ + res = driver.get_model_no_sync_context() + comm.barrier() + + """ + 测试 is_global_zero 函数 + """ + driver.is_global_zero() + comm.barrier() + + """ + 测试 unwrap_model 函数 + """ + driver.unwrap_model() + comm.barrier() + + """ + 测试 get_local_rank 函数 + """ + driver.get_local_rank() + comm.barrier() + + """ + 测试 all_gather 函数 + 详细的测试在 test_dist_utils.py 中完成 + """ + obj = { + "rank": driver.global_rank + } + obj_list = driver.all_gather(obj) + for i, res in enumerate(obj_list): + assert res["rank"] == i + + """ + 测试 broadcast_object 函数 + 详细的函数在 test_dist_utils.py 中完成 + """ + if driver.global_rank == 0: + obj = { + "rank": driver.global_rank + } + else: + obj = None + res = driver.broadcast_object(obj, src=0) + assert res["rank"] == 0 + +############################################################################ +# +# 测试 set_dist_repro_dataloader 函数 +# +############################################################################ + +@pytest.mark.oneflow +class TestSetDistReproDataloader: + + @classmethod + def setup_class(cls): + cls.device = [0, 1] + + def setup_method(self): + self.dataset = OneflowNormalDataset(100) + + """ + 传入的 `dist` 参数为具体的 ReproducibleSampler 或 ReproducibleBatchSampler 的情况 + 此时对应 driver.load_checkpoint 中的情况 + """ + + @pytest.mark.parametrize("shuffle", ([True, False])) + def 
test_with_dist_batch_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 BucketedBatchSampler 时的表现 + 此时应该将 batch_sampler 替换为 dist 对应的 BucketedBatchSampler + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=not shuffle) + batch_sampler = BucketedBatchSampler(self.dataset, self.dataset._data, batch_size=4, shuffle=shuffle) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, batch_sampler, False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BucketedBatchSampler) + assert replaced_loader.batch_sampler is batch_sampler + self.check_distributed_sampler(replaced_loader.batch_sampler) + self.check_set_dist_repro_dataloader(driver, dataloader, replaced_loader, shuffle) + + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 RandomSampler 时的表现 + 此时应该将 batch_sampler.sampler 替换为 dist 对应的 RandomSampler + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=not shuffle) + sampler = RandomSampler(self.dataset, shuffle=shuffle) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, sampler, False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BatchSampler) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert replaced_loader.batch_sampler.sampler is sampler + assert replaced_loader.batch_sampler.batch_size == dataloader.batch_sampler.batch_size + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + self.check_set_dist_repro_dataloader(driver, dataloader, replaced_loader, shuffle) + + comm.barrier() + + """ + 传入的参数 `dist` 为 None 的情况,这种情况出现在 trainer 和 evaluator 的初始化过程中,用户指定了 
`use_dist_sampler` + 参数为 False。此时函数会根据 `reproducible` 的设置进行不同的处理。 + 当 `reproducible` 为 False 时,需要根据 dataloader 的 batch_sampler 或 sampler 是否为 Reproducible 来决定 + 是否重新实例化 dataloader + """ + + def test_with_dist_none_reproducible_true(self): + """ + 测试 set_dist_repro_dataloader 中 dist 为 None、reproducible 为 True 时的表现 + 当用户在 driver 之外初始化了分布式环境时,fastnlp 不支持进行断点重训,此时应该报错 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=True) + with pytest.raises(RuntimeError): + # 应当抛出 RuntimeError + replaced_loader = driver.set_dist_repro_dataloader(dataloader, None, True) + + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_none_reproducible_false_dataloader_reproducible_batch_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 None、reproducible 为 False 、dataloader 有 BucketedBatchSampler + 时的表现 + 此时传入的 dataloader 的 batch_sampler 应该已经执行了 set_distributed,产生一个新的 dataloader,其 batch_sampler + 和原 dataloader 相同 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = dataloader_with_bucketedbatchsampler(self.dataset, self.dataset._data, 4, shuffle, False) + dataloader.batch_sampler.set_distributed( + num_replicas=driver.world_size, + rank=driver.global_rank, + pad=True + ) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, None, False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BucketedBatchSampler) + assert replaced_loader.batch_sampler.batch_size == 4 + self.check_distributed_sampler(dataloader.batch_sampler) + self.check_set_dist_repro_dataloader(driver, dataloader, replaced_loader, shuffle) + + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_none_reproducible_false_dataloader_reproducible_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 None、reproducible 为 False 、dataloader 有 RandomSampler 时的表现 + 此时传入的 
dataloader 的 batch_sampler.sampler 应该已经执行了 set_distributed,产生一个新的 dataloader,其 + batch_sampler.sampler 和原 dataloader 相同 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = dataloader_with_randomsampler(self.dataset, 4, shuffle, False, unrepeated=False) + dataloader.batch_sampler.sampler.set_distributed( + num_replicas=driver.world_size, + rank=driver.global_rank + ) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, None, False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BatchSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + assert not (replaced_loader.batch_sampler.sampler is dataloader.batch_sampler.sampler) + assert replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.batch_sampler.drop_last == False + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + self.check_set_dist_repro_dataloader(driver, dataloader, replaced_loader, shuffle) + + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_none_reproducible_false_dataloader_normal(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 None、reproducible 为 False 、dataloader 为一般情况时的表现 + 此时直接返回原来的 dataloader,不做任何处理。 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=shuffle) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, None, False) + + assert replaced_loader is dataloader + comm.barrier() + + """ + 传入的参数 `dist` 为 'dist' 的情况,这种情况出现在 trainer 的初始化过程中,用户指定了 `use_dist_sampler` 参数 + 为 True。此时函数会根据 dataloader 的 batch_sampler 或 sampler 是否为 Reproducible 来决定如何重新实例化 dataloader + """ + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_dist_dataloader_reproducible_batch_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 
dist 为 'dist'、dataloader.batch_sampler 为 ReproducibleBatchSampler + 的表现 + 此时应该返回一个新的 dataloader,其batch_sampler 和原 dataloader 相同,且应该正确地设置了分布式相关的属性 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader( + dataset=self.dataset, + batch_sampler=BucketedBatchSampler(self.dataset, self.dataset._data, batch_size=4, shuffle=shuffle) + ) + dataloader = dataloader_with_bucketedbatchsampler(self.dataset, self.dataset._data, 4, shuffle, False) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "dist", False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BucketedBatchSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.drop_last == dataloader.drop_last + self.check_distributed_sampler(replaced_loader.batch_sampler) + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_dist_dataloader_reproducible_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 'dist'、dataloader.batch_sampler.sampler 为 ReproducibleSampler + 的表现 + 此时应该返回一个新的 dataloader,其 batch_sampler.sampler 和原 dataloader 相同,且应该正确地设置了分布式相关 + 的属性 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = dataloader_with_randomsampler(self.dataset, 4, shuffle, False, unrepeated=False) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "dist", False) + + assert not (replaced_loader is dataloader) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + assert not (replaced_loader.batch_sampler.sampler is dataloader.batch_sampler.sampler) + assert replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.batch_sampler.sampler.shuffle == shuffle + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + comm.barrier() + 
+ @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_dist_dataloader_normal(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 'dist'、dataloader 为一般情况的表现 + 此时应该返回一个新的 dataloader,并替换其 batch_sampler.sampler 为 RandomSampler,且应该正确设置了分布式相关 + 的属性 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=shuffle) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "dist", False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BatchSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + assert replaced_loader.batch_sampler.batch_size == dataloader.batch_sampler.batch_size + assert replaced_loader.batch_sampler.sampler.shuffle == shuffle + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + comm.barrier() + + """ + 传入的参数 `dist` 为 'unrepeatdist' 的情况,这种情况出现在 evaluator 的初始化过程中,用户指定了 `use_dist_sampler` 参数 + 为 True。此时函数会根据 dataloader 的 sampler 是否为 Unrepeated 和 Reproducible 来决定如何重新实例化 dataloader + """ + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_unrepeat_dataloader_reproducible_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 'unrepeatdist'、dataloader.batch_sampler.sampler 为 ReproducibleSampler + 的表现 + 此时应该返回一个新的 dataloader,且将原来的 Sampler 替换为 UnrepeatedRandomSampler,且正确地设置了分布式相关 + 的属性 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = dataloader_with_randomsampler(self.dataset, 4, shuffle, False, unrepeated=False) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "unrepeatdist", False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BatchSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert 
isinstance(replaced_loader.batch_sampler.sampler, UnrepeatedRandomSampler) + assert replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.batch_sampler.sampler.shuffle == shuffle + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_unrepeat_dataloader_unrepreated_sampler(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 'unrepeatdist'、dataloader.batch_sampler.sampler 为 UnrepeatedSampler + 的表现 + 此时应该返回一个新的 dataloader,且重新实例化了原来的 Sampler + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = dataloader_with_randomsampler(self.dataset, 4, shuffle, False, unrepeated=True) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "unrepeatdist", False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, BatchSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert isinstance(replaced_loader.batch_sampler.sampler, UnrepeatedRandomSampler) + assert not (replaced_loader.batch_sampler.sampler is dataloader.batch_sampler.sampler) + assert replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.drop_last == dataloader.drop_last + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + comm.barrier() + + @pytest.mark.parametrize("shuffle", ([True, False])) + def test_with_dist_unrepeat_dataloader_normal(self, shuffle): + """ + 测试 set_dist_repro_dataloader 中 dist 为 'unrepeatdist'、dataloader 为一般情况的表现 + 此时应该返回一个新的 dataloader,且将 sampler 替换为 UnrepeatedSequentialSampler,并正确地设置了分布式相关 + 的属性 + """ + driver = generate_driver(10, 10, device=self.device) + dataloader = DataLoader(self.dataset, batch_size=4, shuffle=shuffle) + replaced_loader = driver.set_dist_repro_dataloader(dataloader, "unrepeatdist", False) + + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler, 
BatchSampler) + assert not (replaced_loader.batch_sampler is dataloader.batch_sampler) + assert isinstance(replaced_loader.batch_sampler.sampler, UnrepeatedSequentialSampler) + assert replaced_loader.batch_sampler.batch_size == 4 + assert replaced_loader.drop_last == dataloader.drop_last + self.check_distributed_sampler(replaced_loader.batch_sampler.sampler) + comm.barrier() + + def check_distributed_sampler(self, sampler): + """ + 测试替换得到的 sampler 或 batch_sampler 的分布式设置是否正确 + """ + assert sampler.num_replicas == dist_env.get_world_size() + assert sampler.rank == dist_env.get_rank() + if not isinstance(sampler, UnrepeatedSampler): + assert sampler.pad == True + + def check_set_dist_repro_dataloader(self, driver, dataloader, replaced_loader, shuffle): + """ + 测试多卡下 set_dist_repro_dataloader 函数的执行结果是否正确 + """ + # 迭代两个 batch + num_replicas = len(self.device) + num_consumed_batches = 2 + already_seen_idx = set() + if isinstance(replaced_loader.batch_sampler, BucketedBatchSampler): + sampler_states = replaced_loader.batch_sampler.set_epoch(4) + else: + sampler_states = replaced_loader.batch_sampler.sampler.set_epoch(4) + for idx, batch in enumerate(replaced_loader): + if idx >= num_consumed_batches: + break + already_seen_idx.update(batch.tolist()) + comm.barrier() + if isinstance(replaced_loader.batch_sampler, BucketedBatchSampler): + sampler_states = replaced_loader.batch_sampler.state_dict() + else: + sampler_states = replaced_loader.batch_sampler.sampler.state_dict() + + # 重新加载,应该可以输出剩下的内容,且对于 OneflowNormalDataset 来说,排序后应该是一个 range + left_idxes = set() + if isinstance(replaced_loader.batch_sampler, BucketedBatchSampler): + batch_size = replaced_loader.batch_sampler.batch_size + sampler_states["num_consumed_samples"] = num_consumed_batches * batch_size * num_replicas + # 重新改造 dataloader + new_loader = dataloader_with_bucketedbatchsampler( + replaced_loader.dataset, + length=replaced_loader.dataset._data, + batch_size=batch_size, + shuffle=shuffle, + drop_last=False, + 
) + new_loader.batch_sampler.set_distributed( + num_replicas=driver.world_size, + rank=driver.global_rank, + pad=True + ) + new_loader.batch_sampler.load_state_dict(sampler_states) + new_loader.batch_sampler.set_epoch(4) + else: + batch_size = replaced_loader.batch_sampler.batch_size + sampler_states["num_consumed_samples"] = num_consumed_batches * batch_size * num_replicas + # 重新构造 dataloader + new_loader = dataloader_with_randomsampler(replaced_loader.dataset, batch_size, shuffle, drop_last=False) + new_loader.batch_sampler.sampler.set_distributed( + num_replicas=driver.world_size, + rank=driver.global_rank + ) + new_loader.batch_sampler.sampler.load_state_dict(sampler_states) + new_loader.batch_sampler.sampler.set_epoch(4) + for idx, batch in enumerate(new_loader): + left_idxes.update(batch.tolist()) + + assert len(left_idxes) + len(already_seen_idx) == len(self.dataset) / num_replicas + assert len(left_idxes | already_seen_idx) == len(self.dataset) / num_replicas + + +############################################################################ +# +# 测试 save 和 load 相关的功能 +# +############################################################################ +@pytest.mark.oneflow +class TestSaveLoad: + """ + 测试多卡情况下 save 和 load 相关函数的表现 + """ + + def setup_method(self): + self.dataset = OneflowNormalXYDataset(100) + + @pytest.mark.parametrize("only_state_dict", ([True, False])) + def test_save_and_load_model(self, only_state_dict): + """ + 测试 save_model 和 load_model 函数 + """ + try: + path = "model" + + dataloader = DataLoader(self.dataset, batch_size=2) + driver1, driver2 = generate_driver(20, 1), generate_driver(20, 1) + + driver1.save_model(path, only_state_dict) + + # 同步 + comm.barrier() + driver2.load_model(path, only_state_dict) + + for idx, batch in enumerate(dataloader): + batch = driver1.move_data_to_device(batch) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + + assert oneflow.all(res1["preds"] == res2["preds"]) + 
finally: + rank_zero_rm(path) + + @pytest.mark.parametrize("only_state_dict", ([True, False])) + @pytest.mark.parametrize("fp16", ([True, False])) + @pytest.mark.parametrize("device", ([[0,1]])) + def test_save_and_load_with_bucketedbatchsampler(self, device, only_state_dict, fp16): + """ + 测试save和load函数,主要测试 dataloader 被替换了 sampler 之后的情况 + """ + + try: + path = "model.ckp" + num_replicas = len(device) + + driver1, driver2 = generate_driver(20, 1, device=device, fp16=fp16), \ + generate_driver(20, 1, device=device, fp16=False) + dataloader = dataloader_with_bucketedbatchsampler( + self.dataset, + length=[10 for i in range(len(self.dataset))], + batch_size=4, + shuffle=True, + drop_last=False + ) + dataloader.batch_sampler.set_distributed( + num_replicas=driver1.world_size, + rank=driver1.global_rank, + pad=True + ) + num_consumed_batches = 4 + + already_seen_x_set = set() + already_seen_y_set = set() + driver1.set_sampler_epoch(dataloader, 4) + for idx, batch in enumerate(dataloader): + if idx >= num_consumed_batches: + break + already_seen_x_set.update(batch["x"].reshape(-1, ).tolist()) + already_seen_y_set.update(batch["y"].reshape(-1, ).tolist()) + + # 同步 + comm.barrier() + + # 保存状态 + sampler_states = dataloader.batch_sampler.state_dict() + save_states = {"num_consumed_batches": num_consumed_batches} + driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) + comm.barrier() + # 加载 + # 更改 batch_size + dataloader = dataloader_with_bucketedbatchsampler( + self.dataset, + length=[10 for i in range(len(self.dataset))], + batch_size=2, + shuffle=True, + drop_last=False + ) + dataloader.batch_sampler.set_distributed( + num_replicas=driver2.world_size, + rank=driver2.global_rank, + pad=True + ) + comm.barrier() + load_states = driver2.load_checkpoint(Path(path), dataloader, only_state_dict, should_load_model=True) + comm.barrier() + replaced_loader = load_states.pop("dataloader") + + # 1. 
检查 optimizer 的状态 + # TODO optimizer 的 state_dict 总是为空 + + # 2. 检查 batch_sampler 是否被正确地加载和替换 + assert not (replaced_loader is dataloader) + assert replaced_loader.batch_sampler is dataloader.batch_sampler + assert isinstance(replaced_loader.batch_sampler, BucketedBatchSampler) + if os.environ['FASTNLP_GLOBAL_RANK'] == '0': + assert replaced_loader.batch_sampler.seed == sampler_states["seed"] + assert replaced_loader.batch_sampler.num_consumed_samples == num_consumed_batches * 4 * num_replicas + + # # 3. 检查 fp16 是否被加载 + # if fp16: + # assert not isinstance(driver2.grad_scaler, oneflow.cuda.amp.GradScaler) + + # 4. 检查 model 的参数是否正确 + # 5. 检查 batch_idx + start_batch = load_states.pop('batch_idx_in_epoch') + assert start_batch == 2 * num_consumed_batches + left_x_batches = set() + left_y_batches = set() + driver2.set_sampler_epoch(replaced_loader, 4) + for idx, batch in enumerate(replaced_loader): + + left_x_batches.update(batch["x"].reshape(-1, ).tolist()) + left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + assert oneflow.all(res1["preds"] == res2["preds"]) + + assert len(left_x_batches) + len(already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_x_batches | already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_y_batches) + len(already_seen_y_set) == len(self.dataset) / num_replicas + assert len(left_y_batches | already_seen_y_set) == len(self.dataset) / num_replicas + comm.barrier() + finally: + rank_zero_rm(path) + + @pytest.mark.parametrize("only_state_dict", ([True, False])) + @pytest.mark.parametrize("fp16", ([True, False])) + @pytest.mark.parametrize("device", ([[0,1]])) + def test_save_and_load_with_randomsampler(self, device, only_state_dict, fp16): + """ + 测试save和load函数,主要测试 dataloader 被替换了 batch_sampler 的情况 + """ + + try: + path = "checkpoints/" + + num_replicas = len(device) + + driver1 = generate_driver(20, 1, 
device=device, fp16=fp16) + driver2 = generate_driver(20, 1, device=device, fp16=False) + + dataloader = dataloader_with_randomsampler(self.dataset, 4, True, False, unrepeated=False) + dataloader.batch_sampler.sampler.set_distributed( + num_replicas=driver1.world_size, + rank=driver1.global_rank, + pad=True + ) + num_consumed_batches = 4 + + already_seen_x_set = set() + already_seen_y_set = set() + driver1.set_sampler_epoch(dataloader, 4) + for idx, batch in enumerate(dataloader): + if idx >= num_consumed_batches: + break + already_seen_x_set.update(batch["x"].reshape(-1, ).tolist()) + already_seen_y_set.update(batch["y"].reshape(-1, ).tolist()) + + # 同步 + comm.barrier() + + # 保存状态 + sampler_states = dataloader.batch_sampler.sampler.state_dict() + save_states = {"num_consumed_batches": num_consumed_batches} + driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) + comm.barrier() # 等待save成功 + # 加载 + # 更改 batch_size + dataloader = dataloader_with_randomsampler(self.dataset, 2, True, False, unrepeated=False) + dataloader.batch_sampler.sampler.set_distributed( + num_replicas=driver2.world_size, + rank=driver2.global_rank, + pad=True + ) + load_states = driver2.load_checkpoint(Path(path), dataloader, only_state_dict, should_load_model=True) + replaced_loader = load_states.pop("dataloader") + + # 1. 检查 optimizer 的状态 + # TODO optimizer 的 state_dict 总是为空 + + # 2. 
检查 sampler 是否被正确地加载和替换 + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + if os.environ['FASTNLP_GLOBAL_RANK'] == '0': + assert replaced_loader.batch_sampler.sampler.seed == sampler_states["seed"] + assert replaced_loader.batch_sampler.sampler.epoch == sampler_states["epoch"] + assert len(replaced_loader.batch_sampler.sampler.dataset) == sampler_states["length"] + assert replaced_loader.batch_sampler.sampler.shuffle == sampler_states["shuffle"] + assert replaced_loader.batch_sampler.sampler.num_consumed_samples == 4 * num_consumed_batches * num_replicas + + # # 3. 检查 fp16 是否被加载 + # if fp16: + # assert not isinstance(driver2.grad_scaler, oneflow.cuda.amp.GradScaler) + + # 4. 检查 model 的参数是否正确 + # 5. 检查 batch_idx + start_batch = load_states.pop('batch_idx_in_epoch') + assert start_batch == 2 * num_consumed_batches + left_x_batches = set() + left_y_batches = set() + driver2.set_sampler_epoch(replaced_loader, 4) + for idx, batch in enumerate(replaced_loader): + + left_x_batches.update(batch["x"].reshape(-1, ).tolist()) + left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + assert oneflow.all(res1["preds"] == res2["preds"]) + + assert len(left_x_batches) + len(already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_x_batches | already_seen_x_set) == len(self.dataset) / num_replicas + assert len(left_y_batches) + len(already_seen_y_set) == len(self.dataset) / num_replicas + assert len(left_y_batches | already_seen_y_set) == len(self.dataset) / num_replicas + + finally: + rank_zero_rm(path) + + +@pytest.mark.oneflow +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +def test_shuffle_dataloader(shuffle, batch_size, drop_last, reproducible=True): + try: + # 需要检验一下 
set_dist_repro_dataloader 没有修改参数 + num_samples = 200 + dataset = OneflowNormalXYDataset(num_samples) + dl = prepare_oneflow_dataloader(dataset, shuffle=shuffle, batch_size=batch_size, drop_last=drop_last) + model = OneflowNormalModel_Classification_1(10, 32) + device = [oneflow.device("cuda", i) for i in [0, 1]] + + driver = OneflowDDPDriver(model, parallel_device=device) + driver.setup() + dl = driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + flags.append(batch['x'].size(0) == batch_size) + data.extend(batch['x'].reshape(-1).tolist()) + + _num_samples = num_samples//2 + + if drop_last and _num_samples%batch_size != 0: + assert len(data)!=_num_samples + assert all(flags) == True + elif _num_samples%batch_size!=0: + assert flags[-1] is False + else: + assert len(data) == _num_samples + + if not shuffle: + for i in range(1, len(data)-1): + assert data[i]>data[i-1] + else: + flags = [] + for i in range(1, len(data)-1): + flags.append(data[i]>data[i-1]) + assert all(flags) is False + datas = fastnlp_oneflow_all_gather(data) + if drop_last: + assert len(set(datas[0] + datas[1])) == num_samples-_num_samples%batch_size*2 + else: + assert len(set(datas[0] + datas[1])) == num_samples + finally: + pass + + +@pytest.mark.oneflow +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +def test_batch_sampler_dataloader(shuffle, batch_size, drop_last, reproducible=True): + try: + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 200 + num_device = 2 + dataset = OneflowNormalXYDataset(num_samples) + sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, + shuffle=shuffle, num_batch_per_bucket=2) + dl = prepare_oneflow_dataloader(dataset, batch_sampler=sampler) + model = OneflowNormalModel_Classification_1(10, 32) + device = 
[oneflow.device("cuda", i) for i in [0, 1]] + driver = OneflowDDPDriver(model, parallel_device=device) + driver.setup() + dl = driver.set_dist_repro_dataloader(dataloader=dl, dist='dist', reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + d = batch['x'].reshape(-1).tolist() + diff = max(d) - min(d) + assert diff= num_consumed_batches: + break + already_seen_idx.update(batch.tolist()) + if isinstance(replaced_loader.batch_sampler, ReproduceBatchSampler): + sampler_states = replaced_loader.batch_sampler.state_dict() + else: + sampler_states = replaced_loader.batch_sampler.sampler.state_dict() + + # 重新加载,应该可以输出剩下的内容,且对于 OneflowNormalDataset 来说,排序后应该是一个 range + left_idxes = set() + if isinstance(replaced_loader.batch_sampler, ReproduceBatchSampler): + batch_size = replaced_loader.batch_sampler.batch_size + sampler_states["num_consumed_samples"] = num_consumed_batches * batch_size + # 重新改造 dataloader + new_loader = dataloader_with_randombatchsampler(replaced_loader.dataset, batch_size, shuffle, False) + new_loader.batch_sampler.load_state_dict(sampler_states) + new_loader.batch_sampler.set_epoch(3) + else: + batch_size = replaced_loader.batch_sampler.batch_size + sampler_states["num_consumed_samples"] = num_consumed_batches * batch_size + # 重新构造 dataloader + new_loader = dataloader_with_randomsampler(replaced_loader.dataset, batch_size, shuffle, False) + new_loader.batch_sampler.sampler.load_state_dict(sampler_states) + new_loader.batch_sampler.sampler.set_epoch(3) + for idx, batch in enumerate(new_loader): + left_idxes.update(batch.tolist()) + + assert len(left_idxes) + len(already_seen_idx) == len(self.dataset) + assert len(left_idxes | already_seen_idx) == len(self.dataset) + +############################################################################ +# +# 测试 save 和 load 相关的功能 +# +############################################################################ + +def generate_random_driver(labels, features, fp16=False, device="cpu"): + """ + 
生成driver + """ + model = OneflowNormalModel_Classification_1(labels, features) + opt = oneflow.optim.Adam(params=model.parameters(), lr=0.01) + driver = OneflowSingleDriver(model, device=device, fp16=fp16) + driver.set_optimizers(opt) + driver.setup() + + return driver + +@pytest.mark.oneflow +@pytest.mark.parametrize("only_state_dict", ([True, False])) +def test_save_and_load_model(only_state_dict): + """ + 测试 save_model 和 load_model 函数 + """ + try: + path = "model" + dataset = OneflowNormalXYDataset(20) + dataloader = DataLoader(dataset, batch_size=4) + driver1, driver2 = generate_random_driver(20, 1), generate_random_driver(20, 1) + + driver1.save_model(path, only_state_dict) + driver2.load_model(path, only_state_dict) + + for batch in dataloader: + batch = driver1.move_data_to_device(batch) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + + assert oneflow.all(res1["preds"] == res2["preds"]) + finally: + rank_zero_rm(path) + +@pytest.mark.oneflow +@pytest.mark.parametrize("only_state_dict", ([True, False])) +@pytest.mark.parametrize("fp16", ([True, False])) +def test_save_and_load_with_randombatchsampler(only_state_dict, fp16): + """ + 测试save和load函数,主要测试 dataloader 被替换了 sampler 之后的情况 + """ + + try: + path = "model.ckp" + dataset = OneflowNormalXYDataset(20) + dataloader = dataloader_with_randombatchsampler(dataset, 4, True, False) + driver1, driver2 = generate_random_driver(20, 1, fp16, "cuda"), generate_random_driver(20, 1, False, "cuda") + + num_consumed_batches = 2 + + already_seen_x_set = set() + already_seen_y_set = set() + driver1.set_sampler_epoch(dataloader, 3) + for idx, batch in enumerate(dataloader): + if idx >= num_consumed_batches: + break + already_seen_x_set.update(batch["x"].reshape(-1, ).tolist()) + already_seen_y_set.update(batch["y"].reshape(-1, ).tolist()) + + sampler_states = dataloader.batch_sampler.state_dict() + save_states = {"num_consumed_batches": num_consumed_batches} + 
driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) + # 加载 + # 更改 batch_size + + dataloader = dataloader_with_randombatchsampler(dataset, 2, True, False) + load_states = driver2.load_checkpoint(Path(path), dataloader, only_state_dict, should_load_model=True) + replaced_loader = load_states.pop("dataloader") + # 1. 检查 optimizer 的状态 + # TODO optimizer 的 state_dict 总是为空 + + # 2. 检查 batch_sampler 是否被正确地加载和替换 + assert not (replaced_loader is dataloader) + assert replaced_loader.batch_sampler is dataloader.batch_sampler + assert isinstance(replaced_loader.batch_sampler, ReproduceBatchSampler) + assert replaced_loader.batch_sampler.index_list == sampler_states["index_list"] + assert replaced_loader.batch_sampler.num_consumed_samples == num_consumed_batches * 4 + + # # 3. 检查 fp16 是否被加载 + # if fp16: + # assert not isinstance(driver2.grad_scaler, oneflow.cuda.amp.GradScaler) + + # 4. 检查 model 的参数是否正确 + # 5. 检查 batch_idx + start_batch = load_states.pop('batch_idx_in_epoch') + assert start_batch == 2 * num_consumed_batches + left_x_batches = set() + left_y_batches = set() + driver1.set_sampler_epoch(replaced_loader, 3) + for idx, batch in enumerate(replaced_loader): + + batch = driver2.move_data_to_device(batch) + left_x_batches.update(batch["x"].reshape(-1, ).tolist()) + left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + assert oneflow.all(res1["preds"] == res2["preds"]) + + assert len(left_x_batches) + len(already_seen_x_set) == len(dataset) + assert len(left_x_batches | already_seen_x_set) == len(dataset) + assert len(left_y_batches) + len(already_seen_y_set) == len(dataset) + assert len(left_y_batches | already_seen_y_set) == len(dataset) + finally: + rank_zero_rm(path) + +@pytest.mark.oneflow +@pytest.mark.parametrize("only_state_dict", ([True, False])) +@pytest.mark.parametrize("fp16", ([True, False])) +def 
test_save_and_load_with_randomsampler(only_state_dict, fp16): + """ + 测试save和load函数,主要测试 dataloader 被替换了 sampler 的情况 + """ + + try: + path = "model.ckp" + + driver1, driver2 = generate_random_driver(40, 1, fp16, "cuda"), generate_random_driver(40, 1, False, "cuda") + dataset = OneflowNormalXYDataset(40) + dataloader = dataloader_with_randomsampler(dataset, 4, True, False) + num_consumed_batches = 2 + + already_seen_x_set = set() + already_seen_y_set = set() + driver1.set_sampler_epoch(dataloader, 3) + for idx, batch in enumerate(dataloader): + if idx >= num_consumed_batches: + break + already_seen_x_set.update(batch["x"].reshape(-1, ).tolist()) + already_seen_y_set.update(batch["y"].reshape(-1, ).tolist()) + + sampler_states = dataloader.batch_sampler.sampler.state_dict() + save_states = {"num_consumed_batches": num_consumed_batches} + driver1.save_checkpoint(Path(path), save_states, dataloader, only_state_dict, should_save_model=True) + + # 加载 + # 更改 batch_size + dataloader = dataloader_with_randomsampler(dataset, 2, True, False) + load_states = driver2.load_checkpoint(Path(path), dataloader, only_state_dict, should_load_model=True) + replaced_loader = load_states.pop("dataloader") + + # 1. 检查 optimizer 的状态 + # TODO optimizer 的 state_dict 总是为空 + + # 2. 检查 sampler 是否被正确地加载和替换 + assert not (replaced_loader is dataloader) + assert isinstance(replaced_loader.batch_sampler.sampler, RandomSampler) + assert replaced_loader.batch_sampler.sampler.seed == sampler_states["seed"] + assert replaced_loader.batch_sampler.sampler.epoch == sampler_states["epoch"] + assert replaced_loader.batch_sampler.sampler.num_consumed_samples == 4 * num_consumed_batches + assert len(replaced_loader.batch_sampler.sampler.dataset) == sampler_states["length"] + assert replaced_loader.batch_sampler.sampler.shuffle == sampler_states["shuffle"] + + # # 3. 检查 fp16 是否被加载 + # if fp16: + # assert not isinstance(driver2.grad_scaler, oneflow.cuda.amp.GradScaler) + + # 4. 检查 model 的参数是否正确 + # 5. 
检查 batch_idx + start_batch = load_states.pop('batch_idx_in_epoch') + assert start_batch == 2 * num_consumed_batches + left_x_batches = set() + left_y_batches = set() + # set epoch + driver2.set_sampler_epoch(replaced_loader, 3) + for idx, batch in enumerate(replaced_loader): + + batch = driver2.move_data_to_device(batch) + left_x_batches.update(batch["x"].reshape(-1, ).tolist()) + left_y_batches.update(batch["y"].reshape(-1, ).tolist()) + res1 = driver1.model.evaluate_step(**batch) + res2 = driver2.model.evaluate_step(**batch) + assert oneflow.all(res1["preds"] == res2["preds"]) + + assert len(left_x_batches) + len(already_seen_x_set) == len(dataset) + assert len(left_x_batches | already_seen_x_set) == len(dataset) + assert len(left_y_batches) + len(already_seen_y_set) == len(dataset) + assert len(left_y_batches | already_seen_y_set) == len(dataset) + finally: + rank_zero_rm(path) + + +@pytest.mark.oneflow +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +@pytest.mark.parametrize("reproducible", ([True, False])) +def test_shuffle_dataloader(shuffle, batch_size, drop_last, reproducible): + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 100 + dataset = OneflowNormalXYDataset(num_samples) + dl = prepare_oneflow_dataloader(dataset, shuffle=shuffle, batch_size=batch_size, drop_last=drop_last) + model = OneflowNormalModel_Classification_1(10, 32) + driver = OneflowSingleDriver(model, device="cpu") + dl = driver.set_dist_repro_dataloader(dataloader=dl, reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + flags.append(batch['x'].size(0) == batch_size) + data.extend(batch['x'].reshape(-1).tolist()) + + if drop_last and num_samples%batch_size != 0: + assert len(data)!=num_samples + assert all(flags) == True + elif num_samples%batch_size!=0: + assert flags[-1] is False + else: + assert len(data) == num_samples + + if not 
shuffle: + for i in range(1, len(data)): + assert data[i]>data[i-1] + else: + flags = [] + for i in range(1, len(data)): + flags.append(data[i]>data[i-1]) + assert all(flags) is False + + +@pytest.mark.oneflow +@pytest.mark.parametrize("shuffle", ([True, False])) +@pytest.mark.parametrize("batch_size", ([1, 3, 16, 17])) +@pytest.mark.parametrize("drop_last", ([True, False])) +@pytest.mark.parametrize("reproducible", ([True, False])) +def test_batch_sampler_dataloader(shuffle, batch_size, drop_last, reproducible): + # 需要检验一下 set_dist_repro_dataloader 没有修改参数 + num_samples = 100 + dataset = OneflowNormalXYDataset(num_samples) + sampler = BucketedBatchSampler(dataset, length=dataset._data, batch_size=batch_size, drop_last=drop_last, + shuffle=shuffle, num_batch_per_bucket=2) + dl = prepare_oneflow_dataloader(dataset, batch_sampler=sampler) + model = OneflowNormalModel_Classification_1(10, 32) + driver = OneflowSingleDriver(model, device="cpu") + dl = driver.set_dist_repro_dataloader(dataloader=dl, reproducible=reproducible) + + data = [] + flags = [] + for batch in dl: + d = batch['x'].reshape(-1).tolist() + diff = max(d) - min(d) + assert diff Date: Thu, 30 Jun 2022 06:33:56 +0000 Subject: [PATCH 45/52] =?UTF-8?q?=E8=B0=83=E6=95=B4=20driver=20=E7=9A=84?= =?UTF-8?q?=E9=83=A8=E5=88=86=E6=96=87=E6=A1=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../drivers/jittor_driver/jittor_driver.py | 19 +++-- fastNLP/core/drivers/jittor_driver/mpi.py | 3 +- .../drivers/jittor_driver/single_device.py | 14 +--- fastNLP/core/drivers/paddle_driver/fleet.py | 2 +- .../drivers/paddle_driver/paddle_driver.py | 6 -- tests/core/drivers/oneflow_driver/dist.py | 78 ------------------- 6 files changed, 17 insertions(+), 105 deletions(-) delete mode 100644 tests/core/drivers/oneflow_driver/dist.py diff --git a/fastNLP/core/drivers/jittor_driver/jittor_driver.py b/fastNLP/core/drivers/jittor_driver/jittor_driver.py index 312f0d83..542b39f9 
100644 --- a/fastNLP/core/drivers/jittor_driver/jittor_driver.py +++ b/fastNLP/core/drivers/jittor_driver/jittor_driver.py @@ -40,20 +40,22 @@ __all__ = [ class JittorDriver(Driver): r""" - ``Jittor`` 框架的 ``Driver`` + ``Jittor`` 框架的 ``Driver``,是 ``JittorSingleDevice`` 和 ``JittorMPIDriver`` 的父类。 - .. note:: + .. warning:: - 这是一个正在开发中的功能,敬请期待。 + 您不应当直接初始化该类,然后传入给 ``Trainer``,换句话说,您应当使用该类的子类 ``JittorSingleDriver`` 和 ``TorchDDPDriver``,而不是 + 该类本身; - .. todo:: + .. note:: - 实现 fp16 的设置,且支持 cpu 和 gpu 的切换; - 实现用于断点重训的 save 和 load 函数; + 您可以在使用 ``JittorSingleDevice`` 和 ``JittorMPIDriver`` 时使用 ``JittorDriver`` 提供的接口; + :param model: 训练时使用的 **jittor** 模型; + :param fp16: 是否开启混合精度训练; + :param jittor_kwargs: """ - - def __init__(self, model, fp16: bool = False, **kwargs): + def __init__(self, model, fp16: bool = False, jittor_kwargs: Dict = {}, **kwargs): if not isinstance(model, Module): raise ValueError(f"Parameter `model` can not be `{type(model)}` in `JittorDriver`, it should be exactly " f"`jittor.Module` type.") @@ -65,6 +67,7 @@ class JittorDriver(Driver): jt.flags.auto_mixed_precision_level = 0 self.fp16 = fp16 self._auto_cast = nullcontext + self._jittor_kwargs = jittor_kwargs # 用来设置是否关闭 auto_param_call 中的参数匹配问题; self.wo_auto_param_call = kwargs.get("model_wo_auto_param_call", False) diff --git a/fastNLP/core/drivers/jittor_driver/mpi.py b/fastNLP/core/drivers/jittor_driver/mpi.py index b072b83d..47e9279b 100644 --- a/fastNLP/core/drivers/jittor_driver/mpi.py +++ b/fastNLP/core/drivers/jittor_driver/mpi.py @@ -34,10 +34,11 @@ class JittorMPIDriver(JittorDriver): parallel_device: None, is_pull_by_jittor_run: bool = False, fp16: bool = False, + jittor_kwargs: Dict = {}, **kwargs ): - super(JittorMPIDriver, self).__init__(model, fp16=fp16, **kwargs) + super(JittorMPIDriver, self).__init__(model, fp16=fp16, jittor_kwargs=jittor_kwargs, **kwargs) raise NotImplementedError("MPI for Jittor is not supported right now.") self.is_pull_by_jittor_run = is_pull_by_jittor_run diff 
--git a/fastNLP/core/drivers/jittor_driver/single_device.py b/fastNLP/core/drivers/jittor_driver/single_device.py index 386f8694..be8ef1b9 100644 --- a/fastNLP/core/drivers/jittor_driver/single_device.py +++ b/fastNLP/core/drivers/jittor_driver/single_device.py @@ -25,15 +25,6 @@ class JittorSingleDriver(JittorDriver): r""" ``Jittor`` 框架下用于 ``cpu`` 和单卡 ``gpu`` 运算的 ``Driver``。 - .. note:: - - 这是一个正在开发中的功能,敬请期待。 - - .. todo:: - - 支持 cpu 和 gpu 的切换; - 实现断点重训中替换 dataloader 的 set_dist_repro_dataloader 函数 - :param model: 传入给 ``Trainer`` 的 ``model`` 参数; :param device: 训练和模型所在的设备,在 **Jittor** 中,应当为以下值之一:``[None, 'cpu', 'gpu', 'cuda']``; @@ -43,12 +34,13 @@ class JittorSingleDriver(JittorDriver): 表示在显卡设备上进行训练; :param fp16: 是否开启 fp16; + :param jittor_kwargs: """ - def __init__(self, model, device=None, fp16: bool = False, **kwargs): + def __init__(self, model, device=None, fp16: bool = False, jittor_kwargs: Dict = {}, **kwargs): if device not in [None, "cpu", "gpu", "cuda"]: raise RuntimeError("Parameter `device` should be one of [None, 'cpu', 'gpu', 'cuda'] .") - super(JittorSingleDriver, self).__init__(model, fp16) + super(JittorSingleDriver, self).__init__(model, fp16, jittor_kwargs=jittor_kwargs) self.model_device = device if device is not None else "cpu" diff --git a/fastNLP/core/drivers/paddle_driver/fleet.py b/fastNLP/core/drivers/paddle_driver/fleet.py index a7d08e5c..6668d577 100644 --- a/fastNLP/core/drivers/paddle_driver/fleet.py +++ b/fastNLP/core/drivers/paddle_driver/fleet.py @@ -157,7 +157,7 @@ class PaddleFleetDriver(PaddleDriver): ): if USER_CUDA_VISIBLE_DEVICES not in os.environ: raise RuntimeError("To run paddle distributed training, please set `FASTNLP_BACKEND` to 'paddle' before using FastNLP.") - super(PaddleFleetDriver, self).__init__(model, fp16=fp16, **kwargs) + super(PaddleFleetDriver, self).__init__(model, fp16=fp16, paddle_kwrags=paddle_kwargs, **kwargs) # 如果不是通过 launch 启动,要求用户必须传入 parallel_device if not is_pull_by_paddle_run: diff --git 
a/fastNLP/core/drivers/paddle_driver/paddle_driver.py b/fastNLP/core/drivers/paddle_driver/paddle_driver.py index 5bd35b7a..f604994e 100644 --- a/fastNLP/core/drivers/paddle_driver/paddle_driver.py +++ b/fastNLP/core/drivers/paddle_driver/paddle_driver.py @@ -68,12 +68,6 @@ class PaddleDriver(Driver): :param model: 训练时使用的 **PaddlePaddle** 模型; :param fp16: 是否开启混合精度训练; :param paddle_kwargs: - :kwargs: - * wo_auto_param_call (``bool``) -- 是否关闭在训练时调用我们的 ``auto_param_call`` 函数来自动匹配 batch 和前向函数的参数的行为; - - .. note:: - - 关于该参数的详细说明,请参见 :class:`~fastNLP.core.controllers.Trainer` 中的描述;函数 ``auto_param_call`` 详见 :func:`fastNLP.core.utils.auto_param_call`。 """ def __init__(self, model: "paddle.nn.Layer", fp16: Optional[bool] = False, paddle_kwrags: Dict = {}, **kwargs): diff --git a/tests/core/drivers/oneflow_driver/dist.py b/tests/core/drivers/oneflow_driver/dist.py deleted file mode 100644 index 894fcc3c..00000000 --- a/tests/core/drivers/oneflow_driver/dist.py +++ /dev/null @@ -1,78 +0,0 @@ -import oneflow -from oneflow import nn -from oneflow.utils.data import DataLoader, Dataset -from oneflow.nn.parallel import DistributedDataParallel as ddp -import os -# print(oneflow.ones(3,4).device) -# print(oneflow.rand(3,4).device) -# exit(0) -# PLACEMENT = oneflow.placement("cuda", [0,1]) -# S0 = oneflow.sbp.split(0) -# B = oneflow.sbp.broadcast -print(oneflow.cuda.current_device()) -exit(0) -class OneflowArgMaxDataset(Dataset): - def __init__(self, feature_dimension=10, data_num=1000, seed=0): - self.num_labels = feature_dimension - self.feature_dimension = feature_dimension - self.data_num = data_num - self.seed = seed - - g = oneflow.Generator() - g.manual_seed(1000) - self.x = oneflow.randint(low=-100, high=100, size=[data_num, feature_dimension], generator=g).float() - self.y = oneflow.max(self.x, dim=-1)[1] - - def __len__(self): - return self.data_num - - def __getitem__(self, item): - return self.x[item], self.y[item] - -class Model(nn.Module): - def __init__(self, 
num_labels, feature_dimension): - super(Model, self).__init__() - self.num_labels = num_labels - - self.linear1 = nn.Linear(in_features=feature_dimension, out_features=10) - self.ac1 = nn.ReLU() - self.linear2 = nn.Linear(in_features=10, out_features=10) - self.ac2 = nn.ReLU() - self.output = nn.Linear(in_features=10, out_features=num_labels) - - def forward(self, x): - x = self.ac1(self.linear1(x)) - x = self.ac2(self.linear2(x)) - x = self.output(x) - return x - -dataset = OneflowArgMaxDataset(10, 100) -model = Model(10, 10) -loss_func = nn.CrossEntropyLoss() -optimizer = oneflow.optim.Adam(model.parameters(), 0.001) -dataloader = oneflow.utils.data.DataLoader(dataset, batch_size=32) - -device = "cuda" -model.to(device) -# model = ddp(model) -loss_func.to(device) - -# model = model.to_global(PLACEMENT, B) - -for i in range(2): - for i, (x, y) in enumerate(dataloader): - if i % 2 != oneflow.env.get_rank(): - continue - x = x.to(device) - y = y.to(device) - # x = x.to_global(PLACEMENT, S0) - # y = y.to_global(PLACEMENT, S0) - output = model(x) - loss = loss_func(output, y) - optimizer.zero_grad() - loss.backward() - optimizer.step() -oneflow.save(model, "ttt") -print("end.") -# python -m oneflow.distributed.launch --nproc_per_node 2 dist.py - From ea40c62f0aed1b2b4db2ee632e4044a81167010a Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Thu, 30 Jun 2022 06:38:11 +0000 Subject: [PATCH 46/52] fix conflict --- fastNLP/envs/imports.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/fastNLP/envs/imports.py b/fastNLP/envs/imports.py index c0ffffa3..08afc6a5 100644 --- a/fastNLP/envs/imports.py +++ b/fastNLP/envs/imports.py @@ -22,10 +22,7 @@ _NEED_IMPORT_FAIRSCALE = not _IS_WINDOWS and _module_available("fairscale") and _NEED_IMPORT_TORCH = _module_available("torch") and 'torch' in need_import _NEED_IMPORT_JITTOR = _module_available("jittor") and 'jittor' in need_import _NEED_IMPORT_PADDLE = _module_available("paddle") and 'paddle' in 
need_import -<<<<<<< HEAD _NEED_IMPORT_DEEPSPEED = _module_available("deepspeed") and 'torch' in need_import -======= _NEED_IMPORT_ONEFLOW = _module_available("oneflow") and 'oneflow' in need_import ->>>>>>> dev0.8.0 _TORCH_GREATER_EQUAL_1_8 = _NEED_IMPORT_TORCH and _compare_version("torch", operator.ge, "1.8.0") From cf3877ca3130e88ce613a7eb86f6728ede19b0c5 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Thu, 30 Jun 2022 06:42:16 +0000 Subject: [PATCH 47/52] conflict --- fastNLP/core/drivers/__init__.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/fastNLP/core/drivers/__init__.py b/fastNLP/core/drivers/__init__.py index 84e0e9b5..d775a039 100644 --- a/fastNLP/core/drivers/__init__.py +++ b/fastNLP/core/drivers/__init__.py @@ -1,7 +1,6 @@ __all__ = [ 'Driver', 'TorchDriver', -<<<<<<< HEAD "TorchSingleDriver", "TorchDDPDriver", "DeepSpeedDriver", @@ -11,7 +10,6 @@ __all__ = [ "JittorDriver", "JittorSingleDriver", "JittorMPIDriver", -======= 'TorchSingleDriver', 'TorchDDPDriver', 'PaddleDriver', @@ -23,7 +21,6 @@ __all__ = [ 'OneflowDriver', 'OneflowSingleDriver', 'OneflowDDPDriver', ->>>>>>> dev0.8.0 'torch_seed_everything', 'paddle_seed_everything', 'oneflow_seed_everything', From eb43948636b99c27e573d99b0b0ebed05a8eb35b Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Thu, 30 Jun 2022 07:23:37 +0000 Subject: [PATCH 48/52] =?UTF-8?q?deepspeed=20=E6=96=87=E6=A1=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../core/drivers/torch_driver/deepspeed.py | 98 ++++++++++++++++--- 1 file changed, 87 insertions(+), 11 deletions(-) diff --git a/fastNLP/core/drivers/torch_driver/deepspeed.py b/fastNLP/core/drivers/torch_driver/deepspeed.py index a99a42f8..aedff1e9 100644 --- a/fastNLP/core/drivers/torch_driver/deepspeed.py +++ b/fastNLP/core/drivers/torch_driver/deepspeed.py @@ -29,6 +29,81 @@ __all__ = [ ] class DeepSpeedDriver(TorchDDPDriver): + """ + 实现 ``deepspeed`` 
分布式训练的 ``Driver``。 + + .. note:: + + 您在绝大多数情况下不需要自己使用到该类,通过向 ``Trainer`` 传入正确的参数,您可以方便快速地部署您的分布式训练; + + ``DeepSpeedDriver`` 目前支持的三种启动方式: + + 1. 用户自己不进行任何操作,直接使用我们的 ``Trainer``,这时是由我们自己使用 ``open_subprocesses`` 拉起多个进程, + 然后 ``DeepSpeedDriver`` 自己通过调用 ``deepspeed.initialize`` 来初始化模型和同心组;(情况 A) + + .. code-block:: + + trainer = Trainer( + ... + driver='deepspeed', + device=[0, 1] + ) + trainer.run() + + 通过运行 ``python train.py`` 启动; + + 2. 用户同样不在 ``Trainer`` 之外初始化 ``deepspeed``,但是用户自己使用 ``python -m torch.distributed.launch`` 拉起来创建多个进程,这时我们仍旧 + 会通过调用 ``model.initialize`` 来初始化 ``ddp`` 的通信组;(情况 B) + + .. code-block:: + + trainer = Trainer( + ... + driver='deepspeed', + device=None + ) + trainer.run() + + 通过运行 ``deepspeed train.py`` 启动; + + 3. 用户自己在外面初始化 ``deepspeed``,并且通过 ``deepspeed train.py`` 拉起,这时无论是多个进程的拉起和通信组的建立 + 都由用户自己操作,我们只会在 ``driver.setup`` 的时候对 ``DeepSpeedDriver`` 设置一些必要的属性值;(情况 C) + + .. code-block:: + + import deepspeed + + # 初始化 + model, _, _, _ = deepspeed.initialize(model, ...) + + trainer = Trainer( + ... 
+ driver='deepspeed', + device=None + ) + trainer.run() + + 通过运行 ``deepspeed train.py`` 启动; + + :param model: 传入给 ``Trainer`` 的 ``model`` 参数; + :param parallel_device: 用于分布式训练的 ``gpu`` 设备; + :param is_pull_by_torch_run: 标志当前的脚本的启动是否由 ``python -m torch.distributed.launch`` 启动的; + :param fp16: 是否开启 fp16 训练; + :param deepspeed_kwargs: + * *strategy* -- 使用 ZeRO 优化的策略,默认为 ``deepspeed``;目前仅支持以下值: + + * ``deepspeed`` -- 使用 ZeRO 的第二阶段,等同于 ``deepspeed_stage_2``; + * ``deepspeed_stage_1`` -- 使用 ZeRO 的第一阶段,仅将 ``optimizer`` 的状态分散到不同设备上; + * ``deepspeed_stage_2`` -- 使用 ZeRO 的第二阶段,将 ``optimizer`` 和**梯度**分散到不同设备上; + * ``deepspeed_stage_2_offload`` -- 使用 ZeRO 的第二阶段,并且借助 cpu 的内存来进一步节约显存; + * ``deepspeed_stage_3`` -- 使用 ZeRO 的第三阶段,将 ``optimizer`` 、**梯度**和**模型**分散到不同设备上; + * ``deepspeed_stage_3_offload`` -- 使用 ZeRO 的第三阶段,并且借助 cpu 的内存来进一步节约显存; + * ``deepspeed_stage_3_offload_nvme`` -- 使用 ZeRO 的第三阶段,并且借助 NVMe 硬盘来进一步节约显存; + * *logging_level* -- ``deepspeed`` 库的日志等级,默认为 **logging.ERROR**; + * *config* -- ``deepspeed`` 的各项设置;**FastNLP** 允许用户传入自己的设置以增强灵活性,但这会使参数 + 中的 ``optimizer`` 、``strategy`` 、 ``fp16`` 等失效,即当这个参数存在时,**FastNLP** 会用该参数覆盖 + 其它的设置; + """ # TODO fp16 load_config def __init__( self, @@ -36,11 +111,13 @@ class DeepSpeedDriver(TorchDDPDriver): parallel_device: Union[List["torch.device"], "torch.device"], is_pull_by_torch_run = False, fp16: bool = False, + deepspeed_kwargs: Dict = {}, **kwargs ): assert _NEED_IMPORT_DEEPSPEED, "Deepspeed is not imported." - # assert not dist.is_initialized(), "DeepSpeedDriver does not support initialize distributed by user." 
- TorchDriver.__init__(self, model=model, fp16=False, **kwargs) + kwargs.pop("torch_kwargs", None) + self._ds_kwargs = deepspeed_kwargs + TorchDriver.__init__(self, model=model, fp16=False, torch_kwargs=deepspeed_kwargs, **kwargs) self.fp16 = fp16 # 如果用户自己在外面初始化 DDP,那么其一定是通过 python -m torch.distributed.launch 拉起的; @@ -108,7 +185,6 @@ class DeepSpeedDriver(TorchDDPDriver): "to 1 for deepspeed configuration.") self.train_micro_batch_size = 1 - self._ds_kwargs = kwargs.get("deepspeed_kwargs", {}) self.strategy = self._ds_kwargs.get("strategy", "deepspeed") deepspeed_logging_level = self._ds_kwargs.get("logging_level", logging.ERROR) deepspeed.utils.logging.logger.setLevel(deepspeed_logging_level) @@ -125,7 +201,7 @@ class DeepSpeedDriver(TorchDDPDriver): 准备分布式环境,该函数主要做以下两件事情: 1. 开启多进程,每个 gpu 设备对应单独的一个进程; - 2. 每个进程将模型迁移到自己对应的 ``gpu`` 设备上;然后使用 ``DistributedDataParallel`` 包裹模型; + 2. 使用 ``deepspeed.initialize`` 包裹模型; """ if len(self.optimizers) != 1: raise ValueError("Multi optimizers is not supported for `DeepSpeedDriver` right now.") @@ -160,15 +236,15 @@ class DeepSpeedDriver(TorchDDPDriver): self.open_subprocess() self.global_rank = self.local_rank # rank 一定是通过环境变量去获取的; deepspeed.init_distributed("nccl", distributed_port=self.master_port) - # 用户在这个 trainer 前面又初始化了一个 trainer,并且使用的是 TorchDDPDriver; + # 用户在这个 trainer 前面又初始化了一个 trainer,并且使用的是 DeepSpeedDriver; else: - # 如果 `dist.is_initialized() == True`,那么说明 TorchDDPDriver 在之前已经初始化并且已经 setup 过一次,那么我们需要保证现在 - # 使用的(即之后的)TorchDDPDriver 的设置和第一个 TorchDDPDriver 是完全一样的; + # 如果 `dist.is_initialized() == True`,那么说明 DeepSpeedDriver 在之前已经初始化并且已经 setup 过一次,那么我们需要保证现在 + # 使用的(即之后的)DeepSpeedDriver 的设置和第一个 DeepSpeedDriver 是完全一样的; pre_num_processes = int(os.environ[FASTNLP_DISTRIBUTED_CHECK]) if pre_num_processes != len(self.parallel_device): raise RuntimeError( - "Notice you are using `TorchDDPDriver` after one instantiated `TorchDDPDriver`, it is not" - "allowed that your second `TorchDDPDriver` has a new setting of parameters " + 
"Notice you are using `DeepSpeedDriver` after one instantiated `DeepSpeedDriver`, it is not" + "allowed that your second `DeepSpeedDriver` has a new setting of parameters " "`num_nodes` and `num_processes`.") self.world_size = dist.get_world_size() self.global_rank = dist.get_rank() @@ -302,7 +378,7 @@ class DeepSpeedDriver(TorchDDPDriver): 保存当前 driver 的模型到 folder 下。 :param filepath: 保存到哪个文件夹; - :param only_state_dict: 是否只保存权重; + :param only_state_dict: 是否只保存权重;在 ``DeepSpeedDriver`` 中该参数无效; :return: """ # deepspeed engine 要求在每个 rank 都调用 save_checkpoint,故去掉了 rank_zero_call 装饰器 @@ -325,7 +401,7 @@ class DeepSpeedDriver(TorchDDPDriver): 从 folder 中加载权重并赋值到当前 driver 的模型上。 :param filepath: 加载权重或模型的路径 - :param load_state_dict: 保存的内容是否只是权重。 + :param load_state_dict: 保存的内容是否只是权重;在 ``DeepSpeedDriver`` 中该参数无效; :param kwargs: :return: """ From 18018f0e640289c18a420f15d29bc3e741b39184 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Fri, 1 Jul 2022 05:53:53 +0000 Subject: [PATCH 49/52] =?UTF-8?q?=E8=B0=83=E6=95=B4=20fastNLP/core/callbac?= =?UTF-8?q?ks=20=E4=B8=AD=E7=9A=84=E9=83=A8=E5=88=86=E6=96=87=E6=A1=A3?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/callbacks/callback.py | 110 ++--- fastNLP/core/callbacks/callback_event.py | 378 +++++++++--------- fastNLP/core/callbacks/callback_manager.py | 24 +- fastNLP/core/callbacks/checkpoint_callback.py | 16 +- fastNLP/core/callbacks/fitlog_callback.py | 6 +- .../core/callbacks/has_monitor_callback.py | 64 +-- .../callbacks/load_best_model_callback.py | 24 +- .../core/callbacks/lr_scheduler_callback.py | 8 +- .../core/callbacks/more_evaluate_callback.py | 59 +-- fastNLP/core/callbacks/progress_callback.py | 43 +- fastNLP/core/callbacks/timer_callback.py | 2 +- fastNLP/core/callbacks/topk_saver.py | 14 +- .../torch_grad_clip_callback.py | 16 +- .../torch_lr_sched_callback.py | 12 +- 14 files changed, 392 insertions(+), 
384 deletions(-) diff --git a/fastNLP/core/callbacks/callback.py b/fastNLP/core/callbacks/callback.py index d3679572..74224c8a 100644 --- a/fastNLP/core/callbacks/callback.py +++ b/fastNLP/core/callbacks/callback.py @@ -10,7 +10,7 @@ from .callback_event import Event, Filter class Callback: r""" - 实际使用的 callback 类,不管是我们 fastNLP 默认提供的一些 callback 类,还是用户自己定制的 callback 类,都应该继承该基类; + 实际使用的 callback 类,不管是 **fastNLP** 默认提供的一些 callback 实例,还是用户自己定制的 callback 类,都应该继承该基类; callback 调用时机顺序大概如下:: Trainer.__init__(): @@ -41,17 +41,17 @@ class Callback: finally: on_train_end(trainer) - 其它 callback 例如 on_evaluate_begin(trainer)/on_evaluate_end(trainer, results)/on_save_model(trainer)/ - on_load_model(trainer)/on_save_checkpoint(trainer)/on_load_checkpoint(trainer)将根据需要在Trainer.run()中特定 - 的时间调用。 + 其它 callback 例如 **on_evaluate_begin(trainer)** / **on_evaluate_end(trainer, results)** / **on_save_model(trainer)** / + **on_load_model(trainer)** / **on_save_checkpoint(trainer)** / **on_load_checkpoint(trainer)** 将根据需要在 :meth:`fastNLP.Trainer.run` + 中特定的时间调用。 """ def on_after_trainer_initialized(self, trainer, driver): r""" - 在 `Trainer` 初始化后会被触发; + 在 ``Trainer`` 初始化后会被触发; - :param trainer: ``Trainer`` 实例; - :param driver: ``Trainer`` 中的 ``driver`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; + :param driver: :class:`~fastNLP.Trainer` 中的 ``driver`` 实例; """ pass @@ -59,7 +59,7 @@ class Callback: r""" 在 '预跑'检测 开始前会被触发; - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass @@ -67,7 +67,7 @@ class Callback: r""" 在 '预跑'检测 开始后会被触发; - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; :param sanity_check_res: 预跑得到的评测结果,关于对于 **预跑** 的解释,请见 :meth:`~fastNLP.core.controllers.trainer.Trainer.run`; """ pass @@ -76,7 +76,7 @@ class Callback: r""" 在训练开始前会被触发; - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass @@ -84,7 +84,7 @@ class Callback: r""" 在训练完成后会被触发; - :param trainer: ``Trainer`` 实例; 
+ :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass @@ -92,7 +92,7 @@ class Callback: r""" 在训练过程中的每一个 epoch 开始前会被触发; - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass @@ -100,7 +100,7 @@ class Callback: r""" 在训练过程中的每一个 epoch 完成后会被触发;此时 trainer.cur_epoch_idx 已经完成加 1 操作。 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass @@ -108,7 +108,7 @@ class Callback: r""" 在训练过程中准备取出下一个 batch 的数据时触发 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass @@ -116,30 +116,30 @@ class Callback: r""" 在训练过程中拿到当前的 batch 数据后会被触发; - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass def on_train_batch_begin(self, trainer, batch, indices): r""" - 在取得数据,执行完 ``input_mapping`` (如果 ``Trainer`` 传有该参数),并且移动 ``batch`` 中的 ``tensor`` 到了指定设备。 + 在取得数据,执行完 ``input_mapping`` (如果 :class:`~fastNLP.Trainer` 传有该参数),并且移动 ``batch`` 中的张量到了指定设备之后会被触发。 其中 ``batch`` 中的数据格式要么是 ``Dataloader`` 返回的每个 ``batch`` 的格式;要么是 ``input_mapping`` 之后的内容。 - 如果 ``batch`` 是 ``dict`` 类型,直接增删其中的 ``key`` 或 修改其中的 ``value`` 会影响到输入到 ``model`` 的中的 ``batch`` 数据。 + 如果 ``batch`` 是 ``dict`` 类型,直接增删其中的 key 或 修改其中的 value 会影响到输入模型的中的 ``batch`` 数据。 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; :param batch: batch 的数据,已经经过 ``input_mapping`` (如果有) 以及移动到指定设备 。 - :param list[int] indices: 当前的 ``batch`` 是 ``dataset`` 中的哪些数据。仅在 ``DataLoader`` 支持得到当前 ``batch index`` 的时候有值, - 其它时候为 None 。 + :param list[int] indices: 当前的 ``batch`` 是数据集中的哪些数据。仅在 ``DataLoader`` 支持得到当前 ``batch index`` 的时候有值, + 其它时候为 ``None`` 。 """ pass def on_train_batch_end(self, trainer): r""" - 完成一个 batch 的训练(forward)、梯度回传(backward)、梯度更新(step)、梯度置零、batch_idx_in_epoch与 - global_forward_batches累计加1操作。其中梯度更新】梯度置零操作会考虑 accumulation_steps ,所以不一定在当前 batch 会 + 完成一个 batch 的训练(forward)、梯度回传(backward)、梯度更新(step)、梯度置零、batch_idx_in_epoch 与 + global_forward_batches 累计加1操作之后会被触发。其中梯度更新、梯度置零操作会考虑 
**accumulation_steps** ,所以不一定在当前 batch 会 执行。 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass @@ -147,41 +147,41 @@ class Callback: r""" 在训练过程遇到异常时调用。 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; :param exception: 遭遇的异常; """ pass def on_save_model(self, trainer): r""" - 当调用 Trainer.save_model() 时调用,此刻模型还未保存。 + 当调用 :meth:`fastNLP.Trainer.save_model` 时调用,此刻模型还未保存。 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass def on_load_model(self, trainer): r""" - 当调用 Trainer.load_model() 加载模型时调用,此刻模型还未加载。 + 当调用 :meth:`fastNLP.Trainer.load_model` 加载模型时调用,此刻模型还未加载。 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass def on_save_checkpoint(self, trainer) -> Dict: r""" - 当 Trainer 将要保存 checkpoint 的时候触发 (即调用 Trainer.save_checkpoint() 函数时),该函数用于保存当前 callback 在恢复需要的相关数据。 + 当 Trainer 将要保存 checkpoint 的时候触发 (即调用 :meth:`Trainer.save_checkpoint`()` 函数时),该函数用于保存当前 callback 在恢复时需要的相关数据。 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass def on_load_checkpoint(self, trainer, states: Optional[Dict]): r""" - 当 Trainer 要恢复 checkpoint 的时候触发(即调用 Trainer.load_checkpoint() 函数时, 此刻 Trainer 与 Driver 已经加载好自身 - 的状态), 参数 states 为 Callback 在调用 on_save_checkpoint() 的返回值。 + 当 Trainer 要恢复 checkpoint 的时候触发(即调用 :meth:`Trainer.load_checkpoint` 函数时, 此刻 Trainer 与 Driver 已经加载好自身 + 的状态), 参数 states 为 Callback 在调用 :meth:`on_save_checkpoint` 的返回值。 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; :param states: """ pass @@ -190,7 +190,7 @@ class Callback: r""" 在 backward 前执行。 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; :param outputs: ``model`` 的返回内容。如果有 ``output_mapping``,则 ``outputs`` 中的内容为已经执行了 ``output_mapping`` 后的结果。 """ pass @@ -198,54 +198,54 @@ class Callback: def on_after_backward(self, trainer): r""" 在 ``backward`` 后执行。在多卡场景下,由于 
``accumulation_steps`` 的影响,仅在需要真正 ``update`` 参数那次梯度回传才会触发梯度同步, - 因此在多卡且使用 ``accumulation_steps`` 时,可能存在某些 ``step`` 各卡上梯度不一致的问题。 + 因此在多卡且使用 ``accumulation_steps`` 时,可能存在某些 step 各卡上梯度不一致的问题。 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass def on_before_optimizers_step(self, trainer, optimizers): r""" - 在进行 optimizer 优化进行前调用。该接口不一定每次前向计算都会触发,实际调用会受到 accumulation_steps 的影响。 + 在进行 optimizer 优化进行前调用。该接口不一定每次前向计算都会触发,实际调用会受到 ``accumulation_steps`` 的影响。 - :param trainer: ``Trainer`` 实例; - :param optimizers: 优化器,内容为在 ``Trainer`` 初始化时传入的值。 + :param trainer: :class:`~fastNLP.Trainer` 实例; + :param optimizers: 优化器,内容为在 :class:`~fastNLP.Trainer` 初始化时传入的值。 """ pass def on_after_optimizers_step(self, trainer, optimizers): r""" - 在进行 optimizer 优化进行后调用。该接口不一定每次前向计算都会触发,实际调用会受到 accumulation_steps 的影响。 + 在进行 optimizer 优化进行后调用。该接口不一定每次前向计算都会触发,实际调用会受到 ``accumulation_steps`` 的影响。 - :param trainer: ``Trainer`` 实例; - :param optimizers: 优化器,内容为在 ``Trainer`` 初始化时传入的值。 + :param trainer: :class:`~fastNLP.Trainer` 实例; + :param optimizers: 优化器,内容为在 :class:`~fastNLP.Trainer` 初始化时传入的值。 """ pass def on_before_zero_grad(self, trainer, optimizers): r""" - 在进行模型梯度置零前调用。该接口不一定每次前向计算都会触发,实际调用会受到 accumulation_steps 的影响。 + 在进行模型梯度置零前调用。该接口不一定每次前向计算都会触发,实际调用会受到 ``accumulation_steps`` 的影响。 - :param trainer: ``Trainer`` 实例; - :param optimizers: 优化器,内容为在 ``Trainer`` 初始化时传入的值。 + :param trainer: :class:`~fastNLP.Trainer` 实例; + :param optimizers: 优化器,内容为在 :class:`~fastNLP.Trainer` 初始化时传入的值。 """ pass def on_after_zero_grad(self, trainer, optimizers): r""" - 在进行模型梯度置零后调用。该接口不一定每次前向计算都会触发,实际调用会受到 accumulation_steps 的影响。 + 在进行模型梯度置零后调用。该接口不一定每次前向计算都会触发,实际调用会受到 ``accumulation_steps`` 的影响。 - :param trainer: ``Trainer`` 实例; - :param optimizers: 优化器,内容为在 ``Trainer`` 初始化时传入的值。 + :param trainer: :class:`~fastNLP.Trainer` 实例; + :param optimizers: 优化器,内容为在 :class:`~fastNLP.Trainer` 初始化时传入的值。 """ pass def on_evaluate_begin(self, trainer): r""" - 在将要进行 evaluate 时调用。如果是设置的以 step 数量 或 
自定义地 决定 evaluate 的频率,该接口是在 on_train_batch_end 之后 - 进行调用。如果是以 epoch 数量决定调用,该接口是在 on_train_epoch_end 之后调用。 + 在将要进行 ``evaluate`` 时调用。如果是设置的以 step 数量或自定义地决定 evaluate 的频率,该接口是在 :meth:`on_train_batch_end` 之后 + 进行调用。如果是以 epoch 数量决定调用时机,该接口是在 :meth:`on_train_epoch_end` 之后调用。 - :param trainer: ``Trainer`` 实例; + :param trainer: :class:`~fastNLP.Trainer` 实例; """ pass @@ -253,17 +253,17 @@ class Callback: r""" 结束 evaluate 时调用,并把 evaluate 的结果传入。 - :param trainer: ``Trainer`` 实例; - :param results: ``Trainer`` 内置的 ``Evaluator`` 评测的结果,通常是个 ``dict``; + :param trainer: :class:`~fastNLP.Trainer` 实例; + :param results: :class:`~fastNLP.Trainer` 内置的 ``Evaluator`` 评测的结果,通常是个 ``dict``; """ pass @property def callback_name(self): r""" - ``callback`` 的名称,我们会使用该名称从 ``checkpoint`` 中读取的相应的 ``state`` 并传递给 ``on_load_checkpoint()`` 函数。 + ``callback`` 的名称,我们会使用该名称从 ``checkpoint`` 中读取的相应的 ``state`` 并传递给 :meth:`on_load_checkpoint` 函数。 - :return: 返回用于区分该 ``callback`` 实例的 ``name``; + :return: 返回用于区分该 ``callback`` 实例的名称; """ return self.__class__.__name__ diff --git a/fastNLP/core/callbacks/callback_event.py b/fastNLP/core/callbacks/callback_event.py index e7657a25..8a51b6de 100644 --- a/fastNLP/core/callbacks/callback_event.py +++ b/fastNLP/core/callbacks/callback_event.py @@ -31,13 +31,13 @@ def check_legality(fn): class Event: """ - 与 Trainer.on 函数配合使用,达到控制 callback 函数运行时机的目的。 + 与 :meth:`Trainer.on` 函数配合使用,达到控制 callback 函数运行时机的目的。 - :param value: Trainer 的 callback 时机。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变量分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param value: Trainer 的 callback 时机; + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; """ every: 
Optional[int] once: Optional[int] @@ -53,416 +53,416 @@ class Event: return "".format(self.value, self.every, self.once, self.filter_fn) @staticmethod - @check_legality def on_after_trainer_initialized(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_after_trainer_initialized 时 + 当 Trainer 运行到 :func:`on_after_trainer_initialized` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。默认为 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_after_trainer_initialized', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_sanity_check_begin(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_sanity_check_begin 时 + 当 Trainer 运行到 :func:`on_sanity_check_begin` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; + :return: :return: """ return Event(value='on_sanity_check_begin', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_sanity_check_end(every=None, once=None, 
filter_fn=None): """ - 当 Trainer 运行到 on_sanity_check_end 时 + 当 Trainer 运行到 :func:`on_sanity_check_end` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_sanity_check_end', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_train_begin(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_train_begin 时 + 当 Trainer 运行到 :func:`on_train_begin` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_train_begin', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_train_end(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_train_end 时 + 当 Trainer 运行到 :func:`on_train_end` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 
filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_train_end', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_train_epoch_begin(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_train_epoch_begin 时 + 当 Trainer 运行到 :func:`on_train_epoch_begin` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_train_epoch_begin', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_train_epoch_end(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_train_epoch_end 时 + 当 Trainer 运行到 :func:`on_train_epoch_end` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 
两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_train_epoch_end', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_fetch_data_begin(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_fetch_data_begin 时 + 当 Trainer 运行到 :func:`on_fetch_data_begin` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_fetch_data_begin', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_fetch_data_end(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_fetch_data_end 时 + 当 Trainer 运行到 :func:`on_fetch_data_end` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_fetch_data_end', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_train_batch_begin(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 
on_train_batch_begin 时 + 当 Trainer 运行到 :func:`on_train_batch_begin` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_train_batch_begin', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_train_batch_end(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_train_batch_end 时 + 当 Trainer 运行到 :func:`on_train_batch_end` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_train_batch_end', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_exception(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_exception 时 + 当 Trainer 运行到 :func:`on_exception` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 
filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_exception', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_save_model(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_save_model 时 + 当 Trainer 运行到 :func:`on_save_model` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_save_model', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_load_model(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_load_model 时 + 当 Trainer 运行到 :func:`on_load_model` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 
Trainer; :return: """ return Event(value='on_load_model', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_save_checkpoint(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_save_checkpoint 时 + 当 Trainer 运行到 :func:`on_save_checkpoint` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_save_checkpoint', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_load_checkpoint(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_load_checkpoint 时 + 当 Trainer 运行到 :func:`on_load_checkpoint` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_load_checkpoint', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_load_checkpoint(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_load_checkpoint 时 + 当 Trainer 运行到 :func:`on_load_checkpoint` 时触发; - 
以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_load_checkpoint', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_before_backward(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_before_backward 时 + 当 Trainer 运行到 :func:`on_before_backward` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_before_backward', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_after_backward(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_after_backward 时 + 当 Trainer 运行到 :func:`on_after_backward` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 
两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_after_backward', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_before_optimizers_step(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_before_optimizers_step 时 + 当 Trainer 运行到 :func:`on_before_optimizers_step` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_before_optimizers_step', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_after_optimizers_step(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_after_optimizers_step 时 + 当 Trainer 运行到 :func:`on_after_optimizers_step` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 
两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_after_optimizers_step', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_before_zero_grad(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_before_zero_grad 时 + 当 Trainer 运行到 :func:`on_before_zero_grad` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_before_zero_grad', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_after_zero_grad(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_after_zero_grad 时 + 当 Trainer 运行到 :func:`on_after_zero_grad` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_after_zero_grad', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_evaluate_begin(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 
on_evaluate_begin 时 + 当 Trainer 运行到 :func:`on_evaluate_begin` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_evaluate_begin', every=every, once=once, filter_fn=filter_fn) @staticmethod - @check_legality + def on_evaluate_end(every=None, once=None, filter_fn=None): """ - 当 Trainer 运行到 on_evaluate_end 时 + 当 Trainer 运行到 :func:`on_evaluate_end` 时触发; - 以下三个参数互斥,只能设置其中一个。默认为行为等同于 every=1 。 + 以下三个参数互斥,只能设置其中一个。默认为行为等同于 ``every=1`` 。 - :param int every: 触发了多少次,才真正运行一次。 - :param bool once: 是否只在第一次运行后就不再执行了。 - :param Callable filter_fn: 输入参数的应该为 (filter, trainer),其中 filter 对象中包含了 filter.num_called 和 - filter.num_executed 两个变了分别获取当前被调用了多少次,真正执行了多少次。trainer 对象即为当前正在运行的 Trainer 。 + :param every: 每触发多少次才真正运行一次; + :param once: 在第一次运行后时候再次执行; + :param filter_fn: 输入参数的应该为 ``(filter, trainer)``,其中 ``filter`` 对象中包含了 `filter.num_called` 和 + `filter.num_executed` 两个变量分别获取当前被调用了多少次,真正执行了多少次;``trainer`` 对象即为当前正在运行的 Trainer; :return: """ return Event(value='on_evaluate_end', every=every, once=once, filter_fn=filter_fn) class Filter: - def __init__(self, every: Optional[int] = None, once: Optional[bool] = None, filter_fn: Optional[Callable] = None): - r""" - 通过该 `Filter` 作为函数修饰器来控制一个函数的实际的运行频率。 + r""" + 可以控制一个函数实际的运行频率的函数修饰器。 - :param every: 表示一个函数隔多少次运行一次; - :param once: 表示一个函数只运行一次; - :param filter_fn: 用户定制的频率控制函数;注意该函数内部的频率判断应当是无状态的,除了参数 `self.num_called` 和 - `self.num_executed` 外,因为我们会在预跑后重置这两个参数的状态; - """ + :param every: 
表示一个函数隔多少次运行一次; + :param once: 表示一个函数是否只运行一次; + :param filter_fn: 用户定制的频率控制函数;注意该函数内部的频率判断应当是无状态的,除了参数 `self.num_called` 和 + `self.num_executed` 外,因为我们会在预跑后重置这两个参数的状态; + """ + def __init__(self, every: Optional[int] = None, once: Optional[bool] = None, filter_fn: Optional[Callable] = None): # check legality check_legality(lambda *args,**kwargs:...)(every, once, filter_fn) if (every is None) and (once is None) and (filter_fn is None): diff --git a/fastNLP/core/callbacks/callback_manager.py b/fastNLP/core/callbacks/callback_manager.py index d3d8ae75..40f73485 100644 --- a/fastNLP/core/callbacks/callback_manager.py +++ b/fastNLP/core/callbacks/callback_manager.py @@ -75,12 +75,13 @@ class CallbackManager: def __init__(self, callbacks: Optional[List[Callback]]): r""" - 注意 callback 的调用顺序: + 注意 callback 的调用顺序为: + 1. 通过函数修饰器 `Trainer.on` 添加的 callback 函数; 2. 通过 `Trainer` 的参数 `callbacks` 添加的 callback 类; 3. 通过 `Trainer.add_callback_fn` 添加的 callback 函数; - :param callbacks: 初始化时可以传入的一系列 callback 类,通常为用户在初始化 ``Trainer`` 时直接传入的 callback 类; + :param callbacks: 初始化时可以传入的一系列 :class:`~fastNLP.Callback` 类,通常为用户在初始化 ``Trainer`` 时直接传入的 callback 列表; """ self._need_reproducible_sampler = False @@ -106,12 +107,9 @@ class CallbackManager: def initialize_class_callbacks(self): r""" - 在实际的运行过程中,我们是将具体的一个 callback 实例拆分为单独的一个个 callback 函数,然后将它们加在一个字典里,该字典的键值就是 + 在实际的运行过程中,我们会将具体的一个 callback 实例拆分为单独的一个个 callback 函数,然后将它们加在一个字典里,该字典的键值就是 一个个 callback 时机,也就是 `Event` 的类别; 如果一个 callback 类的 callback 函数并不具备任何作用,我们实际并不会将其加在字典当中; - - :param callbacks: - :return: """ for each_callback in self.class_callbacks: self._need_reproducible_sampler |= each_callback.need_reproducible_sampler @@ -144,11 +142,12 @@ class CallbackManager: 用于断点重训的 callback 的保存函数; 该函数主要涉及两个方面: - 1. callback 的状态的保存;我们会调用每一个 callback 的 `on_save_checkpoint` 方法,该方法应当返回一个字典,其中包含着 - 断点重训应当保存的状态; + 1. callback 的状态的保存;我们会调用每一个 callback 的 :func:`on_save_checkpoint` 方法,该方法应当返回一个字典,其中包含着 + 断点重训应当保存的状态; 2. 
每一个具体的 callback 函数的 filter 的状态; - :return: 一个包含上述内容的字典: + :param trainer: :class:`~fastNLP.Trainer` 实例; + :return: 一个包含上述内容的字典,格式如下: .. code-block:: { @@ -195,11 +194,10 @@ class CallbackManager: def on_load_checkpoint(self, trainer, states: Dict): r""" - 用于断点重训的加载函数; - 对应于断点重训的保存函数; + 用于断点重训的加载函数,对应于断点重训的保存函数; - :param trainer: `Trainer` - :param states: 见 `on_save_checkpoint` 函数的返回值; + :param trainer: :class:`~fastNLP.Trainer` 实例; + :param states: 同 :func:`on_save_checkpoint` 函数的返回值; """ # 1. 先恢复每一个具体的 callback 函数的 filter 的状态; diff --git a/fastNLP/core/callbacks/checkpoint_callback.py b/fastNLP/core/callbacks/checkpoint_callback.py index 0cc3021b..7132cb76 100644 --- a/fastNLP/core/callbacks/checkpoint_callback.py +++ b/fastNLP/core/callbacks/checkpoint_callback.py @@ -24,7 +24,7 @@ class CheckpointCallback(Callback): - {save_object}-epoch_{epoch_idx}-batch_{global_batch_idx}-exception_{exception_type}/ # exception时保存。 - {save_object}-epoch_{epoch_idx}-batch_{global_batch_idx}-{monitor}_{monitor_value}/ # 满足topk条件存储文件名 - model_save_fn 为 None ,则以上每个 folder 中,将生成 fastnlp_model.pkl.tar 文件。若 model_save_fn 不为 None, + ``model_save_fn`` 为 ``Non``e ,则以上每个 folder 中,将生成 fastnlp_model.pkl.tar 文件。若 ``model_save_fn`` 不为 ``None``, 则 fastNLP 将 folder 绝对路径传递给该函数,fastNLP 在该 folder 下不进行模型保存。默认情况下,本 checkpoint 只保存了 model 的状态;如还需保存 Trainer 的状态以断点重训的话,请使用 ``save_object='trainer'`` 。 @@ -42,18 +42,18 @@ class CheckpointCallback(Callback): 时间戳文件夹中。如果为 None ,默认使用当前文件夹。 :param every_n_epochs: 多少个 epoch 保存一次。 :param every_n_batches: 多少个 batch 保存一次。 - :param last: 如果为 True ,将在每次 epoch 运行结束都保存一次,会覆盖之前的保存。如果为 False 则不会保存 {save_object}-last 文件 - :param topk: 保存 monitor 结果 topK 个。 - :param on_exceptions: 在出异常信息时,是否保存。传入需要捕获的异常的类。默认将捕获 EarlyStopException 。 + :param last: 如果为 ``True`` ,将在每次 epoch 运行结束都保存一次,会覆盖之前的保存。如果为 ``False`` 则不会保存 ``{save_object}-last`` 文件 + :param topk: 保存 monitor 结果中的 ``topk`` 个。 + :param on_exceptions: 在出异常信息时,是否保存。传入需要捕获的异常的类。默认将捕获 
:class:`~fastNLP.core.callbacks.EarlyStopException` 。
     :param larger_better: monitor 的值是否时越大越好。
-    :param only_state_dict: 保存模型时是否只保存 state_dict 。当 model_save_fn 不为 None 时,该参数无效。
+    :param only_state_dict: 保存模型时是否只保存 state_dict 。当 ``model_save_fn`` 不为 ``None`` 时,该参数无效。
     :param model_save_fn: 个性化的保存函数,当触发保存操作时,就调用这个函数,这个函数应当接受一个文件夹作为参数,不返回任何东西。
-        如果传入了 model_save_fn 函数,fastNLP 将不再进行模型相关的保存。在多卡场景下,我们只在 rank 0 上会运行该函数。
+        如果传入了 ``model_save_fn`` 函数,fastNLP 将不再进行模型相关的保存。在多卡场景下,我们只在 rank 0 上会运行该函数。
     :param save_object: 可选 ['trainer', 'model'],表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果
         保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断
         点继续训练。如果保存的是 ``Model`` 对象,则可以通过 :meth:`Trainer.load_model` 加载该模型权重。
-    :param save_evaluate_results: 是否保存 evaluate 的结果。如果为 True ,在保存 topk 模型的 folder 中还将额外保存一个
-        fastnlp_evaluate_results.json 文件,记录当前的 results。仅在设置了 topk 的场景下有用,默认为 True 。
+    :param save_evaluate_results: 是否保存 evaluate 的结果。如果为 ``True`` ,在保存 topk 模型的 folder 中还将额外保存一个
+        ``fastnlp_evaluate_results.json`` 文件,记录当前的 results。仅在设置了 ``topk`` 的场景下有用,默认为 ``True`` 。
     :param kwargs:
     """
     def __init__(self, folder: Optional[Union[str, Path]] = None, every_n_epochs: Optional[int] = None,
diff --git a/fastNLP/core/callbacks/fitlog_callback.py b/fastNLP/core/callbacks/fitlog_callback.py
index 19a8b476..10dc49b7 100644
--- a/fastNLP/core/callbacks/fitlog_callback.py
+++ b/fastNLP/core/callbacks/fitlog_callback.py
@@ -14,9 +14,9 @@ if _module_available('fitlog'):
 class FitlogCallback(HasMonitorCallback):
     """
     自动记录 ``evaluation`` 结果到 ``fitlog`` 中。会自动记录每一次 ``evaluate`` 后的结果;同时会根据
-    ``monitor`` 记录最好的结果。另外,会自动将非 ``rank 0`` 上的 ``fitlog`` 设置为 ``debug`` 状态。同时还会在 ``fitlog`` 的
-    ``other`` 列中记录一个 ``launch_time`` ,可以通过这个数值找到当前这个脚本的在 save_folder (如果有使用其它需要保存模型的
-    ``Callback`` ,例如 :class:`~fastNLP.CheckpointCallback` )下的文件夹名称。
+    ``monitor`` 记录最好的结果。另外,会自动将非 ``rank 0`` 上的 ``fitlog`` 设置为 ``debug`` 状态。同时还会在 ``fitlog`` 的
+    ``other`` 列中记录一个 ``launch_time``
,可以通过这个数值找到当前这个脚本的在 save_folder (如果有使用其它需要保存模型的 + ``Callback`` ,例如 :class:`~fastNLP.CheckpointCallback` )下的文件夹名称。 :param monitor: 监控的 metric 值。 diff --git a/fastNLP/core/callbacks/has_monitor_callback.py b/fastNLP/core/callbacks/has_monitor_callback.py index 4fadc3d7..702d27c0 100644 --- a/fastNLP/core/callbacks/has_monitor_callback.py +++ b/fastNLP/core/callbacks/has_monitor_callback.py @@ -26,19 +26,19 @@ class CanItemDataType(ABC): class ResultsMonitor: """ - 可用于监控某个数值,并通过 is_better_results() 等接口实现检测结果是否变得更好了。 + 可用于监控某个数值,并通过 :meth:`is_better_results` 等接口检测结果是否变得更好。 - :param monitor: 监控的 metric 值。 + :param monitor: 监控的 metric 值: * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置); * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 - 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 + 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` ; * 为 ``Callable`` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 - 的 ``monitor`` 值请返回 ``None`` 。 - :param larger_better: monitor 是否时越大越好 + 的 ``monitor`` 值请返回 ``None`` ; + :param larger_better: monitor 是否为越大越好; """ def __init__(self, monitor:Union[Callback, str], larger_better:bool=True): self.set_monitor(monitor, larger_better) @@ -60,7 +60,7 @@ class ResultsMonitor: def itemize_results(self, results): """ - 将结果中有 .item() 方法的都调用一下,使得 tensor 类型的数据转为 python 内置类型。 + 执行结果中所有对象的 :meth:`item` 方法(如果没有则忽略),使得 Tensor 类型的数据转为 python 内置类型。 :param results: :return: @@ -69,10 +69,10 @@ class ResultsMonitor: def get_monitor_value(self, results:Dict)->Union[float, None]: """ - 获取 monitor 的值,如果 monitor 没有直接找到,会尝试使用 最长公共字符串算法 匹配的方式寻找。 + 获取 monitor 的值,如果 monitor 没有直接找到,会尝试使用 **最长公共字符串算法** 匹配的方式寻找。 - :param results: 评测结果。 - :return: 如果为 None ,表明此次没有找到合适的monitor + :param results: 评测结果; + :return: monitor 的值;如果为 ``None`` ,表明此次没有找到合适的monitor; """ if len(results) == 0 or self.monitor is None: return None @@ 
-100,10 +100,10 @@ class ResultsMonitor: def is_better_monitor_value(self, monitor_value: float, keep_if_better=True): """ - 检测 monitor_value 是否是更好的 + 检测 ``monitor_value`` 是否是更好的 - :param monitor_value: 待检查的 monitor_value 。如果为 None ,返回 False - :param keep_if_better: 如果传入的 monitor_value 值更好,则将其保存下来。 + :param monitor_value: 待检查的 ``monitor_value`` 。如果为 ``None`` ,返回 False; + :param keep_if_better: 如果传入的 ``monitor_value`` 值更好,则将其保存下来; :return: """ if monitor_value is None: @@ -115,10 +115,10 @@ class ResultsMonitor: def is_better_results(self, results, keep_if_better=True): """ - 检测给定的 results 是否比上一次更好,如果本次 results 中没有找到相关的monitor 返回 False。 + 检测给定的 ``results`` 是否比上一次更好,如果本次 results 中没有找到相关的 monitor 返回 ``False``。 - :param results: evaluation 结果。 - :param keep_if_better: 当返回为 True 时,是否保存到 self.monitor_value 中。 + :param results: evaluation 结果; + :param keep_if_better: 当返回为 ``True`` 时,是否保存到 ``self.monitor_value`` 中; :return: """ monitor_value = self.get_monitor_value(results) @@ -128,7 +128,7 @@ class ResultsMonitor: def is_former_monitor_value_better(self, monitor_value1, monitor_value2): """ - 传入的两个值中,是否monitor_value1的结果更好。 + 传入的两个值中,是否 ``monitor_value1`` 的结果更好。 :param monitor_value1: :param monitor_value2: @@ -149,7 +149,7 @@ class ResultsMonitor: @property def monitor_name(self): """ - 返回 monitor 的名字,如果 monitor 是个 callable 的函数,则返回该函数的名称。 + 返回 monitor 的名字,如果 monitor 是个 Callable 的函数,则返回该函数的名称。 :return: """ @@ -185,20 +185,20 @@ class ResultsMonitor: class HasMonitorCallback(ResultsMonitor, Callback): """ 该 callback 不直接进行使用,作为其它相关 callback 的父类使用,如果 callback 有使用 monitor 可以继承该函数里面实现了 - (1)判断monitor合法性;(2)在需要时, 根据trainer的monitor设置自己的monitor名称。 + (1)判断 monitor 合法性;(2)在需要时, 根据 trainer 的 monitor 设置自己的 monitor 名称。 - :param monitor: 监控的 metric 值。 + :param monitor: 监控的 metric 值: * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置); * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 
结果中没有找到完全一致的名称,将 - 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 + 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` ; * 为 ``Callable`` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 - 的 ``monitor`` 值请返回 ``None`` 。 - :param larger_better: monitor 是否时越大越好 - :param must_have_monitor: 这个 callback 是否必须有 monitor 设置。如果设置为 True ,且没检测到设置 monitor 会报错。 + 的 ``monitor`` 值请返回 ``None`` ; + :param larger_better: monitor 是否为越大越好; + :param must_have_monitor: 这个 callback 是否必须有 monitor 设置。如果设置为 ``True`` ,且没检测到设置 monitor 会报错; """ def __init__(self, monitor, larger_better, must_have_monitor=False): super().__init__(monitor, larger_better) @@ -230,20 +230,20 @@ class HasMonitorCallback(ResultsMonitor, Callback): class ExecuteOnceBetterMonitor(HasMonitorCallback): """ - 当监控的 monitor 结果更好的时候,调用 execute_fn 函数。 + 当监控的 ``monitor`` 结果更好的时候,调用 ``execute_fn`` 函数。 - :param monitor: 监控的 metric 值。 + :param monitor: 监控的 metric 值: * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + 将尝试使用 :class:`~fastNLP.Trainer` 中设置 ``monitor`` 值(如果有设置); * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 - 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 + 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` ; * 为 ``Callable`` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 - 的 ``monitor`` 值请返回 ``None`` 。 - :param larger_better: monitor 是否时越大越好 - :param execute_fn: 一个可执行的函数,不接受任何参数,不反回值。在 monitor 取得更好结果的时候会调用。 + 的 ``monitor`` 值请返回 ``None`` ; + :param larger_better: monitor 是否是越大越好; + :param execute_fn: 一个可执行的函数,不接受任何参数,没有返回值。在 monitor 取得更好结果的时候会调用; """ def __init__(self, monitor, larger_better, execute_fn): super().__init__(monitor, larger_better, must_have_monitor=True) diff --git a/fastNLP/core/callbacks/load_best_model_callback.py b/fastNLP/core/callbacks/load_best_model_callback.py index b0fa83c4..b530cdc5 100644 --- a/fastNLP/core/callbacks/load_best_model_callback.py +++ 
b/fastNLP/core/callbacks/load_best_model_callback.py @@ -19,25 +19,25 @@ class LoadBestModelCallback(HasMonitorCallback): 保存最佳的 monitor 值最佳的模型,并在训练结束的时候重新加载模型,默认会在加载之后删除权重文件。仅在训练正常结束的时候才能加载 最好的模型。 - :param monitor: 监控的 metric 值。 + :param monitor: 监控的 metric 值: * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置); * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 - 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 + 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` ; * 为 ``Callable`` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 - 的 ``monitor`` 值请返回 ``None`` 。 - :param larger_better: 该 metric 值是否是越大越好。 + 的 ``monitor`` 值请返回 ``None`` ; + :param larger_better: 该 metric 值是否是越大越好; :param save_folder: 保存的文件夹,如果为空,则保存在内存中。不为空,则保存一份权重到文件中,当为多机训练,且本值不为空时,请确保 - 不同的机器均可访问当该路径。当 model_save_fn 不为 None 时该值一定不能为空。 - :param only_state_dict: 是否只保存模型的参数。当 model_save_fn 不为空时,该值无效。 - :param model_save_fn: 保存 model 的函数,与 model_load_fn 必须同时不为空。本函数的输入为一个已经创建好的文件夹,没有输出, - 请在函数内完成对模型的保存。 - :param model_load_fn: 加载 model 的函数,与 model_save_fn 必须同时不为空。本函数的输入为一个已经创建好的文件夹,没有输出, - 请在函数内完成对模型的加载。 - :param delete_after_train: 在训练结束后是否删掉模型。 + 不同的机器均可访问当该路径。当 ``model_save_fn`` 不为 None 时该值一定不能为空; + :param only_state_dict: 是否只保存模型的参数。当 ``model_save_fn`` 不为空时,该值无效; + :param model_save_fn: 保存 model 的函数,与 ``model_load_fn`` 必须同时不为空。本函数的输入为一个已经创建好的文件夹,没有输出, + 请在函数内完成对模型的保存; + :param model_load_fn: 加载 model 的函数,与 ``model_save_fn`` 必须同时不为空。本函数的输入为一个已经创建好的文件夹,没有输出, + 请在函数内完成对模型的加载; + :param delete_after_train: 在训练结束后是否删掉模型; """ def __init__(self, monitor:Union[str, Callable]=None, larger_better:bool = True, only_state_dict:bool = True, save_folder:Optional[str] = None, model_save_fn:Optional[Callable] = None, diff --git a/fastNLP/core/callbacks/lr_scheduler_callback.py b/fastNLP/core/callbacks/lr_scheduler_callback.py index a71428ca..65c50dd3 100644 --- 
a/fastNLP/core/callbacks/lr_scheduler_callback.py +++ b/fastNLP/core/callbacks/lr_scheduler_callback.py @@ -7,11 +7,11 @@ __all__ = [ class LRSchedCallback(Callback): """ - 根据 step_on 参数在合适的时机调用 scheduler 的 step 函数。 + 根据 ``step_on`` 参数在合适的时机调用 scheduler 的 step 函数。 - :param scheduler: 实现了 step() 函数的对象 - :param step_on: 可选 ['batch', 'epoch'] 表示在何时调用 scheduler 的 step 函数。如果为 batch 的话在每次更新参数 - 之前调用;如果为 epoch 则是在一个 epoch 运行结束后调用。 + :param scheduler: 实现了 :meth:`step` 函数的对象; + :param step_on: 可选 ['batch', 'epoch'] 表示在何时调用 scheduler 的 step 函数。如果为 ``batch`` 的话在每次更新参数 + 之前调用;如果为 ``epoch`` 则是在一个 epoch 运行结束后调用; """ def __init__(self, scheduler, step_on:str='batch'): assert hasattr(scheduler, 'step') and callable(scheduler.step), "The scheduler object should have a " \ diff --git a/fastNLP/core/callbacks/more_evaluate_callback.py b/fastNLP/core/callbacks/more_evaluate_callback.py index 690146a2..7bbc8fa0 100644 --- a/fastNLP/core/callbacks/more_evaluate_callback.py +++ b/fastNLP/core/callbacks/more_evaluate_callback.py @@ -12,10 +12,10 @@ from .topk_saver import TopkSaver class MoreEvaluateCallback(HasMonitorCallback): """ - 当评测时需要调用不同的 evaluate_fn (例如在大部分生成任务中,一般使用训练 loss 作为训练过程中的 evaluate ;但同时在训练到 - 一定 epoch 数量之后,会让 model 生成的完整的数据评测 bleu 等。此刻就可能需要两种不同的 evaluate_fn ),只使用 Trainer - 无法满足需求,可以通过调用本 callback 进行。如果需要根据本 callback 中的评测结果进行模型保存,请传入 topk 以及 - topk_monitor 等相关参数。可以通过 evaluate_every 或 watch_monitor 控制触发进行 evaluate 的条件。 + 当评测时需要调用不同的 ``evaluate_fn`` (例如在大部分生成任务中,一般使用训练 loss 作为训练过程中的 evaluate ;但同时在训练到 + 一定 epoch 数量之后,会让 model 生成的完整的数据评测 bleu 等。此刻就可能需要两种不同的 ``evaluate_fn`` ),只使用 Trainer + 无法满足需求,可以通过调用本 callback 进行。如果需要根据本 callback 中的评测结果进行模型保存,请传入 ``topk`` 以及 + ``topk_monitor`` 等相关参数。可以通过 ``evaluate_every`` 或 ``watch_monitor`` 控制触发进行 evaluate 的条件。 如果设置了 evaluate 结果更好就保存的话,将按如下文件结构进行保存:: @@ -30,7 +30,7 @@ class MoreEvaluateCallback(HasMonitorCallback): 1. 为负数时表示每隔几个 ``epoch`` evaluate 一次; 2. 为正数则表示每隔几个 ``batch`` evaluate 一次; 3. 
为函数时表示用户自己传入的用于控制 evaluate 的频率的函数,该函数的应该接受当前 trainer 对象作为参数,并 - 返回一个 bool 值,返回为 True 说明需要进行 evaluate ;将在每个 ``batch`` 结束后调用该函数判断是否需要 evaluate; + 返回一个 bool 值,返回为 ``True`` 说明需要进行 evaluate ;将在每个 ``batch`` 结束后调用该函数判断是否需要 evaluate; .. note:: @@ -45,32 +45,41 @@ class MoreEvaluateCallback(HasMonitorCallback): 该函数表示当每经过 1000 个 batch,``Trainer`` 中内置的 ``Evaluator`` 就会验证一次; 另一个需要注意的事情在于该函数会在每一次 batch 的结尾进行调用,当该函数返回 ``True`` 时,``Evaluator`` 才会进行验证; - :param watch_monitor: 这个值用来表示监控的 Trainer 中的 evaluate 结果的,当该值不为 None ,evaluate_every 失效。本参数的 - 意义是,当检测到 Trainer 中 evaluate results 的 {watch_monitor} 的结果更好时,则进行一次 evaluate 。该参数有两种 - 取值: (1) str 类型,监控的 metric 值。如果在 evaluation 结果中没有找到完全一致的名称,将使用 最长公共字符串算法 找到最 - 匹配的那个作为 monitor ; (2) 也可以传入一个函数,接受参数为 evaluation 的结果(字典类型),返回一个 float 值作为 monitor - 的结果,如果当前结果中没有相关的monitor 值请返回 None 。 - :param watch_monitor_larger_better: watch_monitor 是否越大越好。 - :param evaluate_fn: 用来控制 `Evaluator` 在评测的前向传播过程中是调用哪一个函数,例如是 `model.evaluate_step` 还是 - `model.forward`;(1) 如果该值是 None,那么我们会默认使用 `evaluate_step` 当做前向传播的函数,如果在模型中没有 - 找到该方法,则使用 `model.forward` 函数;(2) 如果为 str 类型,则尝试从 model 中寻找该方法,找不到则报错。 + :param watch_monitor: 这个值用来表示监控的 Trainer 中的 evaluate 结果的,当该值不为 ``None`` ,``evaluate_every`` 失效。本参数的 + 意义是,当检测到 Trainer 中 evaluate results 的 ``{watch_monitor}`` 的结果更好时,则进行一次 evaluate 。该参数有两种 + 取值: + + 1. ``str`` 类型,含义为监控的 metric 值。如果在 evaluation 结果中没有找到完全一致的名称,将使用 **最长公共字符串算法** 找到最 + 匹配的那个作为 monitor ; + 2. 一个函数,接受参数为 evaluation 的结果(字典类型),返回一个 float 值作为 monitor + 的结果,如果当前结果中没有相关的monitor 值请返回 ``None`` ; + :param watch_monitor_larger_better: ``watch_monitor`` 是否越大越好; + :param evaluate_fn: 用来控制 `Evaluator` 在评测的前向传播过程中是调用哪一个函数,例如是 :meth:`model.evaluate_step` 还是 + :meth:`model.forward`: + + 1. 如果该值是 ``None``,那么我们会默认使用 :meth:`model.evaluate_step` 当做前向传播的函数,如果 + 在模型中没有找到该方法,则使用 :meth:`model.forward` 函数; + 2. 
如果为 ``str`` 类型,则尝试从 model 中寻找该方法,找不到则报错; :param num_eval_sanity_batch: 在初始化 Evaluator 后运行多少个 sanity check 的 batch ,检测一下。 - :param topk: 如果需要根据当前 callback 中的 evaluate 结果保存模型或 Trainer ,可以通过设置 tokp 实现。(1)为 -1 表示每次 - evaluate 后都保存;(2)为 0 (默认),表示不保存;(3)为整数,表示保存性能最 topk 个。 + :param topk: 如果需要根据当前 callback 中的 evaluate 结果保存模型或 Trainer ,可以通过设置 topk 实现: + + 1. 为 ``-1`` 表示每次 evaluate 后都保存; + 2. 为 ``0`` (默认),表示不保存; + 3. 为整数,表示保存性能最好的 ``topk`` 个。 :param topk_monitor: 如果需要根据当前 callback 中的 evaluate 结果保存。这个参数是指在当前 callback 中的 evaluate 结果寻找 - :param topk_larger_better: topk_monitor 的值是否时越大越好。 + :param topk_larger_better: ``topk_monitor`` 的值是否是越大越好。 :param folder: 保存的文件夹,fastNLP 将在该文件下以时间戳创建子文件夹,并在里面保存。因此不同次运行可以将被保存到不同的 - 时间戳文件夹中。如果为 None ,默认使用当前文件夹。 - :param only_state_dict: 保存模型时是否只保存 state_dict 。当 model_save_fn 不为 None 时,该参数无效。 + 时间戳文件夹中。如果为 ``··``None`` ,默认使用当前文件夹。 + :param only_state_dict: 保存模型时是否只保存 state_dict 。当 ``model_save_fn`` 不为 ``None`` 时,该参数无效。 :param save_object: 可选 ['trainer', 'model'],表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 - 保存 ``trainer`` 对象的话,将会保存 :class:~fastNLP.Trainer 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 + 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 点继续训练。如果保存的是 ``Model`` 对象,则可以通过 :meth:`Trainer.load_model` 加载该模型权重。 :param model_save_fn: 个性化的保存函数,当触发保存操作时,就调用这个函数,这个函数应当接受一个文件夹作为参数,不返回任何东西。 - 如果传入了 model_save_fn 函数,fastNLP 将不再进行模型相关的保存。在多卡场景下,我们只在 rank 0 上会运行该函数。 - :param save_evaluate_results: 是否保存 evaluate 的结果。如果为 True ,在保存 topk 模型的 folder 中还将额外保存一个 - ``fastnlp_evaluate_results.json`` 文件,记录当前的 results。仅在设置了 topk 的场景下有用,默认为 True 。 - :param save_kwargs: dict。更多的保存相关的参数。 - :param kwargs: 其它与 Evaluator 相关的初始化参数,如果不传入,将从 Trainer 中获取。 + 如果传入了 ``model_save_fn`` 函数,fastNLP 将不再进行模型相关的保存。在多卡场景下,我们只在 rank 0 上会运行该函数。 + :param save_evaluate_results: 是否保存 evaluate 的结果。如果为 ``True`` ,在保存 topk 模型的 folder 中还将额外保存一个 + ``fastnlp_evaluate_results.json`` 文件,记录当前的 results。仅在设置了 ``topk`` 的场景下有效,默认为 True 。 + 
:param save_kwargs: 一个字典,表示更多的保存相关的参数。 + :param kwargs: 其它与 :class:`~fastNLP.Evaluator` 相关的初始化参数,如果不传入,将从 :class:`~fastNLP.Trainer` 中获取。 """ def __init__(self, dataloaders, metrics:Dict, evaluate_every:Optional[Union[int, Callable]]=-1, watch_monitor:Union[str, Callable]=None, watch_monitor_larger_better:bool=True, diff --git a/fastNLP/core/callbacks/progress_callback.py b/fastNLP/core/callbacks/progress_callback.py index c172a9a7..3a412b03 100644 --- a/fastNLP/core/callbacks/progress_callback.py +++ b/fastNLP/core/callbacks/progress_callback.py @@ -60,7 +60,7 @@ def choose_progress_callback(progress_bar: Union[str, ProgressCallback]) -> Prog class RichCallback(ProgressCallback): """ - 在训练过程中打印 rich progress bar 的 callback 。在 Trainer 中,默认就会使用这个 callback 来显示进度。如果需要定制这个 Callback 的 + 在训练过程中打印 *rich* progress bar 的 callback 。在 Trainer 中,默认就会使用这个 callback 来显示进度。如果需要定制这个 Callback 的 参数,请通过实例化本 Callback 并传入到 Trainer 中实现。在打印 evaluate 的结果时,不会打印名称以 "_" 开头的内容。 :param print_every: 多少个 batch 更新一次显示。 @@ -169,26 +169,26 @@ class RichCallback(ProgressCallback): class RawTextCallback(ProgressCallback): + """ + 通过向命令行打印进度的方式显示。在打印 evaluate 的结果时,不会打印名称以 "_" 开头的内容。 + + :param print_every: 多少个 batch 更新一次显示。 + :param loss_round_ndigit: 显示的 loss 保留多少位有效数字 + :param monitor: 监控的 metric 值。当检测到这个key的结果更好时,会打印出不同的颜色进行提示。 + + * 为 ``None`` + 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + * 为 ``str`` + 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 + 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 + * 为 ``Callable`` + 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 + 的 ``monitor`` 值请返回 ``None`` 。 + :param larger_better: 是否是monitor的结果越大越好。 + :param format_json: 是否format json再打印 + """ def __init__(self, print_every:int = 1, loss_round_ndigit:int = 6, monitor:str=None, larger_better:bool=True, format_json=True): - """ - 通过向命令行打印进度的方式显示。在打印 evaluate 的结果时,不会打印名称以 "_" 开头的内容。 - - :param print_every: 多少个 batch 更新一次显示。 - :param 
loss_round_ndigit: 显示的 loss 保留多少位有效数字 - :param monitor: 监控的 metric 值。当检测到这个key的结果更好时,会打印出不同的颜色进行提示。 - - * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 - * 为 ``str`` - 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 - 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 - * 为 ``Callable`` - 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 - 的 ``monitor`` 值请返回 ``None`` 。 - :param larger_better: 是否是monitor的结果越大越好。 - :param format_json: 是否format json再打印 - """ super().__init__(monitor=monitor, larger_better=larger_better, must_have_monitor=False) self.print_every = print_every self.task2id = {} @@ -242,8 +242,9 @@ class RawTextCallback(ProgressCallback): class TqdmCallback(ProgressCallback): """ - 在训练过程中打印 tqdm progress bar 的 callback 。在 Trainer 中,默认就会使用这个 callback 来显示进度。如果需要定制这个 Callback 的 - 参数,请通过实例化本 Callback 并传入到 Trainer 中实现。在打印 evaluate 的结果时,不会打印名称以 "_" 开头的内容。 + 在训练过程中打印 *tqdm* progress bar 的 callback 。在 Trainer 中,如果设置了 ``progress_bar='tqdm'`` 就会使用 + 这个 callback 来显示进度。如果需要定制这个 Callback 的参数,请通过实例化本 Callback 并传入到 Trainer 中实现。在 + 打印 evaluate 的结果时,不会打印名称以 "_" 开头的内容。 :param print_every: 多少个 batch 更新一次显示。 :param loss_round_ndigit: 显示的 loss 保留多少位有效数字 diff --git a/fastNLP/core/callbacks/timer_callback.py b/fastNLP/core/callbacks/timer_callback.py index f0dafcb6..27dbe538 100644 --- a/fastNLP/core/callbacks/timer_callback.py +++ b/fastNLP/core/callbacks/timer_callback.py @@ -68,7 +68,7 @@ class Timers: class TimerCallback(Callback): """ - 这个 callback 的作用是打印训练过程中的相关时间信息,例如训练时长,评测时长,总的时长等 + 这个 callback 的作用是打印训练过程中的相关时间信息,例如训练时长、评测时长、总时长等 """ def __init__(self, print_every=-1, time_ndigit=3): diff --git a/fastNLP/core/callbacks/topk_saver.py b/fastNLP/core/callbacks/topk_saver.py index 98e5c269..389bac4b 100644 --- a/fastNLP/core/callbacks/topk_saver.py +++ b/fastNLP/core/callbacks/topk_saver.py @@ -178,8 +178,8 @@ class TopkSaver(ResultsMonitor, Saver): - YYYY-mm-dd-HH_MM_SS_fffff/ # 
自动根据当前脚本的启动时间创建的 - {save_object}-epoch_{epoch_idx}-batch_{global_batch_idx}-{topk_monitor}_{monitor_value}/ # 满足topk条件存储文件名 - :param topk: 保存 topk 多少的模型,-1 为保存所有模型;0 为都不保存;大于 0 的数为保存 topk 个。 - :param monitor: 监控的 metric 值。 + :param topk: 保存表现最好的 ``topk`` 个模型,-1 为保存所有模型;0 为都不保存;大于 0 的数为保存 ``topk`` 个; + :param monitor: 监控的 metric 值: * 为 ``None`` 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 @@ -192,14 +192,14 @@ class TopkSaver(ResultsMonitor, Saver): :param larger_better: 该 monitor 是否越大越好。 :param folder: 保存在哪个文件夹下,默认为当前 folder 下。 :param save_object: 可选 ['trainer', 'model'],表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 - 保存 ``trainer`` 对象的话,将会保存 :class:~fastNLP.Trainer 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 + 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 点继续训练。如果保存的是 ``Model`` 对象,则可以通过 :meth:`Trainer.load_model` 加载该模型权重。 - :param only_state_dict: 保存时是否仅保存权重,在 model_save_fn 不为 None 时无意义。 + :param only_state_dict: 保存时是否仅保存权重,在 ``model_save_fn`` 不为 None 时无意义。 :param model_save_fn: 个性化的保存函数,当触发保存操作时,就调用这个函数,这个函数应当接受一个文件夹作为参数,不返回任何东西。 - 如果传入了 model_save_fn 函数,fastNLP 将不再进行模型相关的保存。在多卡场景下,我们只在 rank 0 上会运行该函数。 + 如果传入了 ``model_save_fn`` 函数,fastNLP 将不再进行模型相关的保存。在多卡场景下,我们只在 rank 0 上会运行该函数。 :param save_evaluate_results: 是否保存 evaluate 的结果。如果为 True ,在保存 topk 模型的 folder 中还将额外保存一个 - ``fastnlp_evaluate_results.json`` 文件,记录当前的 metric results 。仅在设置了 topk 的场景下有用,默认为 True 。 - :param kwargs: 更多需要传递给 Trainer.save_checkpoint() 或者 Trainer.save_model() 接口的参数。 + ``fastnlp_evaluate_results.json`` 文件,记录当前的 metric results 。仅在设置了 ``topk`` 的场景下有用,默认为 True 。 + :param kwargs: 更多需要传递给 :meth:`Trainer.save_checkpoint` 或者 :meth:`Trainer.save_model` 接口的参数。 """ def __init__(self, topk:int=0, monitor:str=None, larger_better:bool=True, folder:str=None, save_object:str='model', only_state_dict:bool=True, model_save_fn:Callable=None, save_evaluate_results:bool=True, diff --git 
a/fastNLP/core/callbacks/torch_callbacks/torch_grad_clip_callback.py b/fastNLP/core/callbacks/torch_callbacks/torch_grad_clip_callback.py index c986e4e4..10ef7894 100644 --- a/fastNLP/core/callbacks/torch_callbacks/torch_grad_clip_callback.py +++ b/fastNLP/core/callbacks/torch_callbacks/torch_grad_clip_callback.py @@ -11,17 +11,17 @@ if _NEED_IMPORT_FAIRSCALE: class TorchGradClipCallback(Callback): r""" - 在每次 optimizer update 之前将 parameter 进行 clip 。 + 在每次 :func:`optimizer.step` 之前对参数的梯度进行截断。 - :param clip_value: 将gradient 限制到[-clip_value, clip_value]。clip_value应该为正数 - :param clip_type: 支持'norm', 'value'两种: + :param clip_value: 将梯度限制到 [-clip_value, clip_value] 之间。``clip_value`` 应该为正数; + :param clip_type: 应为 ``'norm'``, ``'value'`` 中的一个: - 1. 'norm', 将gradient的norm rescale到[-clip_value, clip_value] - 2. 'value', 将gradient限制在[-clip_value, clip_value], - 小于-clip_value的gradient被赋值为-clip_value;大于clip_value的gradient被赋值为clip_value. + 1. 为 ``'norm'`` 时, 将梯度的范数限制在 [-clip_value, clip_value] 之间; + 2. 
为 ``'value'`` 时,, 将梯度限制在 [-clip_value, clip_value] 之间,小于 ``-clip_value`` + 的梯度被赋值为 ``-clip_value``,大于 ``clip_value`` 的梯度被赋值为 ``clip_value``; - :param None,torch.Tensor,List[torch.Tensor] parameters: 一般通过model.parameters()获得。 - 如果为None则默认对 Trainer 的 optimizers 中所有参数进行梯度裁剪。 + :param parameters: 参数,一般通过 :func:`model.parameters` 获得。 + 如果为 ``None`` 则默认对 Trainer 的 optimizers 中所有参数进行梯度裁剪。 """ def __init__(self, clip_value:int=1, clip_type:str='norm', parameters:Union["torch.Tensor", List["torch.Tensor"]]=None): diff --git a/fastNLP/core/callbacks/torch_callbacks/torch_lr_sched_callback.py b/fastNLP/core/callbacks/torch_callbacks/torch_lr_sched_callback.py index 29c1aa2b..24474b64 100644 --- a/fastNLP/core/callbacks/torch_callbacks/torch_lr_sched_callback.py +++ b/fastNLP/core/callbacks/torch_callbacks/torch_lr_sched_callback.py @@ -9,14 +9,14 @@ from ..callback import Callback class TorchWarmupCallback(Callback): r""" - 调整 learning rate 的 callback 。 + 调整学习率的 **callback** 。 - :param warmup: 如果warmup为int,则在该step之前,learning rate根据schedule的策略变化; 如果warmup为float, - 如0.1, 则前10%的step是按照schedule策略调整learning rate。 - :param schedule: 以哪种方式调整。 + :param warmup: 如果 ``warmup`` 为整数,则在该 step 之前,学习率根据 ``schedule`` 的策略变化; 如果 ``warmup`` 为 ``float``, + 如 0.1, 则前 10% 的 step 是按照 ``schedule`` 策略调整。 + :param schedule: 对学习率进行调整的策略: - 1. linear: 前warmup的step上升到指定的learning rate(从Trainer中的optimizer处获取的), 后warmup的step下降到0; - 2. constant前warmup的step上升到指定learning rate,后面的step保持learning rate. + 1. *linear* -- 前 ``warmup`` 的 step 上升到指定的学习率(从 Trainer 中 optimizer 处获取), 在剩下的 step 中下降到 0; + 2. 
*constant* -- 前 ``warmup`` 的 step 上升到指定的学习率,余下的 step 保持不变。 """ def __init__(self, warmup:Union[int, float]=0.1, schedule:str='constant'): super().__init__() From c597a72c212c8dfab524f81ebb1a476c52b9814e Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Fri, 1 Jul 2022 06:53:46 +0000 Subject: [PATCH 50/52] =?UTF-8?q?=E8=B0=83=E6=95=B4=20fastNLP/core/collato?= =?UTF-8?q?rs/=20=E7=9A=84=E9=83=A8=E5=88=86=E6=96=87=E6=A1=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/callbacks/checkpoint_callback.py | 2 +- fastNLP/core/callbacks/early_stop_callback.py | 2 +- .../core/callbacks/lr_scheduler_callback.py | 2 +- .../core/callbacks/more_evaluate_callback.py | 4 +- fastNLP/core/callbacks/topk_saver.py | 4 +- fastNLP/core/collators/collator.py | 68 ++++++++++--------- fastNLP/core/collators/packer_unpacker.py | 2 +- fastNLP/core/collators/padders/get_padder.py | 8 +-- .../core/collators/padders/jittor_padder.py | 50 +++++++------- .../core/collators/padders/numpy_padder.py | 41 ++++++++--- .../core/collators/padders/oneflow_padder.py | 43 +++++++++--- fastNLP/core/collators/padders/padder.py | 2 +- .../core/collators/padders/paddle_padder.py | 49 ++++++++++--- fastNLP/core/collators/padders/raw_padder.py | 32 ++++----- .../core/collators/padders/torch_padder.py | 24 +++---- fastNLP/core/collators/padders/torch_utils.py | 1 + fastNLP/core/collators/padders/utils.py | 11 ++- 17 files changed, 210 insertions(+), 135 deletions(-) diff --git a/fastNLP/core/callbacks/checkpoint_callback.py b/fastNLP/core/callbacks/checkpoint_callback.py index 7132cb76..30684643 100644 --- a/fastNLP/core/callbacks/checkpoint_callback.py +++ b/fastNLP/core/callbacks/checkpoint_callback.py @@ -49,7 +49,7 @@ class CheckpointCallback(Callback): :param only_state_dict: 保存模型时是否只保存 state_dict 。当 ``model_save_fn`` 不为 ``None`` 时,该参数无效。 :param model_save_fn: 个性化的保存函数,当触发保存操作时,就调用这个函数,这个函数应当接受一个文件夹作为参数,不返回任何东西。 如果传入了 
``model_save_fn`` 函数,fastNLP 将不再进行模型相关的保存。在多卡场景下,我们只在 rank 0 上会运行该函数。 - :param save_object: 可选 ['trainer', 'model'],表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 + :param save_object: 可选 ``['trainer', 'model']`` ,表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 点继续训练。如果保存的是 ``Model`` 对象,则可以通过 :meth:`Trainer.load_model` 加载该模型权重。 :param save_evaluate_results: 是否保存 evaluate 的结果。如果为 ``True`` ,在保存 topk 模型的 folder 中还将额外保存一个 diff --git a/fastNLP/core/callbacks/early_stop_callback.py b/fastNLP/core/callbacks/early_stop_callback.py index db9b6493..e73a2103 100644 --- a/fastNLP/core/callbacks/early_stop_callback.py +++ b/fastNLP/core/callbacks/early_stop_callback.py @@ -10,7 +10,7 @@ from fastNLP.core.utils.exceptions import EarlyStopException class EarlyStopCallback(HasMonitorCallback): """ - 用于 early stop 的 callback 。当监控的结果连续多少次没有变好边 raise 一个 EarlyStopException 。 + 用于 early stop 的 callback 。当监控的结果连续多少次没有变好便 raise 一个 EarlyStopException 。 :param monitor: 监控的 metric 值。 diff --git a/fastNLP/core/callbacks/lr_scheduler_callback.py b/fastNLP/core/callbacks/lr_scheduler_callback.py index 65c50dd3..3d3f4a0f 100644 --- a/fastNLP/core/callbacks/lr_scheduler_callback.py +++ b/fastNLP/core/callbacks/lr_scheduler_callback.py @@ -10,7 +10,7 @@ class LRSchedCallback(Callback): 根据 ``step_on`` 参数在合适的时机调用 scheduler 的 step 函数。 :param scheduler: 实现了 :meth:`step` 函数的对象; - :param step_on: 可选 ['batch', 'epoch'] 表示在何时调用 scheduler 的 step 函数。如果为 ``batch`` 的话在每次更新参数 + :param step_on: 可选 ``['batch', 'epoch']`` 表示在何时调用 scheduler 的 step 函数。如果为 ``batch`` 的话在每次更新参数 之前调用;如果为 ``epoch`` 则是在一个 epoch 运行结束后调用; """ def __init__(self, scheduler, step_on:str='batch'): diff --git a/fastNLP/core/callbacks/more_evaluate_callback.py b/fastNLP/core/callbacks/more_evaluate_callback.py index 7bbc8fa0..01a0f7ae 100644 --- a/fastNLP/core/callbacks/more_evaluate_callback.py +++ 
b/fastNLP/core/callbacks/more_evaluate_callback.py @@ -69,9 +69,9 @@ class MoreEvaluateCallback(HasMonitorCallback): :param topk_monitor: 如果需要根据当前 callback 中的 evaluate 结果保存。这个参数是指在当前 callback 中的 evaluate 结果寻找 :param topk_larger_better: ``topk_monitor`` 的值是否是越大越好。 :param folder: 保存的文件夹,fastNLP 将在该文件下以时间戳创建子文件夹,并在里面保存。因此不同次运行可以将被保存到不同的 - 时间戳文件夹中。如果为 ``··``None`` ,默认使用当前文件夹。 + 时间戳文件夹中。如果为 ``None`` ,默认使用当前文件夹。 :param only_state_dict: 保存模型时是否只保存 state_dict 。当 ``model_save_fn`` 不为 ``None`` 时,该参数无效。 - :param save_object: 可选 ['trainer', 'model'],表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 + :param save_object: 可选 ``['trainer', 'model']`` ,表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 点继续训练。如果保存的是 ``Model`` 对象,则可以通过 :meth:`Trainer.load_model` 加载该模型权重。 :param model_save_fn: 个性化的保存函数,当触发保存操作时,就调用这个函数,这个函数应当接受一个文件夹作为参数,不返回任何东西。 diff --git a/fastNLP/core/callbacks/topk_saver.py b/fastNLP/core/callbacks/topk_saver.py index 389bac4b..c5892674 100644 --- a/fastNLP/core/callbacks/topk_saver.py +++ b/fastNLP/core/callbacks/topk_saver.py @@ -24,7 +24,7 @@ class Saver: - folder_name # 由 save() 调用时传入。 :param folder: 保存在哪个文件夹下,默认为当前 folder 下。 - :param save_object: 可选 ['trainer', 'model'],表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 + :param save_object: 可选 ``['trainer', 'model']`` ,表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 保存 ``trainer`` 对象的话,将会保存 :class:~fastNLP.Trainer 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 点继续训练。如果保存的是 ``Model`` 对象,则可以通过 :meth:`Trainer.load_model` 加载该模型权重。 :param only_state_dict: 保存时是否仅保存权重,在 model_save_fn 不为 None 时无意义。 @@ -191,7 +191,7 @@ class TopkSaver(ResultsMonitor, Saver): 的 ``monitor`` 值请返回 ``None`` 。 :param larger_better: 该 monitor 是否越大越好。 :param folder: 保存在哪个文件夹下,默认为当前 folder 下。 - :param save_object: 可选 ['trainer', 'model'],表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 + :param save_object: 可选 ``['trainer', 'model']`` 
,表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 点继续训练。如果保存的是 ``Model`` 对象,则可以通过 :meth:`Trainer.load_model` 加载该模型权重。 :param only_state_dict: 保存时是否仅保存权重,在 ``model_save_fn`` 不为 None 时无意义。 diff --git a/fastNLP/core/collators/collator.py b/fastNLP/core/collators/collator.py index 5fbdacb9..fcffdaf0 100644 --- a/fastNLP/core/collators/collator.py +++ b/fastNLP/core/collators/collator.py @@ -85,27 +85,33 @@ def _get_backend() -> str: class Collator: """ 用于 pad 数据的对象。会自动将所有能够 pad (由 fastNLP 根据数据判定能否 pad )的数据都进行 pad 操作,默认 pad 的值为 0。 - 哦安定一个 field 是否可以 pad 的方式为:(1)当前这个 field 是否所有对象都是一样的数据类型;(因此,如果某 field 的数据有些是float - 有些是 int 将知道该 field 被判定为不可 pad 类型。)(2)当前这个 field 是否每个 sample 都具有一样的深度;(因此,例如有个 field 的 - 数据转为 batch 类型后为 [1, [1,2]], 会被判定为不可 pad ,因为第一个 sample 与 第二个 sample 深度不同)(3)当前这个 field 的类 - 型是否是可以 pad (例如 str 类型的数据)。可以通过设置 logger.setLevel('debug') 来打印是判定不可 pad 的原因。 + 判定一个 field 是否可以 pad 的方式为: + + 1. 当前这个 field 是否所有对象都是一样的数据类型;比如,如果某 field 的数据有些是 float ,有些是 int ,则该 field 将被 + 判定为不可 pad 类型; + 2. 当前这个 field 是否每个 sample 都具有一样的深度;比如,如果某 field 的数据转为 batch 类型后为 ``[1, [1,2]]``, 则会 + 被判定为不可 pad ,因为第一个 sample 与 第二个 sample 深度不同; + 3. 当前这个 field 的类型是否是可以 pad (例如 str 类型的数据)。可以通过设置 ``logger.setLevel('debug')`` 来打印是判定不可 + pad 的原因。 .. note:: - ``Collator`` 的原理是使用第一个 ``batch`` 的数据尝试推断每个``field``应该使用哪种类型的 ``Padder``,如果第一个 ``batch`` - 的数据刚好比较特殊,可能导致在之后的 pad 中遭遇失败,这种情况请通过 ``set_pad()`` 函数手动设置一下。 + ``Collator`` 的原理是使用第一个 ``batch`` 的数据尝试推断每个 ``field`` 应该使用哪种类型的 ``Padder``,如果第一个 ``batch`` + 的数据刚好比较特殊,可能导致在之后的 pad 中遭遇失败,这种情况请通过 :meth:`set_pad` 函数手动设置一下。 - todo 补充 code example 。 + .. 
todo:: + + 补充 code example 。 - 如果需要将某个本可以 pad 的 field 设置为不可 pad ,则可以通过 :meth:`~fastNLP.Collator.set_pad` 的 pad_val 设置为 None 实现。 + 如果需要将某个本可以 pad 的 field 设置为不可 pad ,则可以通过 :meth:`~fastNLP.Collator.set_pad` 的 ``pad_val`` 设置为 ``None`` 实现。 如果需要某些 field 不要包含在 pad 之后的结果中,可以使用 :meth:`~fastNLP.Collator.set_ignore` 进行设置。 Collator 在第一次进行 pad 的时候自动根据设置以及数据情况,为每个 field 获取一个 padder ,在之后的每次调用中,都将使用对应 的 Padder 给对应的 field 。 - :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ['torch','jittor','paddle','oneflow','numpy','raw', auto, None]。 - 若为 'auto' ,则在进行 pad 的时候会根据调用的环境决定其 backend 。该参数对不能进行 pad 的数据没用影响,不能 pad - 的数据返回一定是 list 。 + :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ``['torch','jittor','paddle','oneflow','numpy','raw', 'auto', None]``。 + 若为 ``'auto'`` ,则在进行 pad 的时候会根据调用的环境决定其 ``backend`` 。该参数对不能进行 pad 的数据没有影响,无法 pad 的数据返回一定 + 是 :class:`list` 。 """ def __init__(self, backend='auto'): self.unpack_batch_func = None @@ -192,20 +198,20 @@ class Collator: """ 如果需要对某个 field 的内容进行特殊的调整,请使用这个函数。 - :param field_name: 需要调整的 field 的名称。如果 Dataset 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 - field 的 key 来表示,如果是 nested 的 dict,可以使用元组表示多层次的 key,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); - 如果 __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。如果该 field 在数据中没 - 有找到,则报错;如果 __getitem__ 返回的是就是整体内容,请使用 "_single" 。 - :param pad_val: 这个 field 的默认 pad 值。如果设置为 None,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 - field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 - 无意义。 - :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto'],分别代表,输出为 list, numpy.ndarray, - torch.Tensor, paddle.Tensor, jittor.Var oneflow.Tensor 类型。若 pad_val 为 None ,该值无意义 。 - :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 - batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch - 形式,输出将被直接作为结果输出。 - :return: 返回 Collator 自身 + :param 
field_name: 需要调整的 field 的名称。如果 :meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1** 个元素。 + 如果该 field 在数据中没有找到,则报错;如果 :meth:`Dataset.__getitem__` 返回的是就是整体内容,请使用 "_single" 。 + :param pad_val: 这个 field 的默认 pad 值。如果设置为 ``None``,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 + field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 ``None`` 。如果 ``backend`` 为 ``None``, + 该值无意义。 + :param dtype: 对于需要 pad 的 field ,该 field 的数据 ``dtype`` 应该是什么。 + :param backend: 可选 ``['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto']`` ,分别代表,输出为 :class:`list`, + :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`paddle.Tensor`, :class:`jittor.Var`, :class:`oneflow.Tensor` 类型。 + 若 ``pad_val`` 为 ``None`` ,该值无意义 。 + :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 ``pad_val``, ``dtype``, ``backend`` 等参数失效。``pad_fn`` 的输入为当前 field 的 + batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。 + :return: 返回 Collator 自身; """ self._renew() @@ -275,8 +281,8 @@ class Collator: """ 设置可以 pad 的 field 默认 pad 为什么类型的 tensor - :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ['torch','jittor','paddle','oneflow','numpy','raw', 'auto', None], - 若为 auto ,则在进行 pad 的时候会自动根据调用的环境决定其 backend 。 + :param backend: 对于可以 pad 的 field,使用哪种 tensor,支持 ``['torch','jittor','paddle','oneflow','numpy','raw', 'auto', None]``, + 若为 ``'auto'`` ,则在进行 pad 的时候会自动根据调用的环境决定其 ``backend`` ; :return: """ assert backend in SUPPORTED_BACKENDS @@ -289,10 +295,10 @@ class Collator: >>> collator = Collator().set_ignore('field1', 'field2') - :param field_names: 需要忽略的 field 的名称。如果 Dataset 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 - field 的 key 来表示,如果是 nested 的 dict,可以使用元组来表示,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); 如果 - __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。 - :return: 返回 Collator 自身 + :param field_names: field_name: 需要调整的 field 的名称。如果 
:meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1** 个元素。 + :return: 返回 Collator 自身; """ self._renew() input_field_names = [(field, field) if isinstance(field, tuple) else ((field,), field) diff --git a/fastNLP/core/collators/packer_unpacker.py b/fastNLP/core/collators/packer_unpacker.py index 2b78ea0a..f2b734b7 100644 --- a/fastNLP/core/collators/packer_unpacker.py +++ b/fastNLP/core/collators/packer_unpacker.py @@ -70,7 +70,7 @@ class SequencePackerUnpacker: @staticmethod def unpack_batch(batch:Sequence[Sequence], ignore_fields, input_fields)->Dict: """ - 将 Sequence[Sequence] 转为 Mapping 。例如 [[[1, 2], 2], [[3], 2]] -> {'_0': [[1, 2], [3]], '_1': [1, 2]} + 将 Sequence[Sequence] 转为 Mapping 。例如 [[[1, 2], 2], [[3], 2]] -> {'_0': [[1, 2], [3]], '_1': [2, 2]} :param batch: 需要 unpack 的 batch 数据。 :param ignore_fields: 需要忽略的 field 。 diff --git a/fastNLP/core/collators/padders/get_padder.py b/fastNLP/core/collators/padders/get_padder.py index 6416a978..41bcd8c0 100644 --- a/fastNLP/core/collators/padders/get_padder.py +++ b/fastNLP/core/collators/padders/get_padder.py @@ -16,13 +16,13 @@ from .exceptions import * def get_padder(batch_field:Sequence[Any], pad_val, dtype, backend, field_name)->Padder: """ - 根据 参数 与 batch_field ,返回适合于当前 batch_field 的 padder 。 + 根据 参数 与 ``batch_field`` ,返回适合于当前 ``batch_field`` 的 *padder* 。 - :param batch_field: 将某 field 的内容组合成一个 batch 传入。 - :param pad_val: + :param batch_field: 将某 field 的内容组合成一个 batch 传入; + :param pad_val: :param backend: :param dtype: - :param field_name: 方便报错的。 + :param field_name: field 名称,方便在报错时显示; :return: """ try: diff --git a/fastNLP/core/collators/padders/jittor_padder.py b/fastNLP/core/collators/padders/jittor_padder.py index c9b36b89..6b37d61c 100644 --- a/fastNLP/core/collators/padders/jittor_padder.py +++ 
b/fastNLP/core/collators/padders/jittor_padder.py @@ -84,14 +84,14 @@ def _get_dtype(ele_dtype, dtype, class_name): class JittorNumberPadder(Padder): - def __init__(self, pad_val=0, ele_dtype=None, dtype=None): - """ - 可以将形如 [1, 2, 3] 这类的数据转为 jittor.Var([1, 2, 3]) + """ + 可以将形如 ``[1, 2, 3]`` 这类的数据转为 ``jittor.Var([1, 2, 3])`` - :param pad_val: 该值无意义 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 jittor.Var 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 jittor.long, jittor.float32, int, float 等 - """ + :param pad_val: 该值无意义 + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`jittor.Var` 类型; + :param dtype: 输出的数据的 dtype 是什么。如 :class:`jittor.long`, :class:`jittor.float32`, :class:`int`, :class:`float` 等; + """ + def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) super().__init__(pad_val=pad_val, dtype=dtype) @@ -106,23 +106,23 @@ class JittorNumberPadder(Padder): class JittorSequencePadder(Padder): - def __init__(self, pad_val=0, ele_dtype=None, dtype=None): - """ - 将类似于 [[1], [1, 2]] 的内容 pad 为 jittor.Var([[1, 0], [1, 2]]) 可以 pad 多重嵌套的数据。 + """ + 可以将形如 ``[[1], [1, 2]]`` 这类的数据转为 ``jittor.Var([[1], [1, 2]])`` - :param pad_val: 需要 pad 的值。 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 jittor.Var 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 jittor.long, jittor.float32, int, float 等 - """ + :param pad_val: 该值无意义 + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`jittor.Var` 类型; + :param dtype: 输出的数据的 dtype 是什么。如 :class:`jittor.long`, :class:`jittor.float32`, :class:`int`, :class:`float` 等; + """ + def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) super().__init__(pad_val=pad_val, dtype=dtype) @staticmethod def pad(batch_field, pad_val=0, dtype=None): """ - :param batch_field 输入的某个 field 的 batch 数据。 - :param pad_val 需要填充的值 - :dtype 数据的类型 + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 + :param dtype: 
数据的类型 """ tensor = get_padded_jittor_tensor(batch_field, dtype=dtype, pad_val=pad_val) return tensor @@ -131,11 +131,11 @@ class JittorSequencePadder(Padder): class JittorTensorPadder(Padder): def __init__(self, pad_val=0, ele_dtype=None, dtype=None): """ - 目前支持 [jittor.Var([3, 2], jittor.Var([1])] 类似的。若内部元素不为 jittor.Var ,则必须含有 tolist() 方法。 + 目前支持 ``[jittor.Var([3, 2], jittor.Var([1])]`` 类似的输入。若内部元素不为 :class:`jittor.Var` ,则必须含有 :meth:`tolist` 方法。 - :param pad_val: 需要 pad 的值。 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 jittor.Var 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 jittor.long, jittor.float32, int, float 等 + :param pad_val: 需要 pad 的值; + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`jittor.Var` 类型; + :param dtype: 输出的数据的 dtype 是什么。如 :class:`jittor.long`, :class:`jittor.float32`, :class:`int`, :class:`float` 等 """ dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) super().__init__(pad_val=pad_val, dtype=dtype) @@ -143,11 +143,11 @@ class JittorTensorPadder(Padder): @staticmethod def pad(batch_field, pad_val=0, dtype=None): """ - 将 batch_field 数据 转为 jittor.Var 并 pad 到相同长度。 + 将 ``batch_field`` 数据 转为 :class:`jittor.Var` 并 pad 到相同长度。 - :param batch_field 输入的某个 field 的 batch 数据。 - :param pad_val 需要填充的值 - :dtype 数据的类型 + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 + :param dtype: 数据的类型 """ try: if not isinstance(batch_field[0], jittor.Var): diff --git a/fastNLP/core/collators/padders/numpy_padder.py b/fastNLP/core/collators/padders/numpy_padder.py index 499fdb8b..2f386978 100644 --- a/fastNLP/core/collators/padders/numpy_padder.py +++ b/fastNLP/core/collators/padders/numpy_padder.py @@ -38,15 +38,15 @@ def _get_dtype(ele_dtype, dtype, class_name): class NumpyNumberPadder(Padder): """ - 可以将形如 [1, 2, 3] 这类的数据转为 np.array([1, 2, 3]) 。可以通过: + 可以将形如 ``[1, 2, 3]`` 这类的数据转为 ``np.array([1, 2, 3])`` 。可以通过:: >>> NumpyNumberPadder.pad([1, 2, 3]) 使用。 - :param pad_val: 该值无意义 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 np.array 类型。 
- :param dtype: 输出的数据的 dtype 是什么 + :param pad_val: 该值无意义; + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`np.array` 类型; + :param dtype: 输出的数据的 dtype ; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, self.__class__.__name__) @@ -54,21 +54,28 @@ class NumpyNumberPadder(Padder): @staticmethod def pad(batch_field, pad_val=0, dtype=None): + """ + 将 ``batch_field`` 数据 转为 :class:`numpy.array` 并 pad 到相同长度。 + + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 + :param dtype: 数据的类型 + """ return np.array(batch_field, dtype=dtype) class NumpySequencePadder(Padder): """ - 将类似于 [[1], [1, 2]] 的内容 pad 为 np.array([[1, 0], [1, 2]]) 可以 pad 多重嵌套的数据。 + 将类似于 ``[[1], [1, 2]]`` 的内容 pad 为 ``np.array([[1, 0], [1, 2]])``, 可以 pad 多重嵌套的数据。 可以通过以下的方式直接使用: >>> NumpySequencePadder.pad([[1], [1, 2]], pad_val=-100, dtype=float) [[ 1. -100.] [ 1. 2.]] - :param pad_val: pad 的值是多少。 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 np.array 类型。 - :param dtype: 输出的数据的 dtype 是什么 + :param pad_val: pad 的值是多少; + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`np.array` 类型; + :param dtype: 输出的数据的 dtype ; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, self.__class__.__name__) @@ -76,18 +83,25 @@ class NumpySequencePadder(Padder): @staticmethod def pad(batch_field, pad_val=0, dtype=None): + """ + 将 ``batch_field`` 数据 转为 :class:`numpy.array` 并 pad 到相同长度。 + + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 + :param dtype: 数据的类型 + """ return get_padded_numpy_array(batch_field, dtype=dtype, pad_val=pad_val) class NumpyTensorPadder(Padder): """ - pad 类似于 [np.array([3, 4]), np.array([1])] 的 field 。若内部元素不为 np.ndarray ,则必须含有 tolist() 方法。 + pad 类似于 ``[np.array([3, 4]), np.array([1])]`` 的 field 。若内部元素不为 :class:`np.ndarray` ,则必须含有 :meth:`tolist` 方法。 >>> NumpyTensorPadder.pad([np.array([3, 4]), np.array([1])], pad_val=-100) [[ 3. 4.] [ 1. 
-100.]] :param pad_val: pad 的值是多少。 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 np.array 类型。 + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`np.array` 类型。 :param dtype: 输出的数据的 dtype 是什么 """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): @@ -96,6 +110,13 @@ class NumpyTensorPadder(Padder): @staticmethod def pad(batch_field, pad_val=0, dtype=None): + """ + 将 ``batch_field`` 数据 转为 :class:`numpy.array` 并 pad 到相同长度。 + + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 + :param dtype: 数据的类型 + """ try: if not isinstance(batch_field[0], np.ndarray): batch_field = [np.array(field.tolist(), dtype=dtype) for field in batch_field] diff --git a/fastNLP/core/collators/padders/oneflow_padder.py b/fastNLP/core/collators/padders/oneflow_padder.py index c218bcca..5e235a0f 100644 --- a/fastNLP/core/collators/padders/oneflow_padder.py +++ b/fastNLP/core/collators/padders/oneflow_padder.py @@ -74,11 +74,11 @@ def _get_dtype(ele_dtype, dtype, class_name): class OneflowNumberPadder(Padder): """ - 可以将形如 [1, 2, 3] 这类的数据转为 oneflow.Tensor([1, 2, 3]) + 可以将形如 ``[1, 2, 3]`` 这类的数据转为 ``oneflow.Tensor([1, 2, 3])``。 - :param pad_val: 该值无意义 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 oneflow.tensor 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 oneflow.long, oneflow.float32, int, float 等 + :param pad_val: 该值无意义; + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`oneflow.Tensor` 类型; + :param dtype: 输出的数据的 dtype,。如 :class:`oneflow.long`, :class:`oneflow.float32`, :class:`int`, :class:`float` 等; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) @@ -86,16 +86,23 @@ class OneflowNumberPadder(Padder): @staticmethod def pad(batch_field, pad_val=0, dtype=None): + """ + 将 ``batch_field`` 数据 转为 :class:`oneflow.Tensor` 并 pad 到相同长度。 + + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 + :param dtype: 数据的类型 + """ return oneflow.tensor(batch_field, dtype=dtype) class 
OneflowSequencePadder(Padder): """ - 将类似于 [[1], [1, 2]] 的内容 pad 为 oneflow.Tensor([[1, 0], [1, 2]]) 可以 pad 多重嵌套的数据。 + 将类似于 ``[[1], [1, 2]]`` 的内容 pad 为 ``oneflow.Tensor([[1, 0], [1, 2]])``, 可以 pad 多重嵌套的数据。 - :param pad_val: 需要 pad 的值。 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 oneflow.tensor 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 oneflow.long, oneflow.float32, int, float 等 + :param pad_val: 需要 pad 的值; + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`oneflow.Tensor` 类型; + :param type: 输出的数据的 dtype,。如 :class:`oneflow.long`, :class:`oneflow.float32`, :class:`int`, :class:`float` 等; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) @@ -103,13 +110,20 @@ class OneflowSequencePadder(Padder): @staticmethod def pad(batch_field, pad_val=0, dtype=None): + """ + 将 ``batch_field`` 数据 转为 :class:`oneflow.Tensor` 并 pad 到相同长度。 + + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 + :param dtype: 数据的类型 + """ tensor = get_padded_oneflow_tensor(batch_field, dtype=dtype, pad_val=pad_val) return tensor class OneflowTensorPadder(Padder): """ - 目前支持 [oneflow.tensor([3, 2], oneflow.tensor([1])] 类似的。若内部元素不为 oneflow.tensor ,则必须含有 tolist() 方法。 + 目前支持 ``[oneflow.tensor([3, 2], oneflow.tensor([1])]`` 类似的输入,若内部元素不为 :class:`oneflow.Tensor` ,则必须含有 :meth:`tolist` 方法。 >>> OneflowTensorPadder.pad([np.array([3, 4]), np.array([1])], pad_val=-100) [[ 3. 4.] 
@@ -119,8 +133,8 @@ class OneflowTensorPadder(Padder): [ 1, -100]]) :param pad_val: 需要 pad 的值。 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 oneflow.tensor 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 oneflow.long, oneflow.float32, int, float 等 + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`oneflow.Tensor` 类型。 + :param dtype: 输出的数据的 dtype,。如 :class:`oneflow.long`, :class:`oneflow.float32`, :class:`int`, :class:`float` 等; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) @@ -128,6 +142,13 @@ class OneflowTensorPadder(Padder): @staticmethod def pad(batch_field, pad_val=0, dtype=None): + """ + 将 ``batch_field`` 数据 转为 :class:`oneflow.Tensor` 并 pad 到相同长度。 + + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 + :param dtype: 数据的类型 + """ device = None try: if not isinstance(batch_field[0], oneflow.Tensor): diff --git a/fastNLP/core/collators/padders/padder.py b/fastNLP/core/collators/padders/padder.py index 6a75b634..afa0b45f 100644 --- a/fastNLP/core/collators/padders/padder.py +++ b/fastNLP/core/collators/padders/padder.py @@ -1,7 +1,7 @@ class Padder: """ - 所有 Padder 对象父类,所有的 Padder 对象都会实现 pad(batch_field, pad_val=0, dtype=None) 的静态函数。 + 所有 **Padder** 对象的父类,所有的 Padder 对象都会实现静态函数 *pad(batch_field, pad_val=0, dtype=None)* 。 """ def __init__(self, pad_val, dtype): diff --git a/fastNLP/core/collators/padders/paddle_padder.py b/fastNLP/core/collators/padders/paddle_padder.py index ab287b58..57d31967 100644 --- a/fastNLP/core/collators/padders/paddle_padder.py +++ b/fastNLP/core/collators/padders/paddle_padder.py @@ -99,11 +99,11 @@ def _get_dtype(ele_dtype, dtype, class_name): class PaddleNumberPadder(Padder): """ - 可以将形如 [1, 2, 3] 这类的数据转为 paddle.Tensor([1, 2, 3]) + 可以将形如 ``[1, 2, 3]`` 这类的数据转为 ``paddle.Tensor([1, 2, 3])`` - :param pad_val: 该值无意义 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 paddle.tensor 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 int, float, 'int32' 等 + 
:param pad_val: 该值无意义; + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`paddle.Tensor` 类型; + :param dtype: 输出的数据的 dtype 是什么。如 :class:`int`, :class:`float`, :class:`int32` 等; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): # 仅当 ele_dtype 是 python number/ numpy number 或者 tensor @@ -112,16 +112,23 @@ class PaddleNumberPadder(Padder): @staticmethod def pad(batch_field, pad_val=0, dtype=None): + """ + 将 ``batch_field`` 数据 转为 :class:`paddle.Tensor` 并 pad 到相同长度。 + + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 + :param dtype: 数据的类型 + """ return paddle.to_tensor(batch_field, dtype=dtype) class PaddleSequencePadder(Padder): """ - 将类似于 [[1], [1, 2]] 的内容 pad 为 paddle.Tensor([[1, 0], [1, 2]]) 可以 pad 多重嵌套的数据。 + 将类似于 ``[[1], [1, 2]]`` 的内容 pad 为 ``paddle.Tensor([[1, 0], [1, 2]])`` 可以 pad 多重嵌套的数据。 :param pad_val: pad 的值。 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 paddle.tensor 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 int, float, 'int32' 等 + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`paddle.Tensor` 类型; + :param dtype: 输出的数据的 dtype 是什么。如 :class:`int`, :class:`float`, :class:`int32` 等; """ def __init__(self, ele_dtype=None, pad_val=0, dtype=None): dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) @@ -129,17 +136,30 @@ class PaddleSequencePadder(Padder): @staticmethod def pad(batch_field, pad_val=0, dtype=None): + """ + 将 ``batch_field`` 数据 转为 :class:`paddle.Tensor` 并 pad 到相同长度。 + + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 + :param dtype: 数据的类型 + """ tensor = get_padded_paddle_tensor(batch_field, dtype=dtype, pad_val=pad_val) return tensor class PaddleTensorPadder(Padder): """ - 目前支持 [paddle.tensor([3, 2], paddle.tensor([2, 1])] 类似的,若内部元素不为 paddle.tensor ,则必须含有 tolist() 方法。 - + 目前支持 ``[paddle.tensor([3, 2], paddle.tensor([2, 1])]`` 类似的输入,若内部元素不为 :class:`paddle.Tensor` ,则必须含有 :meth:`tolist` 方法。 + + >>> PaddleTensorPadder.pad([np.array([3, 4]), np.array([1])], pad_val=-100) + [[ 3. 
4.] + [ 1. -100.]] + >>> PaddleTensorPadder.pad([paddle.to_tensor([3, 4]), paddle.to_tensor([1])], pad_val=-100) + tensor([[ 3, 4], + [ 1, -100]]) :param pad_val: pad 的值。 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 paddle.tensor 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 int, float, 'int32' 等 + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`paddle.Tensor` 类型; + :param dtype: 输出的数据的 dtype 是什么。如 :class:`int`, :class:`float`, :class:`int32` 等; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) @@ -147,6 +167,13 @@ class PaddleTensorPadder(Padder): @staticmethod def pad(batch_field, pad_val=0, dtype=None): + """ + 将 ``batch_field`` 数据 转为 :class:`paddle.Tensor` 并 pad 到相同长度。 + + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 + :param dtype: 数据的类型 + """ try: if not isinstance(batch_field[0], paddle.Tensor): batch_field = [np.array(field.tolist()) for field in batch_field] diff --git a/fastNLP/core/collators/padders/raw_padder.py b/fastNLP/core/collators/padders/raw_padder.py index 3828b2c0..52ba6617 100644 --- a/fastNLP/core/collators/padders/raw_padder.py +++ b/fastNLP/core/collators/padders/raw_padder.py @@ -34,11 +34,11 @@ def _get_dtype(ele_dtype, dtype, class_name): class RawNumberPadder(Padder): """ - 可以将形如 [1, 2, 3] 这类的数据转为 [1, 2, 3] 。实际上该 padder 无意义。 + 可以将形如 ``[1, 2, 3]`` 这类的数据转为 ``[1, 2, 3]`` 。实际上该 padder 无意义。 - :param pad_val: 该值无意义 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 np.array 类型。 - :param dtype: 输出的数据的 dtype 是什么 + :param pad_val: + :param ele_dtype: + :param dtype: """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, self.__class__.__name__) @@ -54,11 +54,11 @@ class RawNumberPadder(Padder): class RawSequencePadder(Padder): """ - 将类似于 [[1], [1, 2]] 的内容 pad 为 [[1, 0], [1, 2]] 。可以 pad 多重嵌套的数据。 + 将类似于 ``[[1], [1, 2]]`` 的内容 pad 为 ``[[1, 0], [1, 2]]`` 。可以 pad 多重嵌套的数据。 - :param pad_val: pad 的值 - :param 
ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 np.array 类型。 - :param dtype: 输出的数据的 dtype 是什么 + :param pad_val: pad 的值; + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`np.array` 类型; + :param dtype: 输出的数据的 dtype ; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, self.__class__.__name__) @@ -68,8 +68,8 @@ class RawSequencePadder(Padder): def pad(batch_field, pad_val=0, dtype=None): """ - :param batch_field: - :param pad_val: + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 :param dtype: 该参数无意义。 :return: """ @@ -78,11 +78,11 @@ class RawSequencePadder(Padder): class RawTensorPadder(Padder): """ - 将类似于 [[1], [1, 2]] 的内容 pad 为 [[1, 0], [1, 2]] 。可以 pad 多重嵌套的数据。 + 将类似于 ``[[1], [1, 2]]`` 的内容 pad 为 ``[[1, 0], [1, 2]]`` 。可以 pad 多重嵌套的数据。 - :param pad_val: pad 的值 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 np.array 类型。 - :param dtype: 输出的数据的 dtype 是什么 + :param pad_val: pad 的值; + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`np.array` 类型; + :param dtype: 输出的数据的 dtype ; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, self.__class__.__name__) @@ -92,8 +92,8 @@ class RawTensorPadder(Padder): def pad(batch_field, pad_val=0, dtype=None): """ - :param batch_field: - :param pad_val: + :param batch_field: 输入的某个 field 的 batch 数据。 + :param pad_val: 需要填充的值 :param dtype: 该参数无意义。 :return: """ diff --git a/fastNLP/core/collators/padders/torch_padder.py b/fastNLP/core/collators/padders/torch_padder.py index 91f58af4..a5ab9149 100644 --- a/fastNLP/core/collators/padders/torch_padder.py +++ b/fastNLP/core/collators/padders/torch_padder.py @@ -77,11 +77,11 @@ def _get_dtype(ele_dtype, dtype, class_name): class TorchNumberPadder(Padder): """ - 可以将形如 [1, 2, 3] 这类的数据转为 torch.Tensor([1, 2, 3]) + 可以将形如 ``[1, 2, 3]`` 这类的数据转为 ``torch.Tensor([1, 2, 3])`` - :param pad_val: 该值无意义 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 torch.tensor 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 
torch.long, torch.float32, int, float 等 + :param pad_val: 该值无意义; + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`torch.Tensor` 类型; + :param dtype: 输出的数据的 dtype 是什么。如 :class:`torch.long`, :class:`torch.float32`, :class:`int`, :class:`float` 等; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) @@ -94,11 +94,11 @@ class TorchNumberPadder(Padder): class TorchSequencePadder(Padder): """ - 将类似于 [[1], [1, 2]] 的内容 pad 为 torch.Tensor([[1, 0], [1, 2]]) 可以 pad 多重嵌套的数据。 + 将类似于 ``[[1], [1, 2]]`` 的内容 pad 为 ``torch.Tensor([[1, 0], [1, 2]])`` 可以 pad 多重嵌套的数据。 - :param pad_val: 需要 pad 的值。 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 torch.tensor 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 torch.long, torch.float32, int, float 等 + :param pad_val: 需要 pad 的值; + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`torch.Tensor` 类型; + :param dtype: 输出的数据的 dtype 是什么。如 :class:`torch.long`, :class:`torch.float32`, :class:`int`, :class:`float` 等; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) @@ -112,7 +112,7 @@ class TorchSequencePadder(Padder): class TorchTensorPadder(Padder): """ - 目前支持 [torch.tensor([3, 2], torch.tensor([1])] 类似的。若内部元素不为 torch.tensor ,则必须含有 tolist() 方法。 + 目前支持 ``[torch.tensor([3, 2], torch.tensor([1])]`` 类似的输入。若内部元素不为 :class:`torch.Tensor` ,则必须含有 :meth:`tolist` 方法。 >>> TorchTensorPadder.pad([np.array([3, 4]), np.array([1])], pad_val=-100) [[ 3. 4.] 
@@ -121,9 +121,9 @@ class TorchTensorPadder(Padder): tensor([[ 3, 4], [ 1, -100]]) - :param pad_val: 需要 pad 的值。 - :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 torch.tensor 类型。 - :param dtype: 输出的数据的 dtype 是什么。如 torch.long, torch.float32, int, float 等 + :param pad_val: 需要 pad 的值; + :param ele_dtype: 用于检测当前 field 的元素类型是否可以转换为 :class:`torch.Tensor` 类型; + :param dtype: 输出的数据的 dtype 是什么。如 :class:`torch.long`, :class:`torch.float32`, :class:`int`, :class:`float` 等; """ def __init__(self, pad_val=0, ele_dtype=None, dtype=None): dtype = _get_dtype(ele_dtype, dtype, class_name=self.__class__.__name__) diff --git a/fastNLP/core/collators/padders/torch_utils.py b/fastNLP/core/collators/padders/torch_utils.py index 3f21333b..d1887b36 100644 --- a/fastNLP/core/collators/padders/torch_utils.py +++ b/fastNLP/core/collators/padders/torch_utils.py @@ -5,6 +5,7 @@ from fastNLP.envs.imports import _NEED_IMPORT_TORCH if _NEED_IMPORT_TORCH: import torch +__all__ = [] def is_torch_tensor_dtype(dtype) -> bool: """ diff --git a/fastNLP/core/collators/padders/utils.py b/fastNLP/core/collators/padders/utils.py index e4a258a8..5ba957e1 100644 --- a/fastNLP/core/collators/padders/utils.py +++ b/fastNLP/core/collators/padders/utils.py @@ -78,13 +78,12 @@ def fill_array(batch_field:List, padded_batch:np.ndarray): def get_padded_numpy_array(batch_field: List, dtype=None, pad_val=0) -> np.ndarray: """ - 例如: - [[1,2], [3]] -> np.array([[1, 2], [3, 0]]) + 将输入 pad 为 :class:`numpy.arraay` 类型,如:``[[1,2], [3]] -> np.array([[1, 2], [3, 0]])`` - :param batch_field: 需要 pad 的对象。需要保证应该是可以进行 pad 的。支持 1d(多为句子长度)/2d(多为文本序列)/3d(多为字符序列) - /4d(多为图片)。 - :param dtype: 目标类别是什么 - :param pad_val: pad 的 value + :param batch_field: 需要 pad 的对象。需要保证应该是可以进行 pad 的。支持 **1d**(多为句子长度)/ **2d**(多为文本序列)/ **3d**(多为字符序列) + /4d(多为图片); + :param dtype: 输出数据的 dtype 类型; + :param pad_val: 填充值; :return: """ shapes = get_shape(batch_field) From d786f392f0a8c8f584926aa8d6b6f75c06985d7a Mon Sep 17 00:00:00 2001 From: x54-729 
<17307130121@fudan.edu.cn> Date: Fri, 1 Jul 2022 12:02:30 +0000 Subject: [PATCH 51/52] =?UTF-8?q?=E8=B0=83=E6=95=B4=20fastNLP/core/control?= =?UTF-8?q?lers=20dataloaders=20dataset=20=E7=9A=84=E6=96=87=E6=A1=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/callbacks/callback.py | 75 ++--- fastNLP/core/callbacks/callback_manager.py | 4 +- fastNLP/core/callbacks/checkpoint_callback.py | 19 +- fastNLP/core/callbacks/early_stop_callback.py | 6 +- fastNLP/core/callbacks/fitlog_callback.py | 15 +- .../core/callbacks/has_monitor_callback.py | 12 +- .../callbacks/load_best_model_callback.py | 4 +- .../core/callbacks/more_evaluate_callback.py | 4 +- fastNLP/core/callbacks/progress_callback.py | 13 +- fastNLP/core/callbacks/topk_saver.py | 10 +- fastNLP/core/collators/collator.py | 4 +- fastNLP/core/collators/packer_unpacker.py | 1 + fastNLP/core/collators/padders/padder.py | 2 +- fastNLP/core/collators/padders/utils.py | 2 +- fastNLP/core/controllers/evaluator.py | 77 +++--- .../controllers/loops/evaluate_batch_loop.py | 14 +- fastNLP/core/controllers/loops/loop.py | 6 +- .../controllers/loops/train_batch_loop.py | 14 +- fastNLP/core/controllers/trainer.py | 158 ++++++----- fastNLP/core/controllers/utils/state.py | 5 +- fastNLP/core/controllers/utils/utils.py | 7 +- .../core/dataloaders/jittor_dataloader/fdl.py | 132 +++++---- .../dataloaders/oneflow_dataloader/fdl.py | 168 ++++++------ .../dataloaders/paddle_dataloader/__init__.py | 2 +- .../core/dataloaders/paddle_dataloader/fdl.py | 193 +++++++------ .../core/dataloaders/prepare_dataloader.py | 20 +- .../core/dataloaders/torch_dataloader/fdl.py | 162 +++++------ .../torch_dataloader/mix_dataloader.py | 96 ++++--- fastNLP/core/dataloaders/utils.py | 4 +- fastNLP/core/dataset/dataset.py | 256 +++++++++--------- fastNLP/core/dataset/field.py | 73 +++-- fastNLP/core/dataset/instance.py | 9 +- .../drivers/paddle_driver/paddle_driver.py | 2 +- 
fastNLP/core/metrics/metric.py | 2 +- .../core/metrics/span_f1_pre_rec_metric.py | 2 +- 35 files changed, 793 insertions(+), 780 deletions(-) diff --git a/fastNLP/core/callbacks/callback.py b/fastNLP/core/callbacks/callback.py index 74224c8a..9584aba5 100644 --- a/fastNLP/core/callbacks/callback.py +++ b/fastNLP/core/callbacks/callback.py @@ -42,7 +42,7 @@ class Callback: on_train_end(trainer) 其它 callback 例如 **on_evaluate_begin(trainer)** / **on_evaluate_end(trainer, results)** / **on_save_model(trainer)** / - **on_load_model(trainer)** / **on_save_checkpoint(trainer)** / **on_load_checkpoint(trainer)** 将根据需要在 :meth:`fastNLP.Trainer.run` + **on_load_model(trainer)** / **on_save_checkpoint(trainer)** / **on_load_checkpoint(trainer)** 将根据需要在 :meth:`Trainer.run ` 中特定的时间调用。 """ @@ -50,8 +50,8 @@ class Callback: r""" 在 ``Trainer`` 初始化后会被触发; - :param trainer: :class:`~fastNLP.Trainer` 实例; - :param driver: :class:`~fastNLP.Trainer` 中的 ``driver`` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; + :param driver: :class:`~fastNLP.core.controllers.Trainer` 中的 ``driver`` 实例; """ pass @@ -59,7 +59,7 @@ class Callback: r""" 在 '预跑'检测 开始前会被触发; - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass @@ -67,7 +67,7 @@ class Callback: r""" 在 '预跑'检测 开始后会被触发; - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; :param sanity_check_res: 预跑得到的评测结果,关于对于 **预跑** 的解释,请见 :meth:`~fastNLP.core.controllers.trainer.Trainer.run`; """ pass @@ -76,7 +76,7 @@ class Callback: r""" 在训练开始前会被触发; - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass @@ -84,7 +84,7 @@ class Callback: r""" 在训练完成后会被触发; - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass @@ -92,7 +92,7 @@ class Callback: r""" 在训练过程中的每一个 epoch 开始前会被触发; - :param 
trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass @@ -100,7 +100,7 @@ class Callback: r""" 在训练过程中的每一个 epoch 完成后会被触发;此时 trainer.cur_epoch_idx 已经完成加 1 操作。 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass @@ -108,7 +108,7 @@ class Callback: r""" 在训练过程中准备取出下一个 batch 的数据时触发 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass @@ -116,17 +116,17 @@ class Callback: r""" 在训练过程中拿到当前的 batch 数据后会被触发; - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass def on_train_batch_begin(self, trainer, batch, indices): r""" - 在取得数据,执行完 ``input_mapping`` (如果 :class:`~fastNLP.Trainer` 传有该参数),并且移动 ``batch`` 中的张量到了指定设备之后会被触发。 + 在取得数据,执行完 ``input_mapping`` (如果 :class:`~fastNLP.core.controllers.Trainer` 传有该参数),并且移动 ``batch`` 中的张量到了指定设备之后会被触发。 其中 ``batch`` 中的数据格式要么是 ``Dataloader`` 返回的每个 ``batch`` 的格式;要么是 ``input_mapping`` 之后的内容。 如果 ``batch`` 是 ``dict`` 类型,直接增删其中的 key 或 修改其中的 value 会影响到输入模型的中的 ``batch`` 数据。 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; :param batch: batch 的数据,已经经过 ``input_mapping`` (如果有) 以及移动到指定设备 。 :param list[int] indices: 当前的 ``batch`` 是数据集中的哪些数据。仅在 ``DataLoader`` 支持得到当前 ``batch index`` 的时候有值, 其它时候为 ``None`` 。 @@ -139,7 +139,7 @@ class Callback: global_forward_batches 累计加1操作之后会被触发。其中梯度更新、梯度置零操作会考虑 **accumulation_steps** ,所以不一定在当前 batch 会 执行。 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass @@ -147,41 +147,42 @@ class Callback: r""" 在训练过程遇到异常时调用。 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; :param exception: 遭遇的异常; """ pass def on_save_model(self, trainer): r""" - 当调用 :meth:`fastNLP.Trainer.save_model` 
时调用,此刻模型还未保存。 + 当调用 :meth:`Trainer.save_model() ` 时调用,此刻模型还未保存。 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass def on_load_model(self, trainer): r""" - 当调用 :meth:`fastNLP.Trainer.load_model` 加载模型时调用,此刻模型还未加载。 + 当调用 :meth:`Trainer.load_model() ` 加载模型时调用,此刻模型还未加载。 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass def on_save_checkpoint(self, trainer) -> Dict: r""" - 当 Trainer 将要保存 checkpoint 的时候触发 (即调用 :meth:`Trainer.save_checkpoint`()` 函数时),该函数用于保存当前 callback 在恢复时需要的相关数据。 + 当 Trainer 将要保存 checkpoint 的时候触发 (即调用 :meth:`Trainer.save_checkpoint() ` + 函数时),该函数用于保存当前 callback 在恢复时需要的相关数据。 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass def on_load_checkpoint(self, trainer, states: Optional[Dict]): r""" - 当 Trainer 要恢复 checkpoint 的时候触发(即调用 :meth:`Trainer.load_checkpoint` 函数时, 此刻 Trainer 与 Driver 已经加载好自身 - 的状态), 参数 states 为 Callback 在调用 :meth:`on_save_checkpoint` 的返回值。 + 当 Trainer 要恢复 checkpoint 的时候触发(即调用 :meth:`Trainer.load_checkpoint() ` + 函数时, 此刻 Trainer 与 Driver 已经加载好自身的状态), 参数 states 为 Callback 在调用 :meth:`on_save_checkpoint` 的返回值。 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; :param states: """ pass @@ -190,7 +191,7 @@ class Callback: r""" 在 backward 前执行。 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; :param outputs: ``model`` 的返回内容。如果有 ``output_mapping``,则 ``outputs`` 中的内容为已经执行了 ``output_mapping`` 后的结果。 """ pass @@ -200,7 +201,7 @@ class Callback: 在 ``backward`` 后执行。在多卡场景下,由于 ``accumulation_steps`` 的影响,仅在需要真正 ``update`` 参数那次梯度回传才会触发梯度同步, 因此在多卡且使用 ``accumulation_steps`` 时,可能存在某些 step 各卡上梯度不一致的问题。 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass @@ 
-208,8 +209,8 @@ class Callback: r""" 在进行 optimizer 优化进行前调用。该接口不一定每次前向计算都会触发,实际调用会受到 ``accumulation_steps`` 的影响。 - :param trainer: :class:`~fastNLP.Trainer` 实例; - :param optimizers: 优化器,内容为在 :class:`~fastNLP.Trainer` 初始化时传入的值。 + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; + :param optimizers: 优化器,内容为在 :class:`~fastNLP.core.controllers.Trainer` 初始化时传入的值。 """ pass @@ -217,8 +218,8 @@ class Callback: r""" 在进行 optimizer 优化进行后调用。该接口不一定每次前向计算都会触发,实际调用会受到 ``accumulation_steps`` 的影响。 - :param trainer: :class:`~fastNLP.Trainer` 实例; - :param optimizers: 优化器,内容为在 :class:`~fastNLP.Trainer` 初始化时传入的值。 + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; + :param optimizers: 优化器,内容为在 :class:`~fastNLP.core.controllers.Trainer` 初始化时传入的值。 """ pass @@ -226,8 +227,8 @@ class Callback: r""" 在进行模型梯度置零前调用。该接口不一定每次前向计算都会触发,实际调用会受到 ``accumulation_steps`` 的影响。 - :param trainer: :class:`~fastNLP.Trainer` 实例; - :param optimizers: 优化器,内容为在 :class:`~fastNLP.Trainer` 初始化时传入的值。 + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; + :param optimizers: 优化器,内容为在 :class:`~fastNLP.core.controllers.Trainer` 初始化时传入的值。 """ pass @@ -235,8 +236,8 @@ class Callback: r""" 在进行模型梯度置零后调用。该接口不一定每次前向计算都会触发,实际调用会受到 ``accumulation_steps`` 的影响。 - :param trainer: :class:`~fastNLP.Trainer` 实例; - :param optimizers: 优化器,内容为在 :class:`~fastNLP.Trainer` 初始化时传入的值。 + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; + :param optimizers: 优化器,内容为在 :class:`~fastNLP.core.controllers.Trainer` 初始化时传入的值。 """ pass @@ -245,7 +246,7 @@ class Callback: 在将要进行 ``evaluate`` 时调用。如果是设置的以 step 数量或自定义地决定 evaluate 的频率,该接口是在 :meth:`on_train_batch_end` 之后 进行调用。如果是以 epoch 数量决定调用时机,该接口是在 :meth:`on_train_epoch_end` 之后调用。 - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; """ pass @@ -253,8 +254,8 @@ class Callback: r""" 结束 evaluate 时调用,并把 evaluate 的结果传入。 - :param trainer: :class:`~fastNLP.Trainer` 实例; - :param results: 
:class:`~fastNLP.Trainer` 内置的 ``Evaluator`` 评测的结果,通常是个 ``dict``; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; + :param results: :class:`~fastNLP.core.controllers.Trainer` 内置的 ``Evaluator`` 评测的结果,通常是个 ``dict``; """ pass diff --git a/fastNLP/core/callbacks/callback_manager.py b/fastNLP/core/callbacks/callback_manager.py index 40f73485..bf1de884 100644 --- a/fastNLP/core/callbacks/callback_manager.py +++ b/fastNLP/core/callbacks/callback_manager.py @@ -146,7 +146,7 @@ class CallbackManager: 断点重训应当保存的状态; 2. 每一个具体的 callback 函数的 filter 的状态; - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; :return: 一个包含上述内容的字典,格式如下: .. code-block:: @@ -196,7 +196,7 @@ class CallbackManager: r""" 用于断点重训的加载函数,对应于断点重训的保存函数; - :param trainer: :class:`~fastNLP.Trainer` 实例; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; :param states: 同 :func:`on_save_checkpoint` 函数的返回值; """ diff --git a/fastNLP/core/callbacks/checkpoint_callback.py b/fastNLP/core/callbacks/checkpoint_callback.py index 30684643..44bd9c03 100644 --- a/fastNLP/core/callbacks/checkpoint_callback.py +++ b/fastNLP/core/callbacks/checkpoint_callback.py @@ -24,20 +24,21 @@ class CheckpointCallback(Callback): - {save_object}-epoch_{epoch_idx}-batch_{global_batch_idx}-exception_{exception_type}/ # exception时保存。 - {save_object}-epoch_{epoch_idx}-batch_{global_batch_idx}-{monitor}_{monitor_value}/ # 满足topk条件存储文件名 - ``model_save_fn`` 为 ``Non``e ,则以上每个 folder 中,将生成 fastnlp_model.pkl.tar 文件。若 ``model_save_fn`` 不为 ``None``, + ``model_save_fn`` 为 ``None`` ,则以上每个 folder 中,将生成 fastnlp_model.pkl.tar 文件。若 ``model_save_fn`` 不为 ``None``, 则 fastNLP 将 folder 绝对路径传递给该函数,fastNLP 在该 folder 下不进行模型保存。默认情况下,本 checkpoint 只保存了 model 的状态;如还需保存 Trainer 的状态以断点重训的话,请使用 ``save_object='trainer'`` 。 :param monitor: 监控的 metric 值。 * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + 将尝试使用 :class:`~fastNLP.core.controllers.Trainer` 中设置 `monitor` 
值(如果有设置)。 * 为 ``str`` - 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 - 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 - * 为 ``Callable`` - 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 - 的 ``monitor`` 值请返回 ``None`` 。 + 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 + 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 + * 为 :class:`Callable` + 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 + 的 ``monitor`` 值请返回 ``None`` 。 + :param folder: 保存的文件夹,fastNLP 将在该文件下以时间戳创建子文件夹,并在里面保存。因此不同次运行可以将被保存到不同的 时间戳文件夹中。如果为 None ,默认使用当前文件夹。 :param every_n_epochs: 多少个 epoch 保存一次。 @@ -50,10 +51,10 @@ class CheckpointCallback(Callback): :param model_save_fn: 个性化的保存函数,当触发保存操作时,就调用这个函数,这个函数应当接受一个文件夹作为参数,不返回任何东西。 如果传入了 ``model_save_fn`` 函数,fastNLP 将不再进行模型相关的保存。在多卡场景下,我们只在 rank 0 上会运行该函数。 :param save_object: 可选 ``['trainer', 'model']`` ,表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 - 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 + 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.core.controllers.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 点继续训练。如果保存的是 ``Model`` 对象,则可以通过 :meth:`Trainer.load_model` 加载该模型权重。 :param save_evaluate_results: 是否保存 evaluate 的结果。如果为 ``True`` ,在保存 topk 模型的 folder 中还将额外保存一个 - ``*``fastnlp_evaluate_results.json``文件,记录当前的 results。仅在设置了 ``topk`` 的场景下有用,默认为 ``True`` 。 + ``fastnlp_evaluate_results.json`` 文件,记录当前的 results。仅在设置了 ``topk`` 的场景下有用,默认为 ``True`` 。 :param kwargs: """ def __init__(self, folder: Optional[Union[str, Path]] = None, every_n_epochs: Optional[int] = None, diff --git a/fastNLP/core/callbacks/early_stop_callback.py b/fastNLP/core/callbacks/early_stop_callback.py index e73a2103..8e542d56 100644 --- a/fastNLP/core/callbacks/early_stop_callback.py +++ b/fastNLP/core/callbacks/early_stop_callback.py @@ -10,16 +10,16 @@ from fastNLP.core.utils.exceptions import 
EarlyStopException class EarlyStopCallback(HasMonitorCallback): """ - 用于 early stop 的 callback 。当监控的结果连续多少次没有变好便 raise 一个 EarlyStopException 。 + 用于 early stop 的 callback 。当监控的结果连续多少次没有变好便 raise 一个 :class:`EarlyStopException` 。 :param monitor: 监控的 metric 值。 * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + 将尝试使用 :class:`~fastNLP.core.controllers.Trainer` 中设置 `monitor` 值(如果有设置)。 * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 - * 为 ``Callable`` + * 为 :class:`Callable` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 的 ``monitor`` 值请返回 ``None`` 。 :param larger_better: monitor 的值是否是越大越好。 diff --git a/fastNLP/core/callbacks/fitlog_callback.py b/fastNLP/core/callbacks/fitlog_callback.py index 10dc49b7..a7716fa6 100644 --- a/fastNLP/core/callbacks/fitlog_callback.py +++ b/fastNLP/core/callbacks/fitlog_callback.py @@ -16,18 +16,19 @@ class FitlogCallback(HasMonitorCallback): 自动记录 ``evaluation`` 结果到 ``fitlog`` 中。会自动记录每一次 ``evaluate`` 后的结果;同时会根据 ``monitor`` 记录最好的结果。另外,会自动将非 ``rank 0`` 上的 ``fitlog`` 设置为 ``debug`` 状态。同时还会在 ``fitlog`` 的 ``other`` 列中记录一个 ``launch_time`` ,可以通过这个数值找到当前这个脚本的在 save_folder (如果有使用其它需要保存模型的 - ``Callback`` ,例如 :class:`~fastNLP.CheckpointCallback` )下的文件夹名称。 + ``Callback`` ,例如 :class:`~fastNLP.core.callbacks.CheckpointCallback` )下的文件夹名称。 :param monitor: 监控的 metric 值。 * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + 将尝试使用 :class:`~fastNLP.core.controllers.Trainer` 中设置 `monitor` 值(如果有设置)。 * 为 ``str`` - 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 - 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 - * 为 ``Callable`` - 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 - 的 ``monitor`` 值请返回 ``None`` 。 + 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 + 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 + * 为 
:class:`Callable` + 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 + 的 ``monitor`` 值请返回 ``None`` 。 + :param larger_better: 是否是越大越好。 :param log_exception: 是否记录 ``exception`` 。 :param log_loss_every: 多少个 ``batch`` 记录一次 loss 到 ``fitlog`` 中。 diff --git a/fastNLP/core/callbacks/has_monitor_callback.py b/fastNLP/core/callbacks/has_monitor_callback.py index 702d27c0..bb865bf8 100644 --- a/fastNLP/core/callbacks/has_monitor_callback.py +++ b/fastNLP/core/callbacks/has_monitor_callback.py @@ -31,11 +31,11 @@ class ResultsMonitor: :param monitor: 监控的 metric 值: * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置); + 将尝试使用 :class:`~fastNLP.core.controllers.Trainer` 中设置 `monitor` 值(如果有设置); * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` ; - * 为 ``Callable`` + * 为 :class:`Callable` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 的 ``monitor`` 值请返回 ``None`` ; :param larger_better: monitor 是否为越大越好; @@ -190,11 +190,11 @@ class HasMonitorCallback(ResultsMonitor, Callback): :param monitor: 监控的 metric 值: * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置); + 将尝试使用 :class:`~fastNLP.core.controllers.Trainer` 中设置 `monitor` 值(如果有设置); * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` ; - * 为 ``Callable`` + * 为 :class:`Callable` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 的 ``monitor`` 值请返回 ``None`` ; :param larger_better: monitor 是否为越大越好; @@ -235,11 +235,11 @@ class ExecuteOnceBetterMonitor(HasMonitorCallback): :param monitor: 监控的 metric 值: * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 ``monitor`` 值(如果有设置); + 将尝试使用 :class:`~fastNLP.core.controllers.Trainer` 中设置 ``monitor`` 值(如果有设置); * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 使用 最长公共字符串算法 从 
``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` ; - * 为 ``Callable`` + * 为 :class:`Callable` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 的 ``monitor`` 值请返回 ``None`` ; :param larger_better: monitor 是否是越大越好; diff --git a/fastNLP/core/callbacks/load_best_model_callback.py b/fastNLP/core/callbacks/load_best_model_callback.py index b530cdc5..2bd41b5a 100644 --- a/fastNLP/core/callbacks/load_best_model_callback.py +++ b/fastNLP/core/callbacks/load_best_model_callback.py @@ -22,11 +22,11 @@ class LoadBestModelCallback(HasMonitorCallback): :param monitor: 监控的 metric 值: * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置); + 将尝试使用 :class:`~fastNLP.core.controllers.Trainer` 中设置 `monitor` 值(如果有设置); * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` ; - * 为 ``Callable`` + * 为 :class:`Callable` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 的 ``monitor`` 值请返回 ``None`` ; :param larger_better: 该 metric 值是否是越大越好; diff --git a/fastNLP/core/callbacks/more_evaluate_callback.py b/fastNLP/core/callbacks/more_evaluate_callback.py index 01a0f7ae..04b35984 100644 --- a/fastNLP/core/callbacks/more_evaluate_callback.py +++ b/fastNLP/core/callbacks/more_evaluate_callback.py @@ -72,14 +72,14 @@ class MoreEvaluateCallback(HasMonitorCallback): 时间戳文件夹中。如果为 ``None`` ,默认使用当前文件夹。 :param only_state_dict: 保存模型时是否只保存 state_dict 。当 ``model_save_fn`` 不为 ``None`` 时,该参数无效。 :param save_object: 可选 ``['trainer', 'model']`` ,表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 - 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 + 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.core.controllers.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 点继续训练。如果保存的是 ``Model`` 对象,则可以通过 :meth:`Trainer.load_model` 加载该模型权重。 :param model_save_fn: 个性化的保存函数,当触发保存操作时,就调用这个函数,这个函数应当接受一个文件夹作为参数,不返回任何东西。 如果传入了 
``model_save_fn`` 函数,fastNLP 将不再进行模型相关的保存。在多卡场景下,我们只在 rank 0 上会运行该函数。 :param save_evaluate_results: 是否保存 evaluate 的结果。如果为 ``True`` ,在保存 topk 模型的 folder 中还将额外保存一个 ``fastnlp_evaluate_results.json`` 文件,记录当前的 results。仅在设置了 ``topk`` 的场景下有效,默认为 True 。 :param save_kwargs: 一个字典,表示更多的保存相关的参数。 - :param kwargs: 其它与 :class:`~fastNLP.Evaluator` 相关的初始化参数,如果不传入,将从 :class:`~fastNLP.Trainer` 中获取。 + :param kwargs: 其它与 :class:`~fastNLP.core.controllers.Evaluator` 相关的初始化参数,如果不传入,将从 :class:`~fastNLP.core.controllers.Trainer` 中获取。 """ def __init__(self, dataloaders, metrics:Dict, evaluate_every:Optional[Union[int, Callable]]=-1, watch_monitor:Union[str, Callable]=None, watch_monitor_larger_better:bool=True, diff --git a/fastNLP/core/callbacks/progress_callback.py b/fastNLP/core/callbacks/progress_callback.py index 3a412b03..681ea4d3 100644 --- a/fastNLP/core/callbacks/progress_callback.py +++ b/fastNLP/core/callbacks/progress_callback.py @@ -68,13 +68,14 @@ class RichCallback(ProgressCallback): :param monitor: 监控的 metric 值。当检测到这个key的结果更好时,会打印出不同的颜色进行提示。 * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + 将尝试使用 :class:`~fastNLP.core.controllers.Trainer` 中设置 `monitor` 值(如果有设置)。 * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 - * 为 ``Callable`` + * 为 :class:`Callable` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 的 ``monitor`` 值请返回 ``None`` 。 + :param larger_better: 是否是 monitor 的结果越大越好。 :param format_json: 是否格式化 json 再打印 """ @@ -177,11 +178,11 @@ class RawTextCallback(ProgressCallback): :param monitor: 监控的 metric 值。当检测到这个key的结果更好时,会打印出不同的颜色进行提示。 * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + 将尝试使用 :class:`~fastNLP.core.controllers.Trainer` 中设置 `monitor` 值(如果有设置)。 * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 - * 为 ``Callable`` + * 为 
:class:`Callable` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 的 ``monitor`` 值请返回 ``None`` 。 :param larger_better: 是否是monitor的结果越大越好。 @@ -251,11 +252,11 @@ class TqdmCallback(ProgressCallback): :param monitor: 监控的 metric 值。当检测到这个key的结果更好时,会打印出不同的颜色进行提示。 * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + 将尝试使用 :class:`~fastNLP.core.controllers.Trainer` 中设置 `monitor` 值(如果有设置)。 * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 - * 为 ``Callable`` + * 为 :class:`Callable` 接受参数为 ``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 的 ``monitor`` 值请返回 ``None`` 。 :param larger_better: 是否是 monitor 的结果越大越好。 diff --git a/fastNLP/core/callbacks/topk_saver.py b/fastNLP/core/callbacks/topk_saver.py index c5892674..1ac23b77 100644 --- a/fastNLP/core/callbacks/topk_saver.py +++ b/fastNLP/core/callbacks/topk_saver.py @@ -25,7 +25,7 @@ class Saver: :param folder: 保存在哪个文件夹下,默认为当前 folder 下。 :param save_object: 可选 ``['trainer', 'model']`` ,表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 - 保存 ``trainer`` 对象的话,将会保存 :class:~fastNLP.Trainer 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 + 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.core.controllers.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 点继续训练。如果保存的是 ``Model`` 对象,则可以通过 :meth:`Trainer.load_model` 加载该模型权重。 :param only_state_dict: 保存时是否仅保存权重,在 model_save_fn 不为 None 时无意义。 :param model_save_fn: 个性化的保存函数,当触发保存操作时,就调用这个函数,这个函数应当接受一个文件夹作为参数,不返回任何东西。 @@ -182,17 +182,17 @@ class TopkSaver(ResultsMonitor, Saver): :param monitor: 监控的 metric 值: * 为 ``None`` - 将尝试使用 :class:`~fastNLP.Trainer` 中设置 `monitor` 值(如果有设置)。 + 将尝试使用 :class:`~fastNLP.core.controllers.Trainer` 中设置 `monitor` 值(如果有设置)。 * 为 ``str`` 尝试直接使用该名称从 ``evaluation`` 结果中寻找,如果在 ``evaluation`` 结果中没有找到完全一致的名称,将 使用 最长公共字符串算法 从 ``evaluation`` 结果中找到最匹配的那个作为 ``monitor`` 。 - * 为 ``Callable`` + * 为 :class:`Callable` 接受参数为 
``evaluation`` 的结果(字典类型),返回一个 ``float`` 值作为 ``monitor`` 的结果,如果当前结果中没有相关 的 ``monitor`` 值请返回 ``None`` 。 :param larger_better: 该 monitor 是否越大越好。 :param folder: 保存在哪个文件夹下,默认为当前 folder 下。 :param save_object: 可选 ``['trainer', 'model']`` ,表示在保存时的保存对象为 ``trainer+model`` 还是 只是 ``model`` 。如果 - 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 + 保存 ``trainer`` 对象的话,将会保存 :class:`~fastNLP.core.controllers.Trainer` 的相关状态,可以通过 :meth:`Trainer.load_checkpoint` 加载该断 点继续训练。如果保存的是 ``Model`` 对象,则可以通过 :meth:`Trainer.load_model` 加载该模型权重。 :param only_state_dict: 保存时是否仅保存权重,在 ``model_save_fn`` 不为 None 时无意义。 :param model_save_fn: 个性化的保存函数,当触发保存操作时,就调用这个函数,这个函数应当接受一个文件夹作为参数,不返回任何东西。 @@ -220,7 +220,7 @@ class TopkSaver(ResultsMonitor, Saver): @rank_zero_call def save_topk(self, trainer, results: Dict) -> Optional[str]: """ - 根据 results 是否满足 topk 的相关设定决定是否保存,如果发生了保存,将返回保存的文件夹。如果返回为 None ,则说明此次没有满足 + 根据 ``results`` 是否满足 topk 的相关设定决定是否保存,如果发生了保存,将返回保存的文件夹。如果返回为 ``None`` ,则说明此次没有满足 topk 要求,没有发生保存。 :param trainer: diff --git a/fastNLP/core/collators/collator.py b/fastNLP/core/collators/collator.py index fcffdaf0..dde3e2af 100644 --- a/fastNLP/core/collators/collator.py +++ b/fastNLP/core/collators/collator.py @@ -205,7 +205,7 @@ class Collator: :param pad_val: 这个 field 的默认 pad 值。如果设置为 ``None``,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 ``None`` 。如果 ``backend`` 为 ``None``, 该值无意义。 - :param dtype: 对于需要 pad 的 field ,该 field 的数据 ``dtype`` 应该是什么。 + :param dtype: 对于需要 pad 的 field ,该 field 数据的 ``dtype`` 。 :param backend: 可选 ``['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto']`` ,分别代表,输出为 :class:`list`, :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`paddle.Tensor`, :class:`jittor.Var`, :class:`oneflow.Tensor` 类型。 若 ``pad_val`` 为 ``None`` ,该值无意义 。 @@ -291,7 +291,7 @@ class Collator: def set_ignore(self, *field_names) -> "Collator": """ - 如果有的内容不希望输出,可以在此处进行设置,被设置的 field 将在 batch 
的输出中被忽略。 + 如果有的内容不希望输出,可以在此处进行设置,被设置的 field 将在 batch 的输出中被忽略:: >>> collator = Collator().set_ignore('field1', 'field2') diff --git a/fastNLP/core/collators/packer_unpacker.py b/fastNLP/core/collators/packer_unpacker.py index f2b734b7..7d9c23cd 100644 --- a/fastNLP/core/collators/packer_unpacker.py +++ b/fastNLP/core/collators/packer_unpacker.py @@ -2,6 +2,7 @@ from collections import defaultdict from functools import reduce from typing import Sequence, Mapping, Dict +__all__ = [] class MappingPackerUnpacker: @staticmethod diff --git a/fastNLP/core/collators/padders/padder.py b/fastNLP/core/collators/padders/padder.py index afa0b45f..783d8fa2 100644 --- a/fastNLP/core/collators/padders/padder.py +++ b/fastNLP/core/collators/padders/padder.py @@ -1,7 +1,7 @@ class Padder: """ - 所有 **Padder** 对象的父类,所有的 Padder 对象都会实现静态函数 *pad(batch_field, pad_val=0, dtype=None)* 。 + 所有 **Padder** 对象的父类,所有的 Padder 对象都会实现静态函数 ``pad(batch_field, pad_val=0, dtype=None)`` 。 """ def __init__(self, pad_val, dtype): diff --git a/fastNLP/core/collators/padders/utils.py b/fastNLP/core/collators/padders/utils.py index 5ba957e1..6a50b33d 100644 --- a/fastNLP/core/collators/padders/utils.py +++ b/fastNLP/core/collators/padders/utils.py @@ -80,7 +80,7 @@ def get_padded_numpy_array(batch_field: List, dtype=None, pad_val=0) -> np.ndarr """ 将输入 pad 为 :class:`numpy.arraay` 类型,如:``[[1,2], [3]] -> np.array([[1, 2], [3, 0]])`` - :param batch_field: 需要 pad 的对象。需要保证应该是可以进行 pad 的。支持 **1d**(多为句子长度)/ **2d**(多为文本序列)/ **3d**(多为字符序列) + :param batch_field: 需要 pad 的对象。需要保证应该是可以进行 pad 的。支持 **1d** (多为句子长度)/ **2d** (多为文本序列)/ **3d** (多为字符序列) /4d(多为图片); :param dtype: 输出数据的 dtype 类型; :param pad_val: 填充值; diff --git a/fastNLP/core/controllers/evaluator.py b/fastNLP/core/controllers/evaluator.py index ac5b7c05..2b749b29 100644 --- a/fastNLP/core/controllers/evaluator.py +++ b/fastNLP/core/controllers/evaluator.py @@ -1,5 +1,5 @@ r""" -``Evaluator`` 是新版 fastNLP 中用来进行评测模型的评测器,其与 ``Trainer`` 相对应,二者共同构建起了 fastNLP 
中**训练**和**评测**的框架。 +``Evaluator`` 是新版 **fastNLP** 中用来进行评测模型的评测器,其与 ``Trainer`` 相对应,二者共同构建起了 **fastNLP** 中 **训练** 和 **评测** 的框架。 ``Evaluator`` 的整体架构与 ``Trainer`` 类似,也是利用 ``Driver`` 来负责底层的评测逻辑。通过使用 ``Evaluator``,您可以快速、方便、准确地 对您的模型进行全方位地评测。 @@ -75,11 +75,11 @@ class Evaluator: :param device: 等价于 ``Trainer`` 中的 ``device`` 参数; :param evaluate_batch_step_fn: 您可以传入该参数来定制每次评测一个 batch 的数据时所执行的函数。该函数应接受的两个参数为 ``evaluator`` 和 ``batch``, 不需要有返回值;可以参考 :meth:`~fastNLP.core.controllers.loops.evaluate_batch_loop.EvaluateBatchLoop.batch_step_fn`; - :param evaluate_fn: 用来控制 ``Evaluator`` 在评测的前向传播过程中调用的是哪一个函数,例如对于 pytorch 而言,通过该参数确定使用的是 ``model.evaluate_step`` 还是 - ``model.forward``(不同训练框架所使用的的前向传播函数的方法名称不同); + :param evaluate_fn: 用来控制 ``Evaluator`` 在评测的前向传播过程中调用的是哪一个函数,例如对于 pytorch 而言,通过该参数确定使用的是 :meth:`model.evaluate_step` 还是 + :meth:`model.forward` (不同训练框架所使用的的前向传播函数的方法名称不同); 1. 如果该值是 ``None``,那么我们会默认使用 ``evaluate_step`` 当做前向传播的函数,如果在模型中没有找到该方法,则使用训练框架默认的前向传播函数; - 2. 如果为 ``str`` 类型,例如为 ``my_evaluate_step_fn``,则尝试寻找 ``model.my_evaluate_step_fn``,如果找不到则直接报错; + 2. 
如果为 ``str`` 类型,例如为 ``'my_evaluate_step_fn'``,则尝试寻找 :meth:`model.my_evaluate_step_fn`,如果找不到则直接报错; :param input_mapping: 等价于 ``Trainer`` 中的 ``input_mapping`` 参数;对具体的用于评测一个 batch 的数据使用 ``input_mapping`` 处理之后再输入到 ``model`` 以及 ``metric`` 中。如果针对 ``model`` 和 ``metric`` 需要不同的 ``mapping``,请考虑使用 ``evaluate_batch_step_fn`` 参数定制; @@ -97,20 +97,27 @@ class Evaluator: ``metric`` 的计算都是自动化的,因此其一定需要参数匹配:根据 ``metric.update`` 的函数签名直接从字典数据中抽取其需要的参数传入进去; - :param fp16: 是否在评测时使用 fp16; + :param fp16: 是否在评测时使用 fp16 混合精度; :param verbose: 是否打印 evaluate 的结果; :kwargs: * *torch_kwargs* -- 等价于 ``Trainer`` 中的 ``torch_kwargs`` 参数; + * *paddle_kwargs* -- 等价于 ``Trainer`` 中的 ``paddle_kwargs`` 参数; + * *fairscale_kwargs* -- 等价于 ``Trainer`` 中的 ``fairscale_kwargs`` 参数; + * *deepspeed_kwargs* -- 等价于 ``Trainer`` 中的 ``deepspeed_kwargs`` 参数; + * *oneflow_kwargs* -- 等价于 ``Trainer`` 中的 ``oneflow_kwargs`` 参数; * *data_device* -- 等价于 ``Trainer`` 中的 ``data_device`` 参数; * *model_use_eval_mode* (``bool``) -- - 是否在评测的时候将 ``model`` 的状态设置成 ``eval`` 状态。在 ``eval`` 状态下,``model`` 的 - ``dropout`` 与 ``batch normalization`` 将会关闭。默认为 ``True``。如果为 ``False``,``fastNLP`` 不会对 ``model`` 的 ``evaluate`` 状态做任何设置。无论 - 该值是什么,``fastNLP`` 都会在评测后将 ``model`` 的状态设置为 ``train``; + 是否在评测的时候将 ``model`` 的状态设置成 ``eval`` 状态。在 ``eval`` 状态下,``model`` 的 + ``dropout`` 与 ``batch normalization`` 将会关闭。默认为 ``True``。如果为 ``False``,``fastNLP`` 不会对 ``model`` 的 ``evaluate`` 状态做任何设置。无论 + 该值是什么,``fastNLP`` 都会在评测后将 ``model`` 的状态设置为 ``train``; * *use_dist_sampler* -- - 表示在 ``Evaluator`` 中在使用分布式的时候是否将保证 dataloader 的 ``sampler`` 替换为 - 分布式的 ``sampler``,其特点是每个卡上的数据之间不重叠,所有卡上数据的加起来是整个数据集。若传入的 dataloader - 的 sampler 为 (a) 深度学习框架自带的默认 sampler ; (b) fastNLP 的 Sampler 等,则将替换为 - :class:`~fastNLP.UnrepeatedSequentialSampler`,如果这个行为不是期待的,请本参数设置为 ``False``,并针对每个卡控制其可以 + 表示在 ``Evaluator`` 中在使用分布式的时候是否将保证 dataloader 的 ``sampler`` 替换为 + 分布式的 ``sampler``,其特点是每个卡上的数据之间不重叠,所有卡上数据的加起来是整个数据集。若传入的 dataloader + 的 sampler 为: + + - 深度学习框架自带的默认 sampler ; + - fastNLP 的 Sampler ; + 则将替换为 
:class:`~fastNLP.UnrepeatedSequentialSampler`,如果这个行为不是期待的,请本参数设置为 ``False``,并针对每个卡控制其可以 用到的数据。如果不是以上两类 sampler ,fastNLP 将报错。 * *output_from_new_proc* -- 等价于 ``Trainer`` 中的 ``output_from_new_proc`` 参数; * *progress_bar* -- 等价于 ``Trainer`` 中的 ``progress_bar`` 参数; @@ -123,7 +130,7 @@ class Evaluator: def __init__(self, model, dataloaders, metrics: Optional[Dict] = None, driver: Union[str, Driver] = 'auto', device: Optional[Union[int, List[int], str]] = None, - evaluate_batch_step_fn: Optional[callable] = None, evaluate_fn: Optional[str] = None, + evaluate_batch_step_fn: Optional[Callable] = None, evaluate_fn: Optional[str] = None, input_mapping: Optional[Union[Callable, Dict]] = None, output_mapping: Optional[Union[Callable, Dict]] = None, model_wo_auto_param_call: bool = False, fp16: bool = False, verbose: int = 1, **kwargs): @@ -203,16 +210,16 @@ class Evaluator: """ 用于帮助您加载模型的辅助函数; - :param folder: 存放着您需要加载的 model 的文件夹,默认会尝试读取该文件夹下的 fastnlp_model.pkl.tar 文件。在 model_load_fn 不为空时, - 直接将该 folder 传递到 model_load_fn 中; - :param only_state_dict: 要读取的文件中是否仅包含模型权重。在 ``model_load_fn 不为 None`` 时,该参数无意义; - :param model_load_fn: ``callable`` 的函数,接受一个 folder 作为参数,需要注意该函数不需要返回任何内容; + :param folder: 存放着您需要加载的 model 的文件夹,默认会尝试读取该文件夹下的 ``fastnlp_model.pkl.tar`` 文件。在 ``model_load_fn`` 不为空时, + 直接将该 folder 传递到 ``model_load_fn`` 中; + :param only_state_dict: 要读取的文件中是否仅包含模型权重。在 ``model_load_fn`` 不为 ``None`` 时,该参数无意义; + :param model_load_fn: :class:`Callable` 的函数,接受一个 folder 作为参数,需要注意该函数不需要返回任何内容; :param kwargs: 理论上您不需要使用到该参数; .. note:: 注意您需要在初始化 ``Evaluator`` 后再通过 ``evaluator`` 实例来调用该函数;这意味着您需要保证在保存和加载时使用的 ``driver`` 是属于同一个 - 训练框架的,例如都是 ``pytorch`` 或者 ``paddle``; + 训练框架的,例如都是 **pytorch** 或者 **PaddlePaddle** ; """ self.driver.barrier() if not isinstance(folder, (io.BytesIO, BinaryIO)): @@ -240,15 +247,14 @@ class Evaluator: """ 该函数是在 ``Evaluator`` 初始化后用于真正开始评测的函数; - 返回一个字典类型的数据,其中key为metric的名字,value为对应metric的结果。 + 返回一个字典类型的数据,其中 key 为 metric 的名字,value 为对应 metric 的结果。 - 1. 
如果存在多个metric,一个dataloader的情况,key的命名规则是 - ``metric_indicator_name#metric_name`` + 1. 如果存在多个 metric ,一个 dataloader 的情况,key 的命名规则是 + ``metric_indicator_name#metric_name``; 2. 如果存在多个数据集,一个metric的情况,key的命名规则是 - ``metric_indicator_name#metric_name#dataloader_name`` (其中 # 是默认的 separator ,可以通过 Evaluator 初始化参数修改)。 - 如果存在多个metric,多个dataloader的情况,key的命名规则是 - ``metric_indicator_name#metric_name#dataloader_name`` - 其中 metric_indicator_name 可能不存在; + ``metric_indicator_name#metric_name#dataloader_name`` (其中 **#** 是默认的 separator ,可以通过 Evaluator 初始化参数修改); + 3. 如果存在多个metric,多个dataloader的情况,key的命名规则是 + ``metric_indicator_name#metric_name#dataloader_name``,其中 metric_indicator_name 可能不存在; :param num_eval_batch_per_dl: 每个 dataloader 测试前多少个 batch 的数据,-1 为测试所有数据。 :return: 返回评测得到的结果,是一个没有嵌套的字典; @@ -360,7 +366,7 @@ class Evaluator: def reset(self): """ - 调用所有 metric 的 reset() 方法,清除累积的状态。 + 调用所有 metric 的 :meth:`reset` 方法,清除累积的状态。 :return: """ @@ -368,7 +374,7 @@ class Evaluator: def update(self, batch, outputs): """ - 自动调用所有 metric 的 update 方法,会根据不同 metric 的参数列表进行匹配传参。 + 自动调用所有 metric 的 :meth:`update` 方法,会根据不同 metric 的参数列表进行匹配传参。 :param batch: 一般是来自于 DataLoader 的输出,如果不为 dict 类型的话,该值将被忽略。 :param outputs: 一般是来自于模型的输出。类别应为 dict 或者 dataclass 类型。 @@ -378,7 +384,7 @@ class Evaluator: def get_metric(self) -> Dict: """ - 调用所有 metric 的 get_metric 方法,并返回结果。其中 key 为 metric 的名称,value 是各个 metric 的结果。 + 调用所有 metric 的 :meth:`get_metric` 方法,并返回结果。其中 key 为 metric 的名称,value 是各个 metric 的结果。 :return: """ @@ -387,11 +393,9 @@ class Evaluator: @property def metrics_wrapper(self): """ - 由于需要保持 Evaluator 中 metrics 对象与用户传入的 metrics 保持完全一致(方便他在 evaluate_batch_step_fn )中使用,同时也为了支持 + 由于需要保持 Evaluator 中 ``metrics`` 对象与用户传入的 ``metrics`` 保持完全一致(方便在 ``evaluate_batch_step_fn`` )中使用,同时也为了支持 不同形式的 metric( fastNLP 的 metric/torchmetrics 等),所以 Evaluator 在进行 metric 操作的时候都调用 metrics_wrapper 进行操作。 - - Returns: """ if self._metric_wrapper is None: self._metric_wrapper = _MetricsWrapper(self.metrics, evaluator=self) @@ -399,11 
+403,12 @@ class Evaluator: def evaluate_step(self, batch): """ - 将 batch 传递到model中进行处理,根据当前 evaluate_fn 选择进行 evaluate 。会将返回结果经过 output_mapping 处理后再 - 返回。 + 将 ``batch`` 传递到 model 中进行处理,根据当前 ``evaluate_fn`` 选择进行 evaluate 。会将返回结果经过 ``output_mapping`` + 处理后再 +返回。 - :param batch: {evaluate_fn} 函数支持的输入类型 - :return: {evaluate_fn} 函数的输出结果,如果有设置 output_mapping ,将是 output_mapping 之后的结果。 + :param batch: ``evaluate_fn`` 函数支持的输入类型 + :return: ``evaluate_fn`` 函数的输出结果,如果有设置 ``output_mapping`` ,将是 ``output_mapping`` 之后的结果。 """ outputs = self.driver.model_call(batch, self._evaluate_step, self._evaluate_step_signature_fn) outputs = match_and_substitute_params(self.output_mapping, outputs) @@ -412,7 +417,7 @@ class Evaluator: @property def metrics(self): """ - 返回用户传入的 metrics 对象。 + 返回用户传入的 ``metrics`` 对象。 :return: """ diff --git a/fastNLP/core/controllers/loops/evaluate_batch_loop.py b/fastNLP/core/controllers/loops/evaluate_batch_loop.py index fb936236..c31cfa0e 100644 --- a/fastNLP/core/controllers/loops/evaluate_batch_loop.py +++ b/fastNLP/core/controllers/loops/evaluate_batch_loop.py @@ -13,7 +13,7 @@ class EvaluateBatchLoop(Loop): r""" ``EvaluateBatchLoop`` 针对一个 dataloader 的数据完成一个 epoch 的评测迭代过程; - :param batch_step_fn: 您可以传入该参数来替换默认的 bath_step_fn; + :param batch_step_fn: 您可以传入该参数来替换默认的 ``bath_step_fn``; """ def __init__(self, batch_step_fn:Optional[Callable]=None): if batch_step_fn is not None: @@ -21,10 +21,10 @@ class EvaluateBatchLoop(Loop): def run(self, evaluator, dataloader) -> Dict: r""" - 需要返回在传入的 dataloader 中的 evaluation 结果 + 需要返回在传入的 ``dataloader`` 中的 evaluation 结果 - :param evaluator: Evaluator 对象 - :param dataloader: 当前需要进行评测的dataloader + :param evaluator: :class:`~fastNLP.core.controllers.Evaluator` 对象 + :param dataloader: 当前需要进行评测的 ``dataloader`` :return: """ iterator = iter(dataloader) @@ -58,10 +58,10 @@ class EvaluateBatchLoop(Loop): @staticmethod def batch_step_fn(evaluator, batch): r""" - 针对一个 batch 的数据的评测过程; + 针对一个 ``batch`` 的数据的评测过程; - :param evaluator: 
Evaluator 对象 - :param batch: 当前需要评测的一个 batch 的数据; + :param evaluator: :class:`~fastNLP.core.controllers.Evaluator` 对象 + :param batch: 当前需要评测的一个 ``batch`` 的数据; """ outputs = evaluator.evaluate_step(batch) # 将batch输入到model中得到结果 evaluator.update(batch, outputs) # evaluator将根据metric的形参名字从batch/outputs中取出对应的值进行赋值 diff --git a/fastNLP/core/controllers/loops/loop.py b/fastNLP/core/controllers/loops/loop.py index b1952236..dc149587 100644 --- a/fastNLP/core/controllers/loops/loop.py +++ b/fastNLP/core/controllers/loops/loop.py @@ -1,5 +1,5 @@ r""" -``TrainBatchLoop`` 和 ``EvaluateBatchLoop`` 的父类,为了在实现 fastNLP 主要功能的同时保证 fastNLP 的易用性和代码的易读性,我们只对 +``TrainBatchLoop`` 和 ``EvaluateBatchLoop`` 的父类,为了在实现 **fastNLP** 主要功能的同时保证 **fastNLP** 的易用性和代码的易读性,我们只对 训练中的循环做了非常简单的抽象,``Loop`` 表示的是在训练或者评测的过程中针对单独一个 ``dataloader`` 的一个 ``epoch`` 的运算过程; 更为具体的使用详见 :class:`~fastNLP.core.controllers.loops.train_batch_loop.TrainBatchLoop` 和 @@ -24,7 +24,7 @@ class Loop: .. note:: - ``Trainer`` 和 ``Evaluator`` 中都提供了方便您进行定制 ``Loop`` 的接口函数,例如 ``Trainer.train_step``,``Trainer.backward``等; + ``Trainer`` 和 ``Evaluator`` 中都提供了方便您进行定制 ``Loop`` 的接口函数,例如 ``Trainer.train_step``, ``Trainer.backward`` 等; 在定制您自己的 ``TrainBatchLoop`` 时,请务必记得在正确的时机调用对应的 callback 函数,详见 :class:`~fastNLP.core.controllers.loops.train_batch_loop.TrainBatchLoop` 中对于 callback 函数的调用; @@ -34,5 +34,5 @@ class Loop: @staticmethod def batch_step_fn(controller: Union["Trainer", "Evaluator"], batch): r""" - 对于具体的一个 batch 的数据,实现训练或者评测过程中的一步; + 对于具体的一个 ``batch`` 的数据,实现训练或者评测过程中的一步; """ \ No newline at end of file diff --git a/fastNLP/core/controllers/loops/train_batch_loop.py b/fastNLP/core/controllers/loops/train_batch_loop.py index ca8389b1..ca97fe9e 100644 --- a/fastNLP/core/controllers/loops/train_batch_loop.py +++ b/fastNLP/core/controllers/loops/train_batch_loop.py @@ -14,7 +14,7 @@ class TrainBatchLoop(Loop): r""" ``TrainBatchLoop`` 针对一个 dataloader 的数据完成一个 epoch 的训练迭代过程; - :param batch_step_fn: 您可以传入该参数来替换默认的 bath_step_fn; + :param 
batch_step_fn: 您可以传入该参数来替换默认的 ``bath_step_fn``; """ def __init__(self, batch_step_fn: Optional[Callable] = None): @@ -23,14 +23,14 @@ class TrainBatchLoop(Loop): def run(self, trainer, dataloader): r""" - 对传入的 dataloader 进行一个 epoch 的主要的训练的循环过程; + 对传入的 ``dataloader`` 进行一个 epoch 的主要的训练的循环过程; .. note:: 您不需要自己主动地调用该方法,``Trainer`` 会负责调用该方法来完成训练过程; - :param trainer: ``Trainer`` 实例; - :param dataloader: 当前训练所使用的 dataloader; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; + :param dataloader: 当前训练所使用的 ``dataloader``; """ get_batch_indices = dataloader.get_batch_indices if callable(getattr(dataloader, 'get_batch_indices', None))\ else lambda *args, **kwargs: None @@ -68,10 +68,10 @@ class TrainBatchLoop(Loop): @staticmethod def batch_step_fn(trainer, batch): r""" - 针对一个 batch 的数据的训练过程; + 针对一个 ``batch`` 的数据的训练过程; - :param trainer: ``Trainer`` 实例; - :param batch: 一个 batch 的数据; + :param trainer: :class:`~fastNLP.core.controllers.Trainer` 实例; + :param batch: 一个 ``batch`` 的数据; """ outputs = trainer.train_step(batch) trainer.backward(outputs) diff --git a/fastNLP/core/controllers/trainer.py b/fastNLP/core/controllers/trainer.py index c1e64636..af227380 100644 --- a/fastNLP/core/controllers/trainer.py +++ b/fastNLP/core/controllers/trainer.py @@ -1,7 +1,7 @@ """ -``Trainer`` 是 fastNLP 用于训练模型的专门的训练器,其支持多种不同的驱动模式 ``Driver``,不仅包括最为经常使用的 DDP,而且还支持 jittor 等国产 -的训练框架;新版的 fastNLP 新加入了方便的 callback 函数修饰器,并且支持定制用户自己特定的训练循环过程;通过使用该训练器,用户只需要自己实现 -模型部分,而将训练层面的逻辑完全地交给 fastNLP; +``Trainer`` 是 **fastNLP** 用于训练模型的专门的训练器,其支持多种不同的驱动模式 ``Driver``,不仅包括最为经常使用的 DDP,而且还支持 jittor 等国产 +的训练框架;新版的 **fastNLP** 新加入了方便的 callback 函数修饰器,并且支持定制用户自己特定的训练循环过程;通过使用该训练器,用户只需要自己实现 +模型部分,而将训练层面的逻辑完全地交给 **fastNLP**; """ from typing import Union, Optional, List, Callable, Dict, BinaryIO @@ -42,7 +42,7 @@ class Trainer(TrainerEventTrigger): r""" 用于支持快速训练的训练器。 - :param model: 训练所需要的模型,例如 ``torch.nn.Module``; + :param model: 训练所需要的模型,例如 :class:`torch.nn.Module`; .. 
note:: @@ -55,10 +55,17 @@ class Trainer(TrainerEventTrigger): 您应当使用 ``TorchDDPDriver``,意味着您需要通过 ``python -m torch.distributed.launch`` 的方式来启动训练,此时参数 ``device`` 应当设置为 None(此时我们会忽略该参数),具体见下面对于参数 ``device`` 的更详细的解释。 - :param driver: 训练模型所使用的具体的驱动模式,应当为以下选择中的一个:["auto", "torch", "paddle", "jittor", "fairscale"]。其值为 ``"auto"`` 时, - **FastNLP** 会根据传入模型的类型自行判断使用哪一种模式;其值为 "torch" 时,表示使用 ``TorchSingleDriver`` 或者 ``TorchDDPDriver``; - 其值为 "paddle" 时,表示使用 ``PaddleSingleDriver`` 或者 ``PaddleFleetDriver``;其值为 "jittor" 时,表示使用 ``JittorSingleDriver`` - 或者 ``JittorMPIDriver``;其值为 "fairscale" 时,表示使用 ``FairScaleDriver``。在指定了框架的情况下,具体使用哪一种取决于参数 ``device`` 的设置; + :param driver: 训练模型所使用的具体的驱动模式,应当为以下选择中的一个:``["auto", "torch", "paddle", "jittor", "fairscale", "deepspeed", "oneflow"]``: + + 1. 值为 ``"auto"`` 时,**FastNLP** 会根据传入模型的类型自行判断使用哪一种模式; + 2. 其值为 ``"torch"`` 时,表示使用 :class:`~fastNLP.core.drivers.TorchSingleDriver` 或者 :class:`~fastNLP.core.drivers.TorchDDPDriver`; + 3. 其值为 ``"paddle"`` 时,表示使用 :class:`~fastNLP.core.drivers.PaddleSingleDriver` 或者 :class:`~fastNLP.core.drivers.PaddleFleetDriver`; + 4. 其值为 ``"jittor"`` 时,表示使用 :class:`~fastNLP.core.drivers.JittorSingleDriver` 或者 :class:`~fastNLP.core.drivers.JittorMPIDriver`; + 5. 其值为 ``"fairscale"`` 时,表示使用 :class:`~fastNLP.core.drivers.FairScaleDriver`; + 6. 其值为 ``"deepspeed"`` 时,表示使用 :class:`~fastNLP.core.drivers.DeepSpeedDriver`; + 7. 其值为 ``"oneflow"`` 时,表示使用 :class:`~fastNLP.core.drivers.OneflowSingleDriver` 或者 :class:`~fastNLP.core.drivers.OneflowDDPDriver`; + + 在指定了框架的情况下,具体使用哪一种取决于参数 ``device`` 的设置; .. warning:: @@ -66,26 +73,28 @@ class Trainer(TrainerEventTrigger): 这意味着当您传入一个 ``Driver`` 实例时,您传入给 ``Trainer`` 的 ``model`` 参数将会被忽略;也就是说模型在训练时使用的真正的模型是 您传入的 ``Driver`` 实例中的模型; - :param train_dataloader: 训练数据集,注意其必须是单独的一个数据集,不能是 List 或者 Dict; + :param train_dataloader: 训练数据集,注意其必须是单独的一个数据集,不能是 :class:`List` 或者 :class:`Dict`; .. 
warning:: - 当使用分布式训练时, ``fastNLP`` 会默认将 ``dataloader`` 中的 ``Sampler`` 进行处理,以使得在一个 ``epcoh`` 中,不同卡 + 当使用分布式训练时, **fastNLP** 会默认将 ``dataloader`` 中的 ``Sampler`` 进行处理,以使得在一个 epoch 中,不同卡 用以训练的数据是不重叠的。如果你对 sampler 有特殊处理,那么请将 ``use_dist_sampler`` 参数设置为 ``False`` ,此刻需要由 你自身保证每张卡上所使用的数据是不同的。 :param optimizers: 训练所需要的优化器;可以是单独的一个优化器实例,也可以是多个优化器组成的 List; - :param device: 该参数用来指定具体训练时使用的机器;注意当该参数仅当您通过 `torch.distributed.launch/run` 启动时可以为 None, - 此时 fastNLP 不会对模型和数据进行设备之间的移动处理,但是你可以通过参数 `input_mapping` 和 `output_mapping` 来实现设备之间 - 数据迁移的工作(通过这两个参数传入两个处理数据的函数);同时你也可以通过在 kwargs 添加参数 "data_device" 来让我们帮助您将数据 + :param device: 该参数用来指定具体训练时使用的机器;注意当该参数仅当您通过 ``torch.distributed.launch/run`` 启动时可以为 ``None``, + 此时 fastNLP 不会对模型和数据进行设备之间的移动处理,但是你可以通过参数 ``input_mapping`` 和 ``output_mapping`` 来实现设备之间 + 数据迁移的工作(通过这两个参数传入两个处理数据的函数);同时你也可以通过在 kwargs 添加参数 ``data_device`` 来让我们帮助您将数据 迁移到指定的机器上(注意这种情况理应只出现在用户在 Trainer 实例化前自己构造 DDP 的场景); device 的可选输入如下所示: - * *str*: 例如 'cpu', 'cuda', 'cuda:0', 'cuda:1', 'gpu:0' 等; - * *torch.device*: 例如 'torch.device("cuda:0")'; - * *int*: 将使用 ``device_id`` 为该值的 ``gpu`` 进行训练;如果值为 -1,那么默认使用全部的显卡,此时使用的 driver 实例是 `TorchDDPDriver`; - * *list(int)*: 如果多于 1 个device,应当通过该种方式进行设定;注意此时我们一定会使用 ``TorchDDPDriver``,不管您传入的列表的长度是 1 还是其它值; + * *str*: 例如 ``'cpu'``, ``'cuda'``, ``'cuda:0'``, ``'cuda:1'``, ``'gpu:0'`` 等; + * *torch.device*: 例如 ``torch.device("cuda:0")``; + * *oneflow.device*:例如 ``oneflow.device("cuda", 0)``; + * *int*: 将使用 ``device_id`` 为该值的 ``gpu`` 进行训练;如果值为 -1,那么默认使用全部的显卡,此时使用的 driver 实例是 `TorchDDPDriver` 这类 + 执行分布式训练的 Driver + * *list(int)*: 如果多于 1 个device,应当通过该种方式进行设定;注意此时我们一定会使用分布式训练的 Driver ,不管您传入的列表的长度是 1 还是其它值; * *None*: 仅当用户自己通过训练框架提供的并行训练启动脚本开启 ddp 进程时为 None; .. 
note:: @@ -121,7 +130,7 @@ class Trainer(TrainerEventTrigger): :param n_epochs: 训练总共的 epoch 的数量,默认为 20;也可以通过 ``n_batches`` 参数设置总共迭代多少个 ``batch`` 。 :param evaluate_dataloaders: 验证数据集,其可以是单独的一个数据集,也可以是多个数据集;当为多个数据集时,注意其必须是 Dict;默认 - 为 None; + 为 ``None``; :param batch_step_fn: 定制每次训练时前向运行一个 batch 的数据所执行的函数。该函数应接受两个参数为 ``trainer`` 和 ``batch``, 不需要要返回值;更详细的使用位置和说明请见 :meth:`~fastNLP.core.controllers.TrainBatchLoop.batch_step_fn`; :param evaluate_batch_step_fn: 定制每次验证时前向运行一个 batch 的数据所执行的函数。该函数应接受的两个参数为 ``evaluator`` 和 ``batch``, @@ -133,8 +142,8 @@ class Trainer(TrainerEventTrigger): .. note:: 在 fastNLP 中,对于训练时使用的前向传播函数的查找逻辑如下所示: - 1. 如果 ``train_fn`` 为 None,那么在 model 的类 Model 中寻找方法 ``Model.train_step``;如果没有找到,那么默认使用 ``Model.forward``; - 2. 如果 ``train_fn`` 为一个字符串,例如 'my_step_fn',那么我们首先会在 model 的类 Model 中寻找方法 ``Model.my_step_fn``, + 1. 如果 ``train_fn`` 为 None,那么在 model 的类 Model 中寻找方法 :meth:`Model.train_step` ;如果没有找到,那么默认使用 :meth:`Model.forward`; + 2. 如果 ``train_fn`` 为一个字符串,例如 ``'my_step_fn'``,那么我们首先会在 model 的类 Model 中寻找方法 :meth:`Model.my_step_fn`, 如果没有找到,那么会直接报错; :param evaluate_fn: 用来控制 ``Trainer`` 中内置的 ``Evaluator`` 在验证的前向传播过程中是调用模型的哪一个函数,应当为 ``None`` @@ -142,7 +151,7 @@ class Trainer(TrainerEventTrigger): :param callbacks: 训练当中触发的 callback 类,该参数应当为一个列表,其中的每一个元素都应当继承 ``Callback`` 类;具体可见 :class:`~fastNLP.core.callbacks.Callback`; :param metrics: 用于传给 ``Trainer`` 内部的 ``Evaluator`` 实例来进行训练过程中的验证。其应当为一个字典,其中 key 表示 monitor, - 例如 {"acc1": AccMetric(), "acc2": AccMetric()}; + 例如 ``{"acc1": AccMetric(), "acc2": AccMetric()}``; 目前我们支持的 ``metric`` 的种类有以下几种: @@ -156,7 +165,7 @@ class Trainer(TrainerEventTrigger): 1. 为负数时表示每隔几个 ``epoch`` evaluate 一次; 2. 为正数则表示每隔几个 ``batch`` evaluate 一次; 3. 为函数时表示用户自己传入的用于控制 evaluate 的频率的函数,该函数的应该接受当前 trainer 对象作为参数,并 - 返回一个 bool 值,返回为 True 说明需要进行 evaluate ;将在每个 ``batch`` 结束后调用该函数判断是否需要 evaluate; + 返回一个 bool 值,返回为 ``True`` 说明需要进行 evaluate ;将在每个 ``batch`` 结束后调用该函数判断是否需要 evaluate; .. 
note:: @@ -208,7 +217,7 @@ class Trainer(TrainerEventTrigger): :param model_wo_auto_param_call: 是否关闭在训练时调用我们的 ``auto_param_call`` 函数来自动匹配 batch 和前向函数的参数的行为; - 1. 如果该值为 ``False``,并且当 batch 为字典时,我们会根据**前向函数**所需要的参数从 batch 中提取对应的对象,然后传入到**前向函数**中; + 1. 如果该值为 ``False``,并且当 batch 为字典时,我们会根据 **前向函数** 所需要的参数从 batch 中提取对应的对象,然后传入到 **前向函数** 中; 2. 如果该值为 ``True``,那么我们会将 batch 直接透传给模型; .. todo:: @@ -219,8 +228,8 @@ class Trainer(TrainerEventTrigger): :param accumulation_steps: 梯度累积的步数,表示每隔几个 batch 才让优化器迭代一次,默认为 1; :param fp16: 是否开启混合精度训练,默认为 False; :param monitor: 对于一些特殊的 ``Callback``,例如 :class:`~fastNLP.core.callbacks.CheckpointCallback`,它们需要参数 ``monitor`` - 来从 ``Evaluator`` 的验证结果中获取当前评测的值,从而来判断是否执行一些特殊的操作。例如,对于 ``CheckpointCallback`` 而言,如果我们 - 想要每隔一个 epoch 让 ``Evaluator`` 进行一次验证,然后保存训练以来的最好的结果;那么我们需要这样设置: + 来从 ``Evaluator`` 的验证结果中获取当前评测的值,从而来判断是否执行一些特殊的操作。例如,对于 :class:`~fastNLP.core.callbacks.CheckpointCallback` + 而言,如果我们想要每隔一个 epoch 让 ``Evaluator`` 进行一次验证,然后保存训练以来的最好的结果;那么我们需要这样设置: .. code-block:: @@ -234,7 +243,7 @@ class Trainer(TrainerEventTrigger): )] ) - 这意味着对于 ``CheckpointCallback`` 来说,*'acc'* 就是一个监测的指标,用于在 ``Evaluator`` 验证后取出其需要监测的那个指标的值。 + 这意味着对于 :class:`~fastNLP.core.callbacks.CheckpointCallback` 来说,*'acc'* 就是一个监测的指标,用于在 ``Evaluator`` 验证后取出其需要监测的那个指标的值。 ``Trainer`` 中的参数 ``monitor`` 的作用在于为没有设置 ``monitor`` 参数但是需要该参数的 *callback* 实例设置该值。关于 ``monitor`` 参数更详细的说明,请见 :class:`~fastNLP.core.callbacks.CheckpointCallback`; @@ -247,7 +256,7 @@ class Trainer(TrainerEventTrigger): 注意该参数仅当 ``Trainer`` 内置的 ``Evaluator`` 不为 None 时且有需要该参数但是没有设置该参数的 *callback* 实例才有效; :param n_batches: 总共迭代多少个 ``batch`` 的训练结束。当该值不为 -1 时,将直接忽略 ``n_epochs`` 的值。 - :param overfit_batches: 使用该参数来支持 '过拟合' 的功能;支持的值为 ``-1``、``0`` 或者 大于 0 的整数,表示使用多少个 batch 的数据 + :param overfit_batches: 使用该参数来支持 **'过拟合'** 的功能;支持的值为 ``-1``、``0`` 或者 大于 0 的整数,表示使用多少个 batch 的数据 来进行过拟合训练;其中 0 为表示不进行任何操作;-1 表示使用所有的数据进行训练; .. note:: @@ -258,10 +267,10 @@ class Trainer(TrainerEventTrigger): .. 
warning:: - 在使用该参数时,您同样可以指定 ``metrics`` 参数来进行简单的验证,当该参数和 ``metrics`` 同时出现时,我们会将 evaluate_dataloaders + 在使用该参数时,您同样可以指定 ``metrics`` 参数来进行简单的验证,当该参数和 ``metrics`` 同时出现时,我们会将 ``evaluate_dataloaders`` 直接替换为在过拟合中所使用的训练数据;因此您需要保证您的 ``metrics`` 是能够在 ``train_dataloader`` 上使用的; - :param marker: 用于标记一个 ``Trainer`` 实例,从而在用户调用 ``Trainer.on`` 函数时,标记该函数属于哪一个具体的 ``Trainer`` 实例;默认为 None; + :param marker: 用于标记一个 ``Trainer`` 实例,从而在用户调用 ``Trainer.on`` 函数时,标记该函数属于哪一个具体的 ``Trainer`` 实例;默认为 ``None``; .. note:: @@ -283,7 +292,7 @@ class Trainer(TrainerEventTrigger): ) 另一点需要说明的是,如果一个被 ``Trainer.on`` 修饰的函数,其修饰时没有指明 ``marker``,那么会将该函数传给代码位于其之后的 - 第一个 ``Trainer`` 实例,即使该 ``Trainer`` 实例的 marker 不为 None;这一点详见 :meth:`~fastNLP.core.controllers.Trainer.on` + 第一个 ``Trainer`` 实例,即使该 ``Trainer`` 实例的 marker 不为 ``None``;这一点详见 :meth:`~fastNLP.core.controllers.Trainer.on` :kwargs: * *torch_kwargs* -- ``TorchDriver`` 所需的其它参数,详见 :class:`~fastNLP.core.drivers.torch_driver.TorchSingleDriver` 和 @@ -291,35 +300,43 @@ class Trainer(TrainerEventTrigger): * *paddle_kwargs* -- ``PaddleDriver`` 所需的其它参数,详见 :class:`~fastNLP.core.drivers.paddle_driver.PaddleSingleDriver` 和 :class:`~fastNLP.core.drivers.paddle_driver.PaddleSingleDriver`; * *fairscale_kwargs* -- ``FairScaleDriver`` 所需的其它参数,详见 :class:`~fastNLP.core.drivers.torch_driver.FairScaleDriver`; + * *deepspeed_kwargs* -- ``DeepSpeedDriver`` 所需的其它参数,详见 :class:`~fastNLP.core.drivers.torch_driver.DeepSpeedDriver`; + * *torch_kwargs* -- ``OneflowDriver`` 所需的其它参数,详见 :class:`~fastNLP.core.drivers.oneflow_driver.OneflowSingleDriver` 和 + :class:`~fastNLP.core.drivers.oneflow_driver.OneflowDDPDriver`; * *data_device* -- 一个具体的 driver 实例中,有 ``model_device`` 和 ``data_device``,前者表示模型所在的设备,后者表示 当 ``model_device`` 为 None 时应当将数据迁移到哪个设备; .. note:: - 注意您在绝大部分情况下不会用到该参数! + **注意您在绝大部分情况下不会用到该参数!** 1. 当 driver 实例的 ``model_device`` 不为 None 时,该参数无效; - 2. 
对于 pytorch,仅当用户自己通过 ``python -m torch.distributed.launch`` 并且自己初始化 ``init_process_group`` 时, - driver 实例的 ``model_device`` 才会为 None; - 3. 对于 paddle,该参数无效; + 2. 对于 **pytorch**,仅当用户自己通过 ``python -m torch.distributed.launch`` 并且自己初始化 ``init_process_group`` 时, + driver 实例的 ``model_device`` 才会为 None; + 2. 对于 **deepspeed**,仅当用户自己通过 ``deepspeed xxx.py`` 并且自己初始化 ``model.initialize`` 时, + driver 实例的 ``model_device`` 才会为 None; + 3. 对于 **paddle** 和 **oneflow**,该参数无效; * *use_dist_sampler* -- 表示是否使用分布式的 ``sampler``。在多卡时,分布式 ``sampler`` 将自动决定每张卡上读取的 sample ,使得一个 epoch 内所有卡的 sample 加起来为一整个数据集的 sample,同时为了保证所有卡上拥有相同数量的 sample ,有的卡上可能会有重复的 sample ,例如 - 8卡训练,只有9个sample,如果batch_size为1,那么第二个batch时,有7张卡将没有 sample 可用,因此只有重复使用 sample 来 pad 到第二个 - batch 中。如果不希望 fastNLP 对 dataloader 的sampler 做特殊设置,请将该值设置为 False ,若确实需要分布式的训练,请在 Trainer 外 - 对 train_dataloader 做的数据做特殊处理使得其在不同的卡之间 sample 是 + 8卡训练,只有9个 sample ,如果 batch_size 为 1,那么第二个 batch 时,有7张卡将没有 sample 可用,因此只有 **重复** 使用 sample 来 pad 到第二个 + batch 中。如果不希望 fastNLP 对 dataloader 的 sampler 做特殊设置,请将该值设置为 False ,若确实需要分布式的训练,请在 Trainer 外 + 对 ``train_dataloader`` 做的数据做特殊处理使得其在不同的卡之间 sample 是不同的。 * *evaluate_use_dist_sampler* -- 表示在 ``Evaluator`` 中在使用分布式的时候是否将保证 dataloader 的 ``sampler`` 替换为 evaluate 时使用的分布式的 ``sampler``,其特点是每个卡上的数据之间不重叠,所有卡上数据的加起来是整个数据集。若传入的 dataloader - 的 sampler 为 (a) 深度学习框架自带的默认 sampler ; (b) fastNLP 的 Sampler 等,则将替换为 - :class:`~fastNLP.UnrepeatedSequentialSampler`,如果这个行为不是期待的,请本参数设置为 ``False``,并针对每个卡控制其可以 + 的 sampler 为: + + - 深度学习框架自带的默认 sampler ; + - fastNLP 的 Sampler ; + 则将替换为 :class:`~fastNLP.UnrepeatedSequentialSampler`,如果这个行为不是期待的,请本参数设置为 ``False``,并针对每个卡控制其可以 用到的数据。 * *output_from_new_proc* -- 应当为一个字符串,表示在多进程的 driver 中其它进程的输出流应当被做如何处理;其值应当为以下之一: - ["all", "ignore", "only_error"];当该参数的值不是以上值时,该值应当表示一个文件夹的名字,我们会将其他 rank 的输出流重定向到 - log 文件中,然后将 log 文件保存在通过该参数值设定的文件夹中;默认为 "only_error"; + ``["all", "ignore", "only_error"]``;当该参数的值不是以上值时,该值应当表示一个文件夹的名字,我们会将其他 rank 的输出流重定向到 + log 文件中,然后将 log 文件保存在通过该参数值设定的文件夹中;默认为 ``"only_error"``; 
注意该参数仅当使用分布式的 ``driver`` 时才有效,例如 ``TorchDDPDriver``; - * *progress_bar* -- 以哪种方式显示 progress ,目前支持[None, 'raw', 'rich', 'auto', 'tqdm'] 或者 :class:`~fastNLP.RichCallback`, :class:`~fastNLP.RawTextCallback`等对象, - 默认为 auto , auto 表示如果检测到当前 terminal 为交互型则使用 :class:`~fastNLP.RichCallback`,否则使用 :class:`~fastNLP.RawTextCallback` 对象。如果 + * *progress_bar* -- 显示进度条的方式,目前支持 ``[None, 'raw', 'rich', 'auto', 'tqdm']`` 或者 :class:`~fastNLP.RichCallback` 、 :class:`~fastNLP.RawTextCallback` 等对象, + 默认为 ``'auto'`` , ``'auto'`` 表示如果检测到当前 terminal 为交互型则使用 :class:`~fastNLP.RichCallback`,否则使用 :class:`~fastNLP.RawTextCallback` 对象。如果 需要定制 progress bar 的参数,例如打印频率等,可以传入 :class:`~fastNLP.RichCallback`, :class:`~fastNLP.RawTextCallback` 等对象。 * *train_input_mapping* -- 与 input_mapping 一致,但是只用于 ``Trainer`` 中。与 input_mapping 互斥。 * *train_output_mapping* -- 与 output_mapping 一致,但是只用于 ``Trainer`` 中。与 output_mapping 互斥。 @@ -331,19 +348,19 @@ class Trainer(TrainerEventTrigger): ``Trainer`` 是通过在内部直接初始化一个 ``Evaluator`` 来进行验证; ``Trainer`` 内部的 ``Evaluator`` 默认是 None,如果您需要在训练过程中进行验证,你需要保证这几个参数得到正确的传入: - 必须的参数:1. ``metrics``;2. ``evaluate_dataloaders``; + 必须的参数:``metrics`` 与 ``evaluate_dataloaders``; - 可选的其它参数:1. ``evaluate_batch_step_fn;2. ``evaluate_fn``;3. ``evaluate_every``;4. ``input_mapping``; - 5. ``output_mapping``; 6. ``model_wo_auto_param_call``;7. ``fp16``;8. ``monitor``;9. ``larger_better``; + 可选的其它参数:``evaluate_batch_step_fn``、 ``evaluate_fn``、``evaluate_every``、``input_mapping``、 + ``output_mapping``、``model_wo_auto_param_call``、``fp16``、``monitor``、``larger_better``; .. warning:: 如果 ``Trainer`` 中内置的 ``Evaluator`` 实例不为 ``None``,那么需要注意 ``Trainer`` 中的一些参数是与 ``Evaluator`` 一致的,它们分别为: 1. ``Evaluator`` 在初始化时的 ``driver`` 参数是 ``Trainer`` 中已经实例化过的 driver;这一点使得一些参数对于 ``Trainer`` 内部的 - ``Evaluator`` 没有用处,例如 ``device``,``torch_kwargs``,``data_device`` 和 ``output_from_new_proc`` 等; + ``Evaluator`` 没有用处,例如 ``device``,``torch_kwargs``,``data_device`` 和 ``output_from_new_proc`` 等; 2. 
``input_mapping``,``output_mapping``,``model_wo_auto_param_call`` 和 ``fp16`` 是 ``Trainer`` 和其内部默认的 - ``Evaluator`` 是一致的; + ``Evaluator`` 是一致的; 当然,对于 ``input_mapping`` 和 ``output_mapping``,您可以通过添加 ``kwargs`` 中的参数 ``evaluate_input_mapping`` 和 ``evaluate_output_mapping`` 来单独为 ``Evaluator`` 进行更细致的订制。 @@ -547,17 +564,17 @@ class Trainer(TrainerEventTrigger): r""" 该函数是在 ``Trainer`` 初始化后用于真正开始训练的函数; - 注意如果是断点重训的第一次训练,即还没有保存任何用于断点重训的文件,那么其应当置 resume_from 为 None,并且使用 ``CheckpointCallback`` + 注意如果是断点重训的第一次训练,即还没有保存任何用于断点重训的文件,那么其应当置 ``resume_from`` 为 ``None``,并且使用 ``CheckpointCallback`` 去保存断点重训的文件; - :param num_train_batch_per_epoch: 每个 epoch 训练多少个 batch 后停止,*-1* 表示使用 train_dataloader 本身的长度; - :param num_eval_batch_per_dl: 每个 evaluate_dataloader 验证多少个 batch 停止,*-1* 表示使用 evaluate_dataloader 本身的长度; + :param num_train_batch_per_epoch: 每个 epoch 训练多少个 batch 后停止,*-1* 表示使用 ``train_dataloader`` 本身的长度; + :param num_eval_batch_per_dl: 每个 ``evaluate_dataloader`` 验证多少个 batch 停止,*-1* 表示使用 ``evaluate_dataloader`` 本身的长度; :param num_eval_sanity_batch: 在训练之前运行多少个 evaluation batch 来检测一下 evaluation 的过程是否有错误。为 0 表示不检测; :param resume_from: 从哪个路径下恢复 trainer 的状态,注意该值需要为一个文件夹,例如使用 ``CheckpointCallback`` 时帮助您创建的保存的子文件夹; :param resume_training: 是否按照 checkpoint 中训练状态恢复。如果为 False,则只恢复 model 和 optimizers 的状态;该参数如果为 ``True``, 在下一次断点重训的时候我们会精确到上次训练截止的具体的 sample 进行训练;否则我们只会恢复 model 和 optimizers 的状态,而 ``Trainer`` 中的 其余状态都是保持初始化时的状态不会改变; - :param catch_KeyboardInterrupt: 是否捕获 KeyboardInterrupt;如果该参数为 ``True``,在训练时如果您使用 ``ctrl+c`` 来终止程序, + :param catch_KeyboardInterrupt: 是否捕获 :class:`KeyboardInterrupt`;如果该参数为 ``True``,在训练时如果您使用 ``ctrl+c`` 来终止程序, ``Trainer`` 不会抛出异常,但是会提前退出,然后 ``trainer.run()`` 之后的代码会继续运行。注意该参数在您使用分布式训练的 ``Driver`` 时无效,例如 ``TorchDDPDriver``;非分布式训练的 ``Driver`` 下该参数默认为 True; @@ -578,7 +595,7 @@ class Trainer(TrainerEventTrigger): 整体的验证流程是否正确; ``num_eval_sanity_batch`` 的作用可能会让人产生迷惑,其本质和 ``num_eval_batch_per_dl`` 作用一致,但是其只被 ``Trainer`` 使用; - 并且其只会在训练的一开始使用,意思为:我们在训练的开始时会先使用 ``Evaluator``(如果其不为 
``None``) 进行验证,此时验证的 batch 的 + 并且其只会在训练的一开始使用,意思为:我们在训练的开始时会先使用 ``Evaluator`` (如果其不为 ``None``) 进行验证,此时验证的 batch 的 数量只有 ``num_eval_sanity_batch`` 个;但是对于 ``num_eval_batch_per_dl`` 而言,其表示在实际的整体的训练过程中,每次 ``Evaluator`` 进行验证时会验证的 batch 的数量。 @@ -724,7 +741,7 @@ class Trainer(TrainerEventTrigger): .. note:: 对于训练一个神经网络的整体的流程来说,其可以分为很多个时间点,例如 **"整体的训练前"**,**"训练具体的一个 epoch 前"**, - **"反向传播前"**,**"整体的训练结束后"**等;一个 ``callback`` 时机指的就是这些一个个具体的时间点; + **"反向传播前"**,**"整体的训练结束后"** 等;一个 ``callback`` 时机指的就是这些一个个具体的时间点; 该函数的参数 ``event`` 需要是一个 ``Event`` 实例,其使用方式见下方的例子; @@ -1014,10 +1031,11 @@ class Trainer(TrainerEventTrigger): r""" 用于帮助您保存模型的辅助函数; - :param folder: 保存模型的文件夹。如果没有传入 model_save_fn 参数,则我们会在这个文件夹下保存 fastnlp_model.pkl.tar 文件; - :param only_state_dict: 仅在 model_save_fn 为空时,有效。是否只保存模型的 ``state_dict``; + :param folder: 保存模型的文件夹。如果没有传入 ``model_save_fn`` 参数,则我们会在这个文件夹下保存 ``fastnlp_model.pkl.tar`` 文件; + :param only_state_dict: 仅在 ``model_save_fn`` 为空时,有效。是否只保存模型的 ``state_dict``; :param model_save_fn: 您自己定制的用来替换该保存函数本身保存逻辑的函数,当您传入了该参数后,我们会实际调用该函数,而不会去调用 ``driver`` 的 ``save_model`` 函数; - :param kwargs: 理论上您不需要使用到该参数; + :kwargs: + * *input_spec* -- 该参数详见 **PaddlePaddle** 框架的保存函数 :meth:`~fastNLP.core.drivers.PaddleDriver.save_model` 中的说明; .. note:: @@ -1056,10 +1074,10 @@ class Trainer(TrainerEventTrigger): """ 用于帮助您加载模型的辅助函数; - :param folder: 存放着您需要加载的 model 的文件夹,默认会尝试读取该文件夹下的 fastnlp_model.pkl.tar 文件。在 model_load_fn 不为空时, - 直接将该 folder 传递到 model_load_fn 中; - :param only_state_dict: 要读取的文件中是否仅包含模型权重。在 ``model_load_fn 不为 None`` 时,该参数无意义; - :param model_load_fn: ``callable`` 的函数,接受一个 folder 作为参数,需要注意该函数不需要返回任何内容; + :param folder: 存放着您需要加载的 model 的文件夹,默认会尝试读取该文件夹下的 ``fastnlp_model.pkl.tar`` 文件。在 ``model_load_fn`` + 不为空时,直接将该 folder 传递到 ``model_load_fn`` 中; + :param only_state_dict: 要读取的文件中是否仅包含模型权重。在 ``model_load_fn`` 不为 ``None`` 时,该参数无意义; + :param model_load_fn: :class:`Callable` 的函数,接受一个 folder 作为参数,需要注意该函数不需要返回任何内容; :param kwargs: 理论上您不需要使用到该参数; .. 
note:: @@ -1099,12 +1117,13 @@ class Trainer(TrainerEventTrigger): 用于帮助您实现断点重训功能的保存函数;保存内容包括:callback 状态、Trainer 的状态、Sampler 的状态【在恢复的时候才能恢复到特定 batch 】、 模型参数、optimizer的状态、fp16 Scaler的状态【如果有】。 - :param folder: 保存在哪个文件夹下,会在该文件下声称两个文件:fastnlp_checkpoint.pkl.tar 与 fastnlp_model.pkl.tar 。 - 如果 model_save_fn 不为空,则没有 fastnlp_model.pkl.tar 文件; - :param only_state_dict: 当 model_save_fn 为空时有效,表明是否仅保存模型的权重; + :param folder: 保存在哪个文件夹下,会在该文件下生成两个文件:``fastnlp_checkpoint.pkl.tar`` 与 ``fastnlp_model.pkl.tar`` 。 + 如果 ``model_save_fn`` 不为空,则没有 ``fastnlp_model.pkl.tar`` 文件; + :param only_state_dict: 当 ``model_save_fn`` 为空时有效,表明是否仅保存模型的权重; :param model_save_fn: 如果模型保存比较特殊,可以传入该函数自定义模型的保存过程,输入应该接受一个文件夹(实际上就是接受上面的 folder 参数),不需要返回值;这意味着您可以通过该函数来自己负责模型的保存过程,而我们则会将 ``trainer`` 的状态保存好; - :param kwargs: 理论上您不需要使用到该参数; + :kwargs: + * *input_spec* -- 该参数详见 **PaddlePaddle** 框架的保存函数 :meth:`~fastNLP.core.drivers.PaddleDriver.save_model` 中的说明; .. note:: @@ -1123,7 +1142,7 @@ class Trainer(TrainerEventTrigger): 为了支持断点重训功能,我们会在调用该函数时保存以下内容: 1. 各个 ``callback`` 的状态,这主要涉及到一些带有运行状态的 ``callback``; - 2. 控制训练流程的变量 ``trainer_state``,具体详见 :class:`~fastNLP.core.controllers.utils.states.TrainerState`; + 2. 控制训练流程的变量 ``trainer_state``,具体详见 :class:`~fastNLP.core.controllers.utils.state.TrainerState`; 3. 一个特殊的变量 ``num_consumed_batches``,表示在这次训练过程中总共训练了多少个 batch 的数据;您不需要关心这个变量; 4. sampler 的状态,为了支持断点重训功能,我们会在 trainer 初始化的时候,将您的 ``trainer_dataloader`` 的 ``sampler`` 替换为 我们专门用于断点重训功能的 ``ReproducibleSampler``,详见 :class:`~fastNLP.core.samplers.reproducible_sampler.ReproducibleSampler`; @@ -1335,6 +1354,11 @@ class Trainer(TrainerEventTrigger): 用于在使用梯度累积并且进行分布式训练时,由于在前 ``accumulation_steps - 1`` 的时间内不需要进行梯度的同步,因此通过使用该 context 上下文 环境来避免梯度的同步; + .. 
note:: + + 部分深度学习框架的梯度累积并不需要通过提供上下文环境实现,关于这点需要您深入了解您正在使用的框架的机制;而对于这些框架,fastNLP 会返回一个 + 空的上下文环境。 + :return: 一个支持 ``no_sync`` 的 ``context``; """ @@ -1420,7 +1444,7 @@ class Trainer(TrainerEventTrigger): def model_device(self): r""" :return: 返回当前模型所在的设备;注意该值在当且仅当在少数情况下为 ``None``,例如当使用 ``pytorch`` 时,仅当用户自己初始化 ``init_progress_group`` 时 - ``model_device`` 才为 None; + ``model_device`` 才为 None; """ return self.driver.model_device diff --git a/fastNLP/core/controllers/utils/state.py b/fastNLP/core/controllers/utils/state.py index 676b548c..8c1bfde8 100644 --- a/fastNLP/core/controllers/utils/state.py +++ b/fastNLP/core/controllers/utils/state.py @@ -42,7 +42,7 @@ class State(dict): class TrainerState: r""" 该类用于我们 fastNLP 自己内部为了训练流程所记录的一些状态,当然是要暴露给用户给用户使用的; - 我们保存的state大部分上是 trainer 断点重训 需要重新加载的; + 我们保存的 state 大部分上是 trainer 断点重训 需要重新加载的; 专属于 `Trainer` 的状态记载的类; :param n_epochs: 训练过程中总共的 epoch 的数量; @@ -50,7 +50,7 @@ class TrainerState: :param global_forward_batches: 当前模型总共 forward 了多少个 step; :param batch_idx_in_epoch: 训练中在当前 epoch 的第几个 step; :param num_batches_per_epoch: 每一个 epoch 会 forward 多少个 step; - :param n_batches: 完整训练过程会 forward 的 step 数量,注意 n_batches = n_batches * n_epochs; + :param n_batches: 完整训练过程会 forward 的 step 数量,注意 ``n_batches = num_batches_per_epoch * n_epochs`` ; """ n_epochs: Optional[int] = None # 无论如何重新算 @@ -73,6 +73,7 @@ class TrainerState: def load_state_dict(self, state_dict: Dict): r""" 用于断点重训来重新加载保存的状态字典; + :param state_dict: 用于加载的状态字典; """ for key in state_dict: diff --git a/fastNLP/core/controllers/utils/utils.py b/fastNLP/core/controllers/utils/utils.py index ef3cf98c..0a351354 100644 --- a/fastNLP/core/controllers/utils/utils.py +++ b/fastNLP/core/controllers/utils/utils.py @@ -4,11 +4,12 @@ from fastNLP.core.callbacks import CallbackManager from .state import TrainerState from fastNLP.core.utils.utils import _check_valid_parameters_number +__all__ = [] class TrainerEventTrigger: r""" - 为了避免在训练流程中调用 callback 函数中写成类似 
'trainer.callback_manager.on_train_begin' 的形式,我们选择单独抽象为 'Trainer' - 抽象一层,然后一些特殊的操作可以在这里进行,例如我们通过 `on_validate_end` 来通知所有的 'CheckpointCallback' 实例在当前的 step 后保存 + 为了避免在训练流程中调用 callback 函数中写成类似 `'trainer.callback_manager.on_train_begin'` 的形式,我们选择单独为 ``Trainer`` + 抽象一层,然后一些特殊的操作可以在这里进行,例如我们通过 :meth:`on_validate_end` 来通知所有的 ``CheckpointCallback`` 实例在当前的 step 后保存 模型。 """ callback_manager: CallbackManager @@ -138,7 +139,7 @@ def check_evaluate_every(evaluate_every): ``evaluate_every`` 的使用详见 ``Trainer`` 的 ``evaluate_every`` 参数; - 主要在于当参数 ``evaluate_every`` 是一个 callable 的函数时,需要保证其参数的正确性; + 主要在于当参数 ``evaluate_every`` 是一个 Callable 的函数时,需要保证其参数的正确性; """ if not callable(evaluate_every) and (not isinstance(evaluate_every, int) or evaluate_every == 0): raise ValueError("Parameter 'evaluate_every' should be set to 'int' type and either < 0 or > 0.") diff --git a/fastNLP/core/dataloaders/jittor_dataloader/fdl.py b/fastNLP/core/dataloaders/jittor_dataloader/fdl.py index 4631ba7b..22aeeec7 100644 --- a/fastNLP/core/dataloaders/jittor_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/jittor_dataloader/fdl.py @@ -47,38 +47,35 @@ class JittorDataLoader: * callate_fn 为 ``'auto'`` 时,``JittorDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的取值。 此时可以配套使用 ``JittorDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 * callate_fn 为 ``None`` 时, ``JittorDataLoader`` 默认使用 Jittor DataLoader 自带的 collate_fn - * collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + * collate_fn 为 :class:`Callable` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + :param dataset: 实现了 __getitem__() 和 __len__() 的对象。 + :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 + :param shuffle: 是否打乱数据集, 默认为 ``False``。 + :param drop_last: 当 ``drop_last=True`` 时,``JittorDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; + 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 
``False`` 。 + :param num_workers: 当 ``num_workers > 0`` 时, ``JittorDataLoader`` 会开启 num_workers 个子进程来处理数据, 可以加快 + 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 + :param buffer_size: 每个进程占用的内存空间,默认为 512M。主要是配合 ``num_workers`` 使用,用户可以自定义每个进程的内存大小。 + :param stop_grad: 是否不使用梯度, 默认 ``True`` 。 + :param keep_numpy_array: 返回的数据是 ``np.array`` 类型而不是 ``jittor.Var`` 类型,默认为 ``False`` + :param endless: 是否让 ``JittorDataLoader`` 无限返回数据,也就是将 dataset 循环使用使得返回数据是没有限制的。默认为 ``False``. + :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. + + * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, + ``JittorDataLoader`` 调用默认的 Jittor 框架的 ``DataLoader`` 自带的 ``collate_batch`` 作为 callate_fn 的默认值, 其无法处理 + :class:`~fastNLP.core.dataset.DataSet` 的 dataset 对象。 + * callate_fn 为 ``'auto'`` 时,``JittorDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 + 此时可以配套使用 ``JittorDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 + * collate_fn 为 :class:`Callable` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + dataset 的一条数据;该 Callable 函数还应当返回一个对象。 """ def __init__(self, dataset, batch_size: int = 16, shuffle: bool = False, drop_last: bool = False, num_workers: int = 0, buffer_size: int = 512 * 1024 * 1024, stop_grad: bool = True, keep_numpy_array: bool = False, endless: bool = False, collate_fn: Union[None, str, Callable] = "auto") -> None: - """ - :param dataset: 实现了 __getitem__() 和 __len__() 的对象。 - :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param shuffle: 是否打乱数据集, 默认为 ``False``。 - :param drop_last: 当 ``drop_last=True`` 时,``JittorDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; - 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 - :param num_workers: 当 ``num_workers > 0`` 时, ``JittorDataLoader`` 会开启 num_workers 个子进程来处理数据, 可以加快 - 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 
时, 不开启子进程。 默认为 ``0``。 - :param buffer_size: 每个进程占用的内存空间,默认为512M。主要是配合num_workers使用,用户可以自定义每个进程的内存大小。 - :param stop_grad: 是否不使用梯度, 默认 ``True`` 。 - :param keep_numpy_array: 返回的数据是 ``np.array`` 类型而不是 ``jittor.Var`` 类型,默认为 ``False`` - :param endless: 是否让 ``JittorDataLoader`` 无限返回数据,也就是将 dataset 循环使用使得返回数据是没有限制的。默认为 ``False``. - :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. - - * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, - ``JittorDataLoader`` 调用默认的 Jittor 框架的 ``DataLoader`` 自带的 ``collate_batch`` 作为 callate_fn 的默认值, 其无法处理 - :class:`~fastNLP.core.dataset.DataSet` 的 dataset 对象。 - * callate_fn 为 ``'auto'`` 时,``JittorDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 - 此时可以配套使用 ``JittorDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 - * collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 - dataset 的一条数据;该 Callable 函数还应当返回一个对象。 - - """ # TODO 验证支持replacesampler (以后完成) 增加Sampler # 将内部dataset批次设置为1 if isinstance(dataset, Dataset): @@ -136,20 +133,20 @@ class JittorDataLoader: """ 如果需要对某个 field 的内容进行特殊的调整,请使用这个函数。 - :param field_name: 需要调整的 field 的名称。如果 :class:`~fastNLP.core.Dataset` 的 :class:`~fastNLP.core.Dataset.__getitem__` - 方法返回的是 dict 类型的,则可以直接使用对应的 field 的 key 来表示,如果是 nested 的 dict,可以使用元组表示多层次的 key,例如 - ``{'a': {'b': 1}}`` 中的使用 ``('a', 'b')`` 如果 ``__getitem__`` 返回的是 Sequence 类型的,则可以使用 *_0*, *_1* 表示序列中 - 第 **0** 或 **1** 个元素。如果该 field 在数据中没有找到,则报错;如果 __getitem__ 返回的是就是整体内容,请使用 ``_single`` 。 - :param pad_val: 这个 field 的默认 pad 值。如果设置为 None,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 - field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 - 无意义。 - :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选 ``['raw', 'numpy', 'Jittor', 'paddle', 'jittor', 'auto']`` ,分别代表,输出为 ``list`` , 
``numpy.ndarray`` , - ``Jittor.Tensor`` , ``paddle.Tensor`` , ``jittor.Var`` 类型。若 ``pad_val`` 为 ``None`` ,该值无意义 。 - :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 - batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch - 形式,输出将被直接作为结果输出。 - :return: 返回 Collator 自身 + :param field_name: 需要调整的 field 的名称。如果 :meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1** 个元素。 + 如果该 field 在数据中没有找到,则报错;如果 :meth:`Dataset.__getitem__` 返回的是就是整体内容,请使用 ``"_single"`` 。 + :param pad_val: 这个 field 的默认 pad 值。如果设置为 ``None``,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 + field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 ``None`` 。如果 ``backend`` 为 ``None``, + 该值无意义。 + :param dtype: 对于需要 pad 的 field ,该 field 数据的 ``dtype`` 。 + :param backend: 可选 ``['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto']`` ,分别代表,输出为 :class:`list`, + :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`paddle.Tensor`, :class:`jittor.Var`, :class:`oneflow.Tensor` 类型。 + 若 ``pad_val`` 为 ``None`` ,该值无意义 。 + :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 ``pad_val``, ``dtype``, ``backend`` 等参数失效。``pad_fn`` 的输入为当前 field 的 + batch 形式。 collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。 + :return: 返回使用的 collator """ collator = self._get_collator() if isinstance(collator, Collator): @@ -173,16 +170,14 @@ class JittorDataLoader: def set_ignore(self, *field_names) -> Collator: """ - 如果有的内容不希望输出,可以在此处进行设置,被设置的 field 将在 batch 的输出中被忽略。 - Example:: + 如果有的内容不希望输出,可以在此处进行设置,被设置的 field 将在 batch 的输出中被忽略:: - collator.set_ignore('field1', 'field2') + dataloader.set_ignore('field1', 'field2') - :param field_name: 需要调整的 field 的名称。如果 :class:`~fastNLP.core.Dataset` 的 :class:`~fastNLP.core.Dataset.__getitem__` - 方法返回的是 dict 类型的,则可以直接使用对应的 field 的 key 来表示,如果是 nested 的 
dict,可以使用元组表示多层次的 key,例如 - ``{'a': {'b': 1}}`` 中的使用 ``('a', 'b')`` 如果 ``__getitem__`` 返回的是 Sequence 类型的,则可以使用 *_0*, *_1* 表示序列中 - 第 **0** 或 **1** 个元素。 - :return: 返回 Collator 自身 + :param field_names: field_name: 需要调整的 field 的名称。如果 :meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1** 个元素。 + :return: 返回使用的 collator """ collator = self._get_collator() if isinstance(collator, Collator): @@ -193,9 +188,9 @@ class JittorDataLoader: def get_batch_indices(self) -> List[int]: """ - 获取当前 batch 的 idx + 获取当前 ``batch`` 中每条数据对应的索引。 - :return: + :return: 当前 ``batch`` 数据的索引; """ return self.cur_batch_indices @@ -208,37 +203,37 @@ def prepare_jittor_dataloader(ds_or_db, batch_size: int = 16, shuffle: bool = No -> Union[Dict[str, JittorDataLoader], JittorDataLoader]: """ ``prepare_jittor_dataloader`` 的功能是将输入的单个或多个 dataset 同时转为 :class:`JittorDataLoader` 对象, 详见 :class:`~fastNLP.core.dataloaders.JittorDataLoader`。 - 根据 ds_or_db 的类型 ``[DataSet, DataBundle, Dict[name, Dataset]]`` 不同而有不同返回结果, 具体如下: - - * 当 ds_or_db 为 ``DataSet`` 时,``prepare_jittor_dataloader`` 会将使用的除了 non_train_batch_size 和 non_train_sampler 以外的参数来 - 帮你实例化一个 :class:`JittorDataLoader` 对象并返回该对象。 详见 :class:`~fastNLP.core.dataloaders.JittorDataLoader`。 - * 当 ds_or_db 为 :class:`~fastNLP.io.DataBundle` 时,``prepare_Jittor_dataloader`` 会遍历 ``DataBundle`` 的数据集的 key-value - 来创建不同的 :class:`JittorDataLoader` 对象;当 key 中包含'train'字符串时,``prepare_jittor_dataloader`` 默认该 value 为 train 数据集, - 会将 batch_size 和 sampler 作为参数,其他 key 不包含 'train' 字符串的数据集则使用 non_train_size 和 non_train_sampler 作为参数。 - 最终根据 ``key: JittorDataLoader`` 组成 ``Dict[key, JittorDataLoader]`` 的字典返回。 + 根据 ``ds_or_db`` 的类型 ``[DataSet, DataBundle, Dict[name, Dataset]]`` 不同而有不同返回结果, 具体如下: + + * 当 ds_or_db 为 :class:`~fastNLP.io.DataSet` 时,``prepare_jittor_dataloader`` 会将使用的除了 non_train_batch_size 和 
non_train_sampler 以外的参数来 + 帮你实例化一个 :class:`JittorDataLoader` 对象并返回该对象。 详见 :class:`~fastNLP.core.dataloaders.JittorDataLoader`; + * 当 ds_or_db 为 :class:`~fastNLP.io.DataBundle` 时,``prepare_jittor_dataloader`` 会遍历 ``DataBundle`` 的数据集的 key-value + 来创建不同的 :class:`JittorDataLoader` 对象;当 key 中包含 ``'train'`` 字符串时,``prepare_jittor_dataloader`` 默认该 value 为训练数据集, + 会将 ``batch_size`` 和 ``sampler`` 作为参数,其他 key 不包含 ``'train'`` 字符串的数据集则使用 ``non_train_size`` 和 ``non_train_sampler`` 作为参数。 + 最终根据 ``key: JittorDataLoader`` 组成 ``Dict[key, JittorDataLoader]`` 的字典返回; * 当 ds_or_db 为 ``Dict[str, DataSet]`` 字典类型时, ``prepare_jittor_dataloader`` 会遍历 该 dict 的的 key-value 来创建不同的 - :class:`JittorDataLoader` 对象;当 key 中包含'train'字符串时,``prepare_Jittor_dataloader`` 默认该 value 为 train 数据集,会将 batch_size 和 sampler 作为参数, - 其他 key 不包含 'train' 字符串的数据集则使用 non_train_size 和 non_train_sampler 作为参数。最终根据 ``key: JittorDataLoader`` 组成 - ``Dict[key, JittorDataLoader]`` 的字典返回。 + :class:`JittorDataLoader` 对象;当 key 中包含 ``'train'`` 字符串时,``prepare_jittor_dataloader`` 默认该 value 为训练数据集,会将 ``batch_size`` 和 + ``sampler`` 作为参数,其他 key 不包含 ``'train'`` 字符串的数据集则使用 ``non_train_size`` 和 ``non_train_sampler`` 作为参数。最终根据 ``key: JittorDataLoader`` 组成 + ``Dict[key, JittorDataLoader]`` 的字典返回; - :param ds_or_db: 可以有以下三种取值, + :param ds_or_db: 可以有以下三种取值: - * ds_or_db 为 :class:`~fastNLP.io.DataBundle`, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典 - * ds_or_db 为 ``Dict[str, DataSet]`` 字典, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典 - * ds_or_db 为实现了 __getitem__() 和 __len__() 的对象 ,返回值为:class:`~fastNLP.TorchDataLoader` + * ds_or_db 为 :class:`~fastNLP.io.DataBundle`, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典; + * ds_or_db 为 ``Dict[str, DataSet]`` 字典, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典; + * ds_or_db 为实现了 :meth:`__getitem__` 和 :meth:`__len__` 的对象 ,返回值为 :class:`~fastNLP.core.dataloaders.JittorDataLoader`; :param non_train_batch_size: 如果传入的 ``ds_or_db`` 为 :class:`Dict` 或 :class:`~fastNLP.io.DataBundle` 对象,可以通过改参数 设置名称不为 `train` 的其他 ``dataset`` 的 
``batch_size``。 默认为 ``16``。 :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 'train' 则设置其 shuffle 为 True , + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 ``'train'`` 则设置其 shuffle 为 True , 其它的为 False 。 :param drop_last: 当 ``drop_last=True`` 时,:class:`JittorDataLoader` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 :param num_workers: 当 ``num_workers > 0`` 时, :class:`JittorDataLoader` 会开启 num_workers 个子进程来处理数据, 可以加快 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 - :param buffer_size: 每个进程占用的内存空间,默认为512M。主要是配合num_workers使用,用户可以自定义每个进程的内存大小。 + :param buffer_size: 每个进程占用的内存空间,默认为512M。主要是配合 ``num_workers`` 使用,用户可以自定义每个进程的内存大小。 :param stop_grad: 是否不使用梯度, 默认 ``True`` 。 - :param keep_numpy_array: 返回的数据是 ``np.array`` 类型而不是 ``jittor.Var`` 类型,默认为 ``False`` + :param keep_numpy_array: 返回的数据是 :class:`np.array` 类型而不是 :class:`jittor.Var` 类型,默认为 ``False`` :param endless: 是否让 :class:`JittorDataLoader` 无限返回数据,也就是将 dataset 循环使用使得返回数据是没有限制的。默认为 ``False``. :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. 
@@ -247,11 +242,8 @@ def prepare_jittor_dataloader(ds_or_db, batch_size: int = 16, shuffle: bool = No :class:`~fastNLP.core.dataset.DataSet` 的 dataset 对象。 * callate_fn 为 ``'auto'`` 时,:class:`JittorDataLoader` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 此时可以配套使用 :class:`JittorDataLoader` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 - * collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + * collate_fn 为 :class:`Callable` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 dataset 的一条数据;该 Callable 函数还应当返回一个对象。 - - :return: 返回数据类型为 :class:`Dict[str, JittorDataLoader]`, :class:`JittorDataLoader` 其中之一,根据输入 - ``ds_or_db`` 变化而变化。 """ from fastNLP.io.data_bundle import DataBundle diff --git a/fastNLP/core/dataloaders/oneflow_dataloader/fdl.py b/fastNLP/core/dataloaders/oneflow_dataloader/fdl.py index e68402ea..deed9281 100644 --- a/fastNLP/core/dataloaders/oneflow_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/oneflow_dataloader/fdl.py @@ -57,10 +57,42 @@ class OneflowDataLoader(DataLoader): * callate_fn 为 ``'auto'`` 时,``OneflowDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的取值。 此时可以配套使用 ``OneflowDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 - * callate_fn 为 ``None`` 时, ``OneflowDataLoadr`` 默认使用 oneflow DataLoader 自带的 collate_fn - * collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + * callate_fn 为 ``None`` 时, ``OneflowDataLoadr`` 默认使用 :class:`oneflow.utils.data.DataLoader` 自带的 collate_fn + * collate_fn 为 :class:`Callable` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + :param dataset: 实现了 __getitem__() 和 __len__() 的对象。 + :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 + :param non_train_batch_size: 非训练数据集的 ``OneflowDataLoader`` 批次大小,默认为 ``16`` 且当 ``batch_sampler`` 为 ``None`` 有效。 + :param 
shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 ``'train'`` 则设置其 shuffle 为 ``True`` , + 其它的为 False 。 + :param sampler: 实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , + 默认为 ``None``, 当其不为 ``None`` 时, shuffle 参数无效。 + :param non_train_sampler: 非训练数据集的的实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , + 默认为None, 当其不为 None 时, shuffle 参数无效。 + :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,,其__iter__() 方法每次都会返回一个 List 对象, List中的值为 + dataset 的下标 index ;默认为 ``None``,当其不为 ``None`` 时,``bacth_size``, ``sampler``, ``shuffle`` 参数均失效。 + :param num_workers: 当 ``num_workers > 0`` 时, ``OneflowDataLoader`` 会开启 ``num_workers`` 个子进程来处理数据, 可以加快 + 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 + :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. + + * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, + ``OneflowDataLoader`` 调用默认的 oneflow 框架的 ``DataLoader`` 自带的 `default_collate_fn` 作为 callate_fn 的默认值, 其无法处理 + :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 + * callate_fn 为 ``'auto'`` 时,``OneflowDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 + 此时可以配套使用 ``OneflowDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 + * collate_fn 为 :class:`Callable` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + + :param pin_memory: 如果其为 ``True``, 那么 ``OneflowDataLoader`` 会在返回数据张量之前将其 copy 到 cuda 的 pin memory 中。 + :param drop_last: 当 ``drop_last=True`` 时,``OneflowDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; + 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 + :param timeout: 子进程的输出队列获取数据的超时值 + :param worker_init_fn: init 函数,如果不设置为 ``None``,则将会在每个子进程初始化时调用该函数。 + :param multiprocessing_context: 多进程的上下文环境 + :param generator: 如果其不为 ``None``, 将会使用 RandomSampler 
去生成随机的 index 且会为每个子进程生成一个 ``base_seed`` + :param prefetch_factor: 每个 worker 提前装载的 samples 数量。``2`` 意味着在所有的进程中会有 2*num_workers 的数据被预取。默认值为 ``2`` . + :param persistent_workers: 如果其为 ``True``, ``OneflowDataLoader`` 在迭代完一次 dataset 后不会关闭所有进程。默认为 ``False`` """ def __init__(self, dataset, batch_size: int = 16, @@ -70,38 +102,7 @@ class OneflowDataLoader(DataLoader): timeout: float = 0, worker_init_fn: Optional[Callable] = None, multiprocessing_context=None, generator=None, prefetch_factor: int = 2, persistent_workers: bool = False, **kwargs) -> None: - """ - - :param dataset: 实现了 __getitem__() 和 __len__() 的对象。 - :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param shuffle: 是否打乱数据集, 默认为 ``False``。 - :param sampler: 实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , - 默认为None, 当其不为 None 时, shuffle 参数无效。 - :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,,其__iter__() 方法每次都会返回一个 List 对象, List中的值为 - dataset 的下标 index ;默认为 None,当其不为 None 时,bacth_size, sampler, shuffle 参数均失效。 - :param num_workers: 当 ``num_workers > 0`` 时, ``OneflowDataLoader`` 会开启 num_workers 个子进程来处理数据, 可以加快 - 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 - :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. 
- - * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, - ``OneflowDataLoader`` 调用默认的 oneflow 框架的 ``DataLoader`` 自带的 ``default_collate_fn`` 作为 callate_fn 的默认值, 其无法处理 - :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 - * callate_fn 为 ``'auto'`` 时,``OneflowDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 - 此时可以配套使用 ``OneflowDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 - * collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 - dataset 的一条数据;该 Callable 函数还应当返回一个对象。 - - :param pin_memory: 如果其为 ``True``, 那么 ``OneflowDataLoader`` 会在返回数据张量之前将其 copy 到 cud a的 pin memory 中。 - :param drop_last: 当 ``drop_last=True`` 时,``OneflowDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; - 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 - :param timeout: 子进程的输出队列获取数据的超时值 - :param worker_init_fn: init 函数,如果不设置为 None ,则将会在每个子进程初始化时调用该函数。 - :param multiprocessing_context: 多进程的上下文环境 - :param generator: 如果其不为 ``None``, 将会使用 RandomSampler 去生成随机的 index 且会为每个子进程生成一个 ``base_seed`` - :param prefetch_factor: 每个 worker 提前装载的 samples 数量。``2``意味着在所有的进程中会有 2*num_workers 的数据被预取。默认值为 ``2`` . 
- :param persistent_workers: 如果其为 ``True``, ``OneflowDataLoader`` 在迭代完一次 dataset 后不会关闭所有进程。默认为 ``False`` - """ if isinstance(dataset, DataSet) and collate_fn is None: raise ValueError("When use FastNLP DataSet, collate_fn must be not None") @@ -153,20 +154,20 @@ class OneflowDataLoader(DataLoader): """ 如果需要对某个 field 的内容进行特殊的调整,请使用这个函数。 - :param field_name: 需要调整的 field 的名称。如果 Dataset 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 - field 的 key 来表示,如果是 nested 的 dict,可以使用元组表示多层次的 key,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); - 如果 __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。如果该 field 在数据中没 - 有找到,则报错;如果 __getitem__ 返回的是就是整体内容,请使用 "_single" 。 - :param pad_val: 这个 field 的默认 pad 值。如果设置为 None,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 - field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 - 无意义。 - :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto'],分别代表,输出为 list, numpy.ndarray, - torch.Tensor, paddle.Tensor, jittor.Var, oneflow.Tensor 类型。若 pad_val 为 None ,该值无意义 。 - :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 - batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch - 形式,输出将被直接作为结果输出。 - :return: 返回 Collator + :param field_name: 需要调整的 field 的名称。如果 :meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1** 个元素。 + 如果该 field 在数据中没有找到,则报错;如果 :meth:`Dataset.__getitem__` 返回的是就是整体内容,请使用 "_single" 。 + :param pad_val: 这个 field 的默认 pad 值。如果设置为 ``None``,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 + field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 ``None`` 。如果 ``backend`` 为 ``None``, + 该值无意义。 + :param dtype: 对于需要 pad 的 field ,该 field 数据的 ``dtype`` 。 + :param backend: 可选 ``['raw', 'numpy', 'torch', 'paddle', 
'jittor', 'oneflow', 'auto']`` ,分别代表,输出为 :class:`list`, + :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`paddle.Tensor`, :class:`jittor.Var`, :class:`oneflow.Tensor` 类型。 + 若 ``pad_val`` 为 ``None`` ,该值无意义 。 + :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 ``pad_val``, ``dtype``, ``backend`` 等参数失效。``pad_fn`` 的输入为当前 field 的 + batch 形式。 collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。 + :return: 返回使用的 collator """ collator = self._get_collator() if isinstance(collator, Collator): @@ -190,15 +191,14 @@ class OneflowDataLoader(DataLoader): def set_ignore(self, *field_names) -> Collator: """ - 如果有的内容不希望输出,可以在此处进行设置,被设置的 field 将在 batch 的输出中被忽略。 - Example:: + 如果有的内容不希望输出,可以在此处进行设置,被设置的 field 将在 batch 的输出中被忽略:: - collator.set_ignore('field1', 'field2') + dataloader.set_ignore('field1', 'field2') - :param field_names: 需要忽略的 field 的名称。如果 Dataset 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 - field 的 key 来表示,如果是 nested 的 dict,可以使用元组来表示,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); 如果 - __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。 - :return: 返回 Collator 自身 + :param field_names: field_name: 需要调整的 field 的名称。如果 :meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1** 个元素。 + :return: 返回使用的 collator """ collator = self._get_collator() if isinstance(collator, Collator): @@ -230,56 +230,56 @@ def prepare_oneflow_dataloader(ds_or_db, non_train_batch_size: int = None) \ -> Union[OneflowDataLoader, Dict[str, OneflowDataLoader]]: """ - ``prepare_oneflow_dataloader`` 的功能是将输入的单个或多个 dataset 同时转为 ``OneflowDataloader``对象, 详见 :class:`~fastNLP.OneflowDataLoader`。 + ``prepare_oneflow_dataloader`` 的功能是将输入的单个或多个 dataset 同时转为 ``OneflowDataloader`` 对象, 详见 :class:`~fastNLP.OneflowDataLoader`。 根据 ds_or_db 的类型 ``[DataSet, DataBundle, Dict[name, Dataset]]`` 不同而有不同返回结果, 具体如下: - * 当 ds_or_db 为 
``DataSet``时,``prepare_oneflow_dataloader`` 会将使用的除了 non_train_batch_size 和 non_train_sampler 以外的参数来 - 帮你实例化一个 ``OneflowDataLoader`` 对象并返回该对象。 详见:class:`~fastNLP.core.dataloaders.OneflowDataLoader`。 + * 当 ds_or_db 为 ``DataSet`` 时,``prepare_oneflow_dataloader`` 会将使用的除了 ``non_train_batch_size`` 和 ``non_train_sampler`` 以外的参数来 + 帮你实例化一个 ``OneflowDataLoader`` 对象并返回该对象。 详见 :class:`~fastNLP.core.dataloaders.OneflowDataLoader`。 * 当 ds_or_db 为 :class:`~fastNLP.io.DataBundle` 时,``prepare_oneflow_dataloader`` 会遍历 ``DataBundle`` 的数据集的 key-value - 来创建不同的 ``OneflowDataLoader`` 对象;当 key 中包含'train'字符串时,``prepare_oneflow_dataloader`` 默认该 value 为 train 数据集, - 会将 batch_size 和 sampler 作为参数,其他 key 不包含 'train' 字符串的数据集则使用 non_train_size 和 non_train_sampler 作为参数。 - 最终根据 ``key: OneflowDataLoader`` 组成 ``Dict[key, OneflowDataLoader]`` 的字典返回。 + 来创建不同的 ``OneflowDataLoader`` 对象;当 key 中包含 ``'train'`` 字符串时,``prepare_oneflow_dataloader`` 默认该 value 为训练数据集, + 会将 ``batch_size`` 和 ``sampler`` 作为参数,其他 key 不包含 ``'train'`` 字符串的数据集则使用 ``non_train_size`` 和 ``non_train_sampler`` 作为参数。 + 最终根据 ``key: OneflowDataLoader`` 组成 ``Dict[key, OneflowDataLoader]`` 的字典返回。 * 当 ds_or_db 为 ``Dict[str, DataSet]`` 字典类型时, ``prepare_oneflow_dataloader`` 会遍历 该 dict 的的 key-value 来创建不同的 - ``OneflowDataLoader`` 对象;当 key 中包含'train'字符串时,``prepare_oneflow_dataloader`` 默认该 value 为 train 数据集,会将 batch_size 和 sampler 作为参数, - 其他 key 不包含 'train' 字符串的数据集则使用 non_train_size 和 non_train_sampler 作为参数。最终根据 ``key: OneflowDataLoader`` 组成 - ``Dict[key, OneflowDataLoader]`` 的字典返回。 + ``OneflowDataLoader`` 对象;当 key 中包含 ``'train'`` 字符串时,``prepare_oneflow_dataloader`` 默认该 value 为训练数据集,会将 ``batch_size`` 和 ``sampler`` 作为参数, + 其他 key 不包含 ``'train'`` 字符串的数据集则使用 ``non_train_size`` 和 ``non_train_sampler`` 作为参数。最终根据 ``key: OneflowDataLoader`` 组成 + ``Dict[key, OneflowDataLoader]`` 的字典返回。 :param ds_or_db: 可以有以下三种取值, - * ds_or_db 为 :class:`~fastNLP.io.DataBundle`, 返回值为 ``Dict[str, OneflowDataLoader]`` 的字典 - * ds_or_db 为 ``Dict[str, DataSet]`` 字典, 返回值为 ``Dict[str, 
OneflowDataLoader]`` 的字典 - * ds_or_db 为实现了 __getitem__() 和 __len__() 的对象 ,返回值为:class:`~fastNLP.OneflowDataLoader` + * ds_or_db 为 :class:`~fastNLP.io.DataBundle`, 返回值为 ``Dict[str, OneflowDataLoader]`` 的字典; + * ds_or_db 为 ``Dict[str, DataSet]`` 字典, 返回值为 ``Dict[str, OneflowDataLoader]`` 的字典; + * ds_or_db 为实现了 __getitem__() 和 __len__() 的对象 ,返回值为 :class:`~fastNLP.core.dataloaders.OneflowDataLoader`; :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param non_train_batch_size: 非 'train' 数据集的 ``OneflowDataLoader`` 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 'train' 则设置其 shuffle 为 True , + :param non_train_batch_size: 非训练数据集的 ``OneflowDataLoader`` 批次大小,默认为 ``16`` 且当 ``batch_sampler`` 为 ``None`` 有效。 + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 ``'train'`` 则设置其 shuffle 为 ``True`` , 其它的为 False 。 :param sampler: 实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , + 默认为 ``None``, 当其不为 ``None`` 时, shuffle 参数无效。 + :param non_train_sampler: 非训练数据集的的实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , 默认为None, 当其不为 None 时, shuffle 参数无效。 - :param non_train_sampler: 非 'train' 数据集的的实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , - 默认为None, 当其不为 None 时, shuffle 参数无效。 - :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,,其__iter__() 方法每次都会返回一个 List 对象, List中的值为 - dataset 的下标 index ;默认为 None,当其不为 None 时,bacth_size, sampler, shuffle 参数均失效。 - :param num_workers: 当 ``num_workers > 0`` 时, ``OneflowDataLoader`` 会开启 num_workers 个子进程来处理数据, 可以加快 + :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,,其__iter__() 方法每次都会返回一个 List 对象, List 中的值为 + dataset 的下标 index ;默认为 ``None``,当其不为 ``None`` 时,``bacth_size``, ``sampler``, ``shuffle`` 参数均失效。 + :param num_workers: 当 ``num_workers > 0`` 时, ``OneflowDataLoader`` 会开启 ``num_workers`` 个子进程来处理数据, 可以加快 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 
:param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. - * callate_fn 为 'None' 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, - ``OneflowDataLoader`` 调用默认的 oneflow 框架的 ``DataLoader`` 自带的 `default_collate_fn` 作为 callate_fn 的默认值, 其无法处理 - :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 - * callate_fn 为 ``'auto'`` 时,`OneflowDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 - 此时可以配套使用 ``OneflowDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 - * `collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 - dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, + ``OneflowDataLoader`` 调用默认的 oneflow 框架的 ``DataLoader`` 自带的 `default_collate_fn` 作为 callate_fn 的默认值, 其无法处理 + :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 + * callate_fn 为 ``'auto'`` 时,``OneflowDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 + 此时可以配套使用 ``OneflowDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 + * collate_fn 为 :class:`Callable` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + dataset 的一条数据;该 Callable 函数还应当返回一个对象。 - :param pin_memory: 如果其为 ``True``, 那么 ``OneflowDataLoader`` 会在返回数据张量之前将其 copy 到 cud a的 pin memory 中。 + :param pin_memory: 如果其为 ``True``, 那么 ``OneflowDataLoader`` 会在返回数据张量之前将其 copy 到 cuda 的 pin memory 中。 :param drop_last: 当 ``drop_last=True`` 时,``OneflowDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 :param timeout: 子进程的输出队列获取数据的超时值 - :param worker_init_fn: init 函数,如果不设置为 None ,则将会在每个子进程初始化时调用该函数。 + :param worker_init_fn: init 函数,如果不设置为 ``None``,则将会在每个子进程初始化时调用该函数。 :param multiprocessing_context: 多进程的上下文环境 - :param generator: 如果其不为 ``None``, 将会使用 
RandomSampler 去生成随机的 index 且会为每个子进程生成一个``base_seed`` - :param prefetch_factor: 每个 worker 提前装载的 samples 数量。``2``意味着在所有的进程中会有 2*num_workers 的数据被预取。默认值为 ``2`` . + :param generator: 如果其不为 ``None``, 将会使用 RandomSampler 去生成随机的 index 且会为每个子进程生成一个 ``base_seed`` + :param prefetch_factor: 每个 worker 提前装载的 samples 数量。 ``2`` 意味着在所有的进程中会有 2*num_workers 的数据被预取。默认值为 ``2`` 。 :param persistent_workers: 如果其为 ``True``, ``OneflowDataLoader`` 在迭代完一次 dataset 后不会关闭所有进程。默认为 ``False`` """ diff --git a/fastNLP/core/dataloaders/paddle_dataloader/__init__.py b/fastNLP/core/dataloaders/paddle_dataloader/__init__.py index ab9523e5..a5ae3a68 100644 --- a/fastNLP/core/dataloaders/paddle_dataloader/__init__.py +++ b/fastNLP/core/dataloaders/paddle_dataloader/__init__.py @@ -1,6 +1,6 @@ __all__ = [ + 'PaddleDataLoader', 'prepare_paddle_dataloader', - 'PaddleDataLoader' ] from .fdl import PaddleDataLoader, prepare_paddle_dataloader \ No newline at end of file diff --git a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py index 9eec6e8f..575ffef4 100644 --- a/fastNLP/core/dataloaders/paddle_dataloader/fdl.py +++ b/fastNLP/core/dataloaders/paddle_dataloader/fdl.py @@ -53,6 +53,7 @@ class PaddleDataLoader(DataLoader): 1. ``PaddleDataLoader`` 支持输入的 dataset 是无框架的,只要实现了 __getitem__() 和 __len__() 的对象即可, 当不使用 :class:`~fastNLP.core.dataset.DataSet` 时也不需要传入 collate_fn, 只要只需要将 ``collate_fn='auto'`` 就能够自动 探测数据的类型并判断能否 pad 。此时可以调用 ``set_pad`` 和 ``set_ignore`` 方法来设置 field 的 pad_val 或者忽略某个 field 的 pad 操作。 + Example:: from fastNLP import PaddleDataLoader @@ -76,9 +77,46 @@ class PaddleDataLoader(DataLoader): .. note:: 当传入的dataset为fastNLP的DataSet时,collate_fn不能为None。默认可以是"auto"或者自定义callable函数。 - 3. 当 collate_fn 为 ``Callable`` 时,该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + 3. 
当 collate_fn 为 :class:`Callable` 时,该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 dataset 的一条数据;该 Callable 函数还应当返回一个对象。 - + + :param dataset: 实现了 __getitem__() 和 __len__() 的对象。 + :param feed_list: feed Tensor list. + 这个张量能被 ``paddle.static.data`` 创建。 如果 :attr:`return_list` 是 ``False``, 那么 :attr:`feed_list` + 应该被设置。 默认为 ``None `` 。 + :param places: 将数据放进的一个 list 的 place。 :attr:`places` 能为 None. + 如果 :attr:`places` 为 None, 默认放在 CPUPlace 或者 CUDAPlace(0) 设备上。 如果 ``places`` 是一个 list 类型的 字符串, 那么字符串 + 可以是 ``cpu`` , ``gpu:x`` 或者 ``gpu_pinned`` , 其中 ``x`` 是 gpu 的下标。 + :param return_list: 每个设备上的返回值是否为以列表形式显示。 如果 :attr:`return_list=False`, + 每个设备上的返回值值为 str -> Tensor 的 dict, 其中 dict 的 key 为每个 fed Tensors 的名字。 + 如果 :attr:`return_list=True`, 每个设备上的返回值值为 list(Tensor)。 :attr:`return_list` 只能在动态图情况下设置为 ``True`` . + 默认值为 ``True`` 。 + :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,,其__iter__() 方法每次都会返回一个 List 对象, List中的值为 + dataset 的下标 index ;默认为 ``None``,当其不为 ``None`` 时,``bacth_size``, ``shuffle`` 参数均失效。 + :param batch_size: 批次大小,默认为 ``16`` 且当 ``batch_sampler`` 为 None 有效。 + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 ``'train'`` 则设置其 shuffle 为 ``True`` , + 其它的为 False 。 + :param drop_last: 当 ``drop_last=True`` 时,``PaddleDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; + 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 + :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. 
+ + * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, + ``PaddleDataLoader`` 调用默认的 Paddle 框架的 ``DataLoader`` 自带的 `default_collate_fn` 作为 callate_fn 的默认值, 其无法处理 + :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 + * callate_fn 为 ``'auto'`` 时,``PaddleDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 + 此时可以配套使用 ``PaddleDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 + * collate_fn 为 :class:`Callable` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + + :param num_workers: 当 ``num_workers > 0`` 时, ``PaddleDataLoader`` 会开启 ``num_workers`` 个子进程来处理数据, 可以加快 + 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 + :param use_buffer_reader: 是否开启 buffer_reader 。如果 ``use_buffer_reader=True`` ,那么 ``PaddleDataLoader`` 会异步地预取下一个 batch 的 + 数据,因此它将会加快数据传输的速度,但是将会占用更多的内存或者显存。默认值是 ``True``。 + :param use_shared_memory: 是否使用共享内存。当 ``use_shared_memory=True`` 时,将采用共享内存来加快将数据放进进程队列。建议仅当计算机上的 + 共享空间足够大时。(例如 Linux 上的 /dev/shm/ 空间足够大)共享内存仅在多进程模式( ``num_workers>0`` )下生效。 + :param timeout: 从子进程的输出队列获取数据的超时值 + :param worker_init_fn: init 函数,如果不设置为 None ,则将会在每个子进程初始化时调用该函数。 + :param persistent_workers: 如果其为 ``True``, ``PaddleDataLoader`` 在迭代完一次 dataset 后不会关闭所有进程。默认为 ``False`` """ def __init__(self, dataset, feed_list=None, places=None, @@ -88,45 +126,7 @@ class PaddleDataLoader(DataLoader): num_workers: int = 0, use_buffer_reader: bool = True, use_shared_memory: bool = True, timeout: int = 0, worker_init_fn: Callable = None, persistent_workers=False) -> None: - """ - - :param dataset: 实现了 __getitem__() 和 __len__() 的对象。 - :param feed_list: feed Tensor list。 - 这个张量能被 :code:`paddle.static.data()` 创建。 如果 :attr:`return_list` 是 ``False``, 那么 :attr:`feed_list` - 应该被设置。 默认为 ``None`` - :param places: 将数据放进的一个 list 的 place。 :attr:`places` 能为 None。 - 如果 :attr:`places` 为 None, 默认放在 CPUPlace 或者 CUDAPlace(0) 设备上。 
如果 ``places`` 是一个 list 类型的 字符串, 那么字符串 - 可以是 ``cpu`` , ``gpu:x`` 或者 ``gpu_pinned`` , 其中 ``x`` 是 gpu 的下标。 - :param return_list: 每个设备上的返回值是否为以列表形式显示。 如果 :attr:`return_list=False`, 每个设备上的返回值值为 str -> Tensor 的 dict, - 其中 dict 的 key 为每个 fed Tensors 的名字。如果 :attr:`return_list` 为 ``True`` , 每个设备上的返回值值为 list(Tensor)。 :attr:`return_list` - 只能在动态图情况下设置为 ``True`` 。默认值为 ``True`` 。 - :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,,其__iter__() 方法每次都会返回一个 List 对象, List中的值为 - dataset 的下标 index ;默认为 None,当其不为 None 时,bacth_size, shuffle 参数均失效。 - :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param shuffle: 是否将数据打乱,若``shuffle=True`` 则会将dataset打乱;若否则什么也不做。 - :param drop_last: 当 ``drop_last=True`` 时,``PaddleDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; - 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 - :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. - - * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, - ``PaddleDataLoader`` 调用默认的 Paddle 框架的 ``DataLoader`` 自带的 ``default_collate_fn`` 作为 callate_fn 的默认值, 其无法处理 - :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 - * callate_fn 为 ``'auto'`` 时,``PaddleDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 - 此时可以配套使用 ``PaddleDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 - * collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 - dataset 的一条数据;该 Callable 函数还应当返回一个对象。 - - :param num_workers: 当 ``num_workers > 0`` 时, ``PaddleDataLoader`` 会开启 num_workers 个子进程来处理数据, 可以加快 - 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 - :param use_buffer_reader: 是否开启 buffer_reader 。如果 ``use_buffer_reader=True`` ,那么 ``PaddleDataLoader`` 会异步地预取下一个 batch 的 - 数据,因此它将会加快数据传输的速度,但是将会占用更多的内存或者显存。默认值是 ``True``。 - :param use_shared_memory: 是否使用共享内存。当 ``use_shared_memory=True`` 
时,将采用共享内存来加快将数据放进进程队列。建议仅当计算机上的 - 共享空间足够大时。(例如 Linux 上的 /dev/shm/ 空间足够大)共享内存仅在多进程模式( num_workers>0 )下生效。 - :param timeout: 从子进程的输出队列获取数据的超时值 - :param worker_init_fn: init 函数,如果不设置为 None ,则将会在每个子进程初始化时调用该函数。 - :param persistent_workers: 如果其为 ``True``, ``PaddleDataLoader`` 在迭代完一次 dataset 后不会关闭所有进程。默认为 ``False`` - """ # FastNLP Datset, collate_fn not None if isinstance(dataset, FDataSet) and collate_fn is None: raise ValueError("When use FastNLP DataSet, collate_fn must be not None") @@ -186,20 +186,20 @@ class PaddleDataLoader(DataLoader): """ 如果需要对某个 field 的内容进行特殊的调整,请使用这个函数。 - :param field_name: 需要调整的 field 的名称。如果 Dataset 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 - field 的 key 来表示,如果是 nested 的 dict,可以使用元组表示多层次的 key,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); - 如果 __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。如果该 field 在数据中没 - 有找到,则报错;如果 __getitem__ 返回的是就是整体内容,请使用 "_single" 。 - :param pad_val: 这个 field 的默认 pad 值。如果设置为 None,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 - field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 - 无意义。 - :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto'],分别代表,输出为 list, numpy.ndarray, - torch.Tensor, paddle.Tensor, jittor.Var, oneflow.Tensor 类型。若 pad_val 为 None ,该值无意义 。 - :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 - batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch - 形式,输出将被直接作为结果输出。 - :return: 返回 Collator 自身 + :param field_name: 需要调整的 field 的名称。如果 :meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1** 个元素。 + 如果该 field 在数据中没有找到,则报错;如果 :meth:`Dataset.__getitem__` 返回的是就是整体内容,请使用 "_single" 。 + :param pad_val: 这个 field 的默认 pad 值。如果设置为 ``None``,则表示该 field 不需要 
pad , fastNLP 默认只会对可以 pad 的 + field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 ``None`` 。如果 ``backend`` 为 ``None``, + 该值无意义。 + :param dtype: 对于需要 pad 的 field ,该 field 数据的 ``dtype`` 。 + :param backend: 可选 ``['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto']`` ,分别代表,输出为 :class:`list`, + :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`paddle.Tensor`, :class:`jittor.Var`, :class:`oneflow.Tensor` 类型。 + 若 ``pad_val`` 为 ``None`` ,该值无意义 。 + :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 ``pad_val``, ``dtype``, ``backend`` 等参数失效。``pad_fn`` 的输入为当前 field 的 + batch 形式。 collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。 + :return: 返回使用的 collator """ collator = self._get_collator() if isinstance(collator, Collator): @@ -223,15 +223,14 @@ class PaddleDataLoader(DataLoader): def set_ignore(self, *field_names) -> Collator: """ - 如果有的内容不希望输出,可以在此处进行设置,被设置的 field 将在 batch 的输出中被忽略。 - Example:: + 如果有的内容不希望输出,可以在此处进行设置,被设置的 field 将在 batch 的输出中被忽略:: - collator.set_ignore('field1', 'field2') + dataloader.set_ignore('field1', 'field2') - :param field_names: 需要忽略的 field 的名称。如果 Dataset 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 - field 的 key 来表示,如果是 nested 的 dict,可以使用元组来表示,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); 如果 - __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。 - :return: 返回 Collator 自身 + :param field_names: field_name: 需要调整的 field 的名称。如果 :meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1** 个元素。 + :return: 返回使用的 collator """ collator = self._get_collator() if isinstance(collator, Collator): @@ -260,59 +259,59 @@ def prepare_paddle_dataloader(ds_or_db, feed_list=None, places=None, non_train_batch_size: int = None) \ -> Union[Dict[str, PaddleDataLoader], PaddleDataLoader]: """ - ``prepare_paddle_dataloader`` 的功能是将输入的单个或多个 dataset 同时转为 ``PaddleDataloader``对象, 详见 
:class:`~fastNLP.PaddleDataLoader`。 + ``prepare_paddle_dataloader`` 的功能是将输入的单个或多个 dataset 同时转为 ``PaddleDataloader`` 对象, 详见 :class:`~fastNLP.PaddleDataLoader`。 根据 ds_or_db 的类型 ``[DataSet, DataBundle, Dict[name, Dataset]]`` 不同而有不同返回结果, 具体如下: - * 当 ds_or_db 为 ``DataSet``时,``prepare_paddle_dataloader`` 会将使用的除了 non_train_batch_size 和 non_train_sampler 以外的参数来 - 帮你实例化一个 ``PaddleDataLoader`` 对象并返回该对象。 详见:class:`~fastNLP.core.dataloaders.PaddleDataLoader`。 + * 当 ds_or_db 为 ``DataSet`` 时,``prepare_paddle_dataloader`` 会将除了 ``non_train_batch_size`` 和 ``non_train_sampler`` 以外的参数来 + 帮你实例化一个 ``PaddleDataLoader`` 对象并返回该对象。 详见 :class:`~fastNLP.core.dataloaders.PaddleDataLoader`。 * 当 ds_or_db 为 :class:`~fastNLP.io.DataBundle` 时,``prepare_paddle_dataloader`` 会遍历 ``DataBundle`` 的数据集的 key-value - 来创建不同的 ``PaddleDataLoader`` 对象;当 key 中包含'train'字符串时,``prepare_Paddle_dataloader`` 默认该 value 为 train 数据集, - 会将 batch_size 和 sampler 作为参数,其他 key 不包含 'train' 字符串的数据集则使用 non_train_size 和 non_train_sampler 作为参数。 - 最终根据 ``key: PaddleDataLoader`` 组成 ``Dict[key, PaddleDataLoader]`` 的字典返回。 + 来创建不同的 ``PaddleDataLoader`` 对象;当 key 中包含 ``'train'`` 字符串时,``prepare_Paddle_dataloader`` 默认该 value 为训练数据集, + 会将 ``batch_size`` 和 ``sampler`` 作为参数,其他 key 不包含 ``'train'`` 字符串的数据集则使用 ``non_train_size`` 和 ``non_train_sampler`` 作为参数。 + 最终根据 ``key: PaddleDataLoader`` 组成 ``Dict[key, PaddleDataLoader]`` 的字典返回。 * 当 ds_or_db 为 ``Dict[str, DataSet]`` 字典类型时, ``prepare_paddle_dataloader`` 会遍历 该 dict 的的 key-value 来创建不同的 - ``PaddleDataLoader`` 对象;当 key 中包含'train'字符串时,``prepare_paddle_dataloader`` 默认该 value 为 train 数据集,会将 batch_size 和 sampler 作为参数, - 其他 key 不包含 'train' 字符串的数据集则使用 non_train_size 和 non_train_sampler 作为参数。最终根据 ``key: PaddleDataLoader`` 组成 - ``Dict[key, PaddleDataLoader]`` 的字典返回。 + ``PaddleDataLoader`` 对象;当 key 中包含 ``'train'`` 字符串时,``prepare_paddle_dataloader`` 默认该 value 为训练数据集,会将 ``batch_size`` 和 ``sampler`` 作为参数, + 其他 key 不包含 ``'train'`` 字符串的数据集则使用 ``non_train_size`` 和 ``non_train_sampler`` 作为参数。最终根据 ``key: 
PaddleDataLoader`` 组成 + ``Dict[key, PaddleDataLoader]`` 的字典返回。 :param ds_or_db: 可以有以下三种取值, - * ds_or_db 为 :class:`~fastNLP.io.DataBundle`, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典 - * ds_or_db 为 ``Dict[str, DataSet]`` 字典, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典 - * ds_or_db 为实现了 __getitem__() 和 __len__() 的对象 ,返回值为:class:`~fastNLP.TorchDataLoader` + * ds_or_db 为 :class:`~fastNLP.io.DataBundle`, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典; + * ds_or_db 为 ``Dict[str, DataSet]`` 字典, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典; + * ds_or_db 为实现了 __getitem__() 和 __len__() 的对象 ,返回值为 :class:`~fastNLP.TorchDataLoader`; - :param feed_list: (list(Tensor)|tuple(Tensor)): feed Tensor list. - 这个张量能被 :code:`paddle.static.data()` 创建。 如果:attr:`return_list` 是 ``False``, 那么 :attr:`feed_list` - 应该被设置。 默认为 ``None `` - :param places: (list(Place)|tuple(Place)|list(str)|optional): 将数据放进的一个 list 的 place。 :attr:`places` 能为 None. - 如果 :attr:`places` 为 None, 默认放在 CPUPlace 或者 CUDAPlace(0) 设备上。 如果 ``places`` 是一个 list 类型的 字符串, 那么字符串 - 可以是 ``cpu`` , ``gpu:x`` 或者 ``gpu_pinned`` , 其中 ``x`` 是 gpu 的下标。 + :param feed_list: feed Tensor list. + 这个张量能被 ``paddle.static.data`` 创建。 如果 :attr:`return_list` 是 ``False``, 那么 :attr:`feed_list` + 应该被设置。 默认为 ``None `` 。 + :param places: 将数据放进的一个 list 的 place。 :attr:`places` 能为 None. + 如果 :attr:`places` 为 None, 默认放在 CPUPlace 或者 CUDAPlace(0) 设备上。 如果 ``places`` 是一个 list 类型的 字符串, 那么字符串 + 可以是 ``cpu`` , ``gpu:x`` 或者 ``gpu_pinned`` , 其中 ``x`` 是 gpu 的下标。 :param return_list: 每个设备上的返回值是否为以列表形式显示。 如果 :attr:`return_list=False`, - 每个设备上的返回值值为 str -> Tensor 的 dict, 其中 dict 的 key 为每个 fed Tensors 的名字。 - 如果 :attr:`return_list=True`, 每个设备上的返回值值为 list(Tensor)。 :attr:`return_list` 只能在动态图情况下设置为 ``True`` . - 默认值为 ``True`` 。 + 每个设备上的返回值值为 str -> Tensor 的 dict, 其中 dict 的 key 为每个 fed Tensors 的名字。 + 如果 :attr:`return_list=True`, 每个设备上的返回值值为 list(Tensor)。 :attr:`return_list` 只能在动态图情况下设置为 ``True`` . 
+ 默认值为 ``True`` 。 :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,,其__iter__() 方法每次都会返回一个 List 对象, List中的值为 - dataset 的下标 index ;默认为 None,当其不为 None 时,bacth_size, shuffle 参数均失效。 + dataset 的下标 index ;默认为 ``None``,当其不为 ``None`` 时,``bacth_size``, ``shuffle`` 参数均失效。 :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 'train' 则设置其 shuffle 为 True , + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 ``'train'`` 则设置其 shuffle 为 ``True`` , 其它的为 False 。 :param drop_last: 当 ``drop_last=True`` 时,``PaddleDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; - 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 + 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. - * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, - ``PaddleDataLoader`` 调用默认的 Paddle 框架的 ``DataLoader`` 自带的 `default_collate_fn` 作为 callate_fn 的默认值, 其无法处理 - :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 + * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, + ``PaddleDataLoader`` 调用默认的 Paddle 框架的 ``DataLoader`` 自带的 `default_collate_fn` 作为 callate_fn 的默认值, 其无法处理 + :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 * callate_fn 为 ``'auto'`` 时,``PaddleDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 - 此时可以配套使用 ``PaddleDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 - * `collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 - dataset 的一条数据;该 Callable 函数还应当返回一个对象。 - - :param num_workers: 当 ``num_workers > 0`` 时, ``PaddleDataLoader`` 会开启 num_workers 个子进程来处理数据, 可以加快 - 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 - :param use_buffer_reader: 是否开启 buffer_reader 
。如果 `use_buffer_reader=True`` ,那么 ``PaddleDataLoader` `会异步的预取下一个 batch 的 - 数据,因此它将会加快数据传输的速度,但是将会占用更多的内存或者显存。默认值是 ``True``。 + 此时可以配套使用 ``PaddleDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 + * collate_fn 为 :class:`Callable` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + + :param num_workers: 当 ``num_workers > 0`` 时, ``PaddleDataLoader`` 会开启 ``num_workers`` 个子进程来处理数据, 可以加快 + 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 + :param use_buffer_reader: 是否开启 buffer_reader 。如果 ``use_buffer_reader=True`` ,那么 ``PaddleDataLoader`` 会异步地预取下一个 batch 的 + 数据,因此它将会加快数据传输的速度,但是将会占用更多的内存或者显存。默认值是 ``True``。 :param use_shared_memory: 是否使用共享内存。当 ``use_shared_memory=True`` 时,将采用共享内存来加快将数据放进进程队列。建议仅当计算机上的 - 共享空间足够大时。(例如 Linux 上的 /dev/shm/ 空间足够大)共享内存仅在多进程模式( num_workers>0 )下生效。 + 共享空间足够大时。(例如 Linux 上的 /dev/shm/ 空间足够大)共享内存仅在多进程模式( ``num_workers>0`` )下生效。 :param timeout: 从子进程的输出队列获取数据的超时值 :param worker_init_fn: init 函数,如果不设置为 None ,则将会在每个子进程初始化时调用该函数。 :param persistent_workers: 如果其为 ``True``, ``PaddleDataLoader`` 在迭代完一次 dataset 后不会关闭所有进程。默认为 ``False`` diff --git a/fastNLP/core/dataloaders/prepare_dataloader.py b/fastNLP/core/dataloaders/prepare_dataloader.py index 1bac3257..65b739aa 100644 --- a/fastNLP/core/dataloaders/prepare_dataloader.py +++ b/fastNLP/core/dataloaders/prepare_dataloader.py @@ -20,33 +20,33 @@ def prepare_dataloader(dataset, batch_size: int = 16, shuffle: bool = None, drop """ 自动创建合适的 ``DataLoader`` 对象。例如,检测当当前环境是 ``torch`` 的,则返回 ``TorchDataLoader`` , 是 ``paddle`` 的则 返回 ``PaddleDataLoader`` 。如果有更多需要定制的参数,请直接使用对应的 ``prepare`` 函数,例如 - :func:`~fastNLP.prepare_torch_dataloader` 或 :func:`~fastNLP.prepare_paddle_dataloader` 等。 + :func:`~fastNLP.core.dataloaders.prepare_torch_dataloader` 或 :func:`~fastNLP.core.dataloaders.prepare_paddle_dataloader` 等。 :param dataset: 实现 __getitem__() 和 __len__() 的对象;或这种对象的序列;或字典。 * 为单个数据集对象时,返回一个 DataLoader 。 * 为数据集对象序列时,返回一个序列的 
DataLoader 。 - * 为字典型 或 :class:`~fastNLP.io.DataBundle` 数据时,返回 `Dict` 类型的数据。 + * 为字典型 或 :class:`~fastNLP.io.DataBundle` 数据时,返回 :class:`Dict` 类型的数据。 :param batch_size: 批次大小。 - :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 'train' 则设置其 shuffle 为 True , + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 ``'train'`` 则设置其 shuffle 为 ``True`` , 其它的为 False 。 - :param drop_last: 当最后一个 batch 不足 batch_size 数量的是否,是否丢弃。 + :param drop_last: 当最后一个 batch 不足 ``batch_size`` 数量的是否,是否丢弃。 :param collate_fn: 用于处理一个 batch 的函数,一般包括 padding 和转为 tensor。有以下三种取值: * 为 ``auto`` 时,使用 :class:`~fastNLP.Collator` 进行 padding 和 转tensor 。 - * 为 ``Callable`` 时,应当接受一个 ``batch`` 的数据作为参数,同时输出一个对象 。 + * 为 :class:`Callable` 时,应当接受一个 ``batch`` 的数据作为参数,同时输出一个对象 。 * 为 ``None`` 时,使用各个框架的 DataLoader 的默认 ``collate_fn`` 。 :param num_workers: 使用多少进程进行数据的 fetch 。 :param backend: 当前支持 ``["auto", "torch", "paddle", "jittor", "oneflow"]`` 四种类型。 - * 为 ``auto`` 时,首先(1) 根据环境变量 "FASTNLP_BACKEND" 进行判断;如果没有设置则,(2)通过当前 + * 为 ``auto`` 时,首先根据环境变量 ``"FASTNLP_BACKEND"`` 进行判断;如果没有设置则通过当前 ``sys.modules`` 中已经 import 的 ``backend`` 进行判定。如果以上均无法判定,则报错。如果找到了 ``backend`` ,则按照下述的方式处理。 - * 为 ``torch`` 时,使用 :func:`~fastNLP.prepare_torch_dataloader` 。 - * 为 ``paddle`` 时,使用 :func:`~fastNLP.prepare_paddle_dataloader` 。 - * 为 ``jittor`` 时,使用 :func:`~fastNLP.prepare_jittor_dataloader` 。 - * 为 ``oneflow`` 时,使用 :func:`~fastNLP.prepare_oneflow_dataloader` 。 + * 为 ``torch`` 时,使用 :func:`~fastNLP.core.dataloaders.prepare_torch_dataloader` 。 + * 为 ``paddle`` 时,使用 :func:`~fastNLP.core.dataloaders.prepare_paddle_dataloader` 。 + * 为 ``jittor`` 时,使用 :func:`~fastNLP.core.dataloaders.prepare_jittor_dataloader` 。 + * 为 ``oneflow`` 时,使用 :func:`~fastNLP.core.dataloaders.prepare_oneflow_dataloader` 。 :return """ diff --git a/fastNLP/core/dataloaders/torch_dataloader/fdl.py b/fastNLP/core/dataloaders/torch_dataloader/fdl.py index 09211f71..84ad1f53 100644 --- a/fastNLP/core/dataloaders/torch_dataloader/fdl.py +++ 
b/fastNLP/core/dataloaders/torch_dataloader/fdl.py @@ -58,9 +58,41 @@ class TorchDataLoader(DataLoader): * callate_fn 为 ``'auto'`` 时,``TorchDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的取值。 此时可以配套使用 ``TorchDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 * callate_fn 为 ``None`` 时, ``TorchDataLoadr`` 默认使用 torch DataLoader 自带的 collate_fn - * collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + * collate_fn 为 :class:`Callable` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + :param dataset: 实现了 __getitem__() 和 __len__() 的对象。 + :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 + :param non_train_batch_size: 非训练数据集的 ``TorchDataLoader`` 批次大小,默认为 ``16`` 且当 ``batch_sampler`` 为 ``None`` 有效。 + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 ``'train'`` 则设置其 shuffle 为 ``True`` , + 其它的为 False 。 + :param sampler: 实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , + 默认为 ``None``, 当其不为 ``None`` 时, shuffle 参数无效。 + :param non_train_sampler: 非训练数据集的的实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , + 默认为None, 当其不为 None 时, shuffle 参数无效。 + :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,,其__iter__() 方法每次都会返回一个 List 对象, List中的值为 + dataset 的下标 index ;默认为 ``None``,当其不为 ``None`` 时,``bacth_size``, ``sampler``, ``shuffle`` 参数均失效。 + :param num_workers: 当 ``num_workers > 0`` 时, ``TorchDataLoader`` 会开启 ``num_workers`` 个子进程来处理数据, 可以加快 + 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 + :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. 
+ + * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, + ``TorchDataLoader`` 调用默认的 torch 框架的 ``DataLoader`` 自带的 `default_collate_fn` 作为 callate_fn 的默认值, 其无法处理 + :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 + * callate_fn 为 ``'auto'`` 时,``TorchDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 + 此时可以配套使用 ``TorchDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 + * collate_fn 为 :class:`Callable` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + + :param pin_memory: 如果其为 ``True``, 那么 ``TorchDataLoader`` 会在返回数据张量之前将其 copy 到 cud a的 pin memory 中。 + :param drop_last: 当 ``drop_last=True`` 时,``TorchDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; + 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 + :param timeout: 子进程的输出队列获取数据的超时值 + :param worker_init_fn: init 函数,如果不设置为 ``None``,则将会在每个子进程初始化时调用该函数。 + :param multiprocessing_context: 多进程的上下文环境 + :param generator: 如果其不为 ``None``, 将会使用 RandomSampler 去生成随机的 index 且会为每个子进程生成一个 ``base_seed`` + :param prefetch_factor: 每个 worker 提前装载的 samples 数量。``2`` 意味着在所有的进程中会有 2*num_workers 的数据被预取。默认值为 ``2`` . 
+ :param persistent_workers: 如果其为 ``True``, ``TorchDataLoader`` 在迭代完一次 dataset 后不会关闭所有进程。默认为 ``False`` """ def __init__(self, dataset, batch_size: int = 16, @@ -70,38 +102,7 @@ class TorchDataLoader(DataLoader): timeout: float = 0, worker_init_fn: Optional[Callable] = None, multiprocessing_context=None, generator=None, prefetch_factor: int = 2, persistent_workers: bool = False, **kwargs) -> None: - """ - :param dataset: 实现了 __getitem__() 和 __len__() 的对象。 - :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param shuffle: 是否打乱数据集, 默认为 ``False``。 - :param sampler: 实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , - 默认为None, 当其不为 None 时, shuffle 参数无效。 - :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,,其__iter__() 方法每次都会返回一个 List 对象, List中的值为 - dataset 的下标 index ;默认为 None,当其不为 None 时,bacth_size, sampler, shuffle 参数均失效。 - :param num_workers: 当 ``num_workers > 0`` 时, ``TorchDataLoader`` 会开启 num_workers 个子进程来处理数据, 可以加快 - 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 - :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. 
- - * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, - ``TorchDataLoader`` 调用默认的 torch 框架的 ``DataLoader`` 自带的 ``default_collate_fn`` 作为 callate_fn 的默认值, 其无法处理 - :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 - * callate_fn 为 ``'auto'`` 时,``TorchDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 - 此时可以配套使用 ``TorchDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 - * collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 - dataset 的一条数据;该 Callable 函数还应当返回一个对象。 - - :param pin_memory: 如果其为 ``True``, 那么 ``TorchDataLoader`` 会在返回数据张量之前将其 copy 到 cud a的 pin memory 中。 - :param drop_last: 当 ``drop_last=True`` 时,``TorchDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; - 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 - :param timeout: 子进程的输出队列获取数据的超时值 - :param worker_init_fn: init 函数,如果不设置为 None ,则将会在每个子进程初始化时调用该函数。 - :param multiprocessing_context: 多进程的上下文环境 - :param generator: 如果其不为 ``None``, 将会使用 RandomSampler 去生成随机的 index 且会为每个子进程生成一个 ``base_seed`` - :param prefetch_factor: 每个 worker 提前装载的 samples 数量。``2``意味着在所有的进程中会有 2*num_workers 的数据被预取。默认值为 ``2`` . 
- :param persistent_workers: 如果其为 ``True``, ``TorchDataLoader`` 在迭代完一次 dataset 后不会关闭所有进程。默认为 ``False`` - - """ if isinstance(dataset, DataSet) and collate_fn is None: raise ValueError("When use FastNLP DataSet, collate_fn must be not None") @@ -153,20 +154,20 @@ class TorchDataLoader(DataLoader): """ 如果需要对某个 field 的内容进行特殊的调整,请使用这个函数。 - :param field_name: 需要调整的 field 的名称。如果 Dataset 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 - field 的 key 来表示,如果是 nested 的 dict,可以使用元组表示多层次的 key,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); - 如果 __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。如果该 field 在数据中没 - 有找到,则报错;如果 __getitem__ 返回的是就是整体内容,请使用 "_single" 。 - :param pad_val: 这个 field 的默认 pad 值。如果设置为 None,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 - field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 - 无意义。 - :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto'],分别代表,输出为 list, numpy.ndarray, - torch.Tensor, paddle.Tensor, jittor.Var, oneflow.Tensor 类型。若 pad_val 为 None ,该值无意义 。 - :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 - batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch - 形式,输出将被直接作为结果输出。 - :return: 返回 Collator + :param field_name: 需要调整的 field 的名称。如果 :meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1** 个元素。 + 如果该 field 在数据中没有找到,则报错;如果 :meth:`Dataset.__getitem__` 返回的是就是整体内容,请使用 "_single" 。 + :param pad_val: 这个 field 的默认 pad 值。如果设置为 ``None``,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 + field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 ``None`` 。如果 ``backend`` 为 ``None``, + 该值无意义。 + :param dtype: 对于需要 pad 的 field ,该 field 数据的 ``dtype`` 。 + :param backend: 可选 ``['raw', 'numpy', 'torch', 'paddle', 
'jittor', 'oneflow', 'auto']`` ,分别代表,输出为 :class:`list`, + :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`paddle.Tensor`, :class:`jittor.Var`, :class:`oneflow.Tensor` 类型。 + 若 ``pad_val`` 为 ``None`` ,该值无意义 。 + :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 ``pad_val``, ``dtype``, ``backend`` 等参数失效。``pad_fn`` 的输入为当前 field 的 + batch 形式。 collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。 + :return: 返回使用的 collator """ collator = self._get_collator() if isinstance(collator, Collator): @@ -190,15 +191,14 @@ class TorchDataLoader(DataLoader): def set_ignore(self, *field_names) -> Collator: """ - 如果有的内容不希望输出,可以在此处进行设置,被设置的 field 将在 batch 的输出中被忽略。 - Example:: + 如果有的内容不希望输出,可以在此处进行设置,被设置的 field 将在 batch 的输出中被忽略:: - collator.set_ignore('field1', 'field2') + dataloader.set_ignore('field1', 'field2') - :param field_names: 需要忽略的 field 的名称。如果 Dataset 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 - field 的 key 来表示,如果是 nested 的 dict,可以使用元组来表示,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); 如果 - __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。 - :return: 返回 Collator 自身 + :param field_names: field_name: 需要调整的 field 的名称。如果 :meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1** 个元素。 + :return: 返回使用的 collator """ collator = self._get_collator() if isinstance(collator, Collator): @@ -230,56 +230,56 @@ def prepare_torch_dataloader(ds_or_db, non_train_batch_size: int = None) \ -> Union[TorchDataLoader, Dict[str, TorchDataLoader]]: """ - ``prepare_torch_dataloader`` 的功能是将输入的单个或多个 dataset 同时转为 ``TorchDataloader``对象, 详见 :class:`~fastNLP.TorchDataLoader`。 + ``prepare_torch_dataloader`` 的功能是将输入的单个或多个 dataset 同时转为 ``TorchDataloader`` 对象, 详见 :class:`~fastNLP.TorchDataLoader`。 根据 ds_or_db 的类型 ``[DataSet, DataBundle, Dict[name, Dataset]]`` 不同而有不同返回结果, 具体如下: - * 当 ds_or_db 为 ``DataSet``时,``prepare_torch_dataloader`` 会将使用的除了 
non_train_batch_size 和 non_train_sampler 以外的参数来 - 帮你实例化一个 ``TorchDataLoader`` 对象并返回该对象。 详见:class:`~fastNLP.core.dataloaders.TorchDataLoader`。 + * 当 ds_or_db 为 ``DataSet`` 时,``prepare_torch_dataloader`` 会将使用的除了 ``non_train_batch_size`` 和 ``non_train_sampler`` 以外的参数来 + 帮你实例化一个 ``TorchDataLoader`` 对象并返回该对象。 详见 :class:`~fastNLP.core.dataloaders.TorchDataLoader`。 * 当 ds_or_db 为 :class:`~fastNLP.io.DataBundle` 时,``prepare_torch_dataloader`` 会遍历 ``DataBundle`` 的数据集的 key-value - 来创建不同的 ``TorchDataLoader`` 对象;当 key 中包含'train'字符串时,``prepare_torch_dataloader`` 默认该 value 为 train 数据集, - 会将 batch_size 和 sampler 作为参数,其他 key 不包含 'train' 字符串的数据集则使用 non_train_size 和 non_train_sampler 作为参数。 - 最终根据 ``key: TorchDataLoader`` 组成 ``Dict[key, TorchDataLoader]`` 的字典返回。 + 来创建不同的 ``TorchDataLoader`` 对象;当 key 中包含 ``'train'`` 字符串时,``prepare_torch_dataloader`` 默认该 value 为训练数据集, + 会将 ``batch_size`` 和 ``sampler`` 作为参数,其他 key 不包含 ``'train'`` 字符串的数据集则使用 ``non_train_size`` 和 ``non_train_sampler`` 作为参数。 + 最终根据 ``key: TorchDataLoader`` 组成 ``Dict[key, TorchDataLoader]`` 的字典返回。 * 当 ds_or_db 为 ``Dict[str, DataSet]`` 字典类型时, ``prepare_torch_dataloader`` 会遍历 该 dict 的的 key-value 来创建不同的 - ``TorchDataLoader`` 对象;当 key 中包含'train'字符串时,``prepare_torch_dataloader`` 默认该 value 为 train 数据集,会将 batch_size 和 sampler 作为参数, - 其他 key 不包含 'train' 字符串的数据集则使用 non_train_size 和 non_train_sampler 作为参数。最终根据 ``key: TorchDataLoader`` 组成 - ``Dict[key, TorchDataLoader]`` 的字典返回。 + ``TorchDataLoader`` 对象;当 key 中包含 ``'train'`` 字符串时,``prepare_torch_dataloader`` 默认该 value 为训练数据集,会将 ``batch_size`` 和 ``sampler`` 作为参数, + 其他 key 不包含 ``'train'`` 字符串的数据集则使用 ``non_train_size`` 和 ``non_train_sampler`` 作为参数。最终根据 ``key: TorchDataLoader`` 组成 + ``Dict[key, TorchDataLoader]`` 的字典返回。 :param ds_or_db: 可以有以下三种取值, - * ds_or_db 为 :class:`~fastNLP.io.DataBundle`, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典 - * ds_or_db 为 ``Dict[str, DataSet]`` 字典, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典 - * ds_or_db 为实现了 __getitem__() 和 __len__() 的对象 
,返回值为:class:`~fastNLP.TorchDataLoader` + * ds_or_db 为 :class:`~fastNLP.io.DataBundle`, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典; + * ds_or_db 为 ``Dict[str, DataSet]`` 字典, 返回值为 ``Dict[str, TorchDataLoader]`` 的字典; + * ds_or_db 为实现了 __getitem__() 和 __len__() 的对象 ,返回值为 :class:`~fastNLP.TorchDataLoader`; :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param non_train_batch_size: 非 'train' 数据集的 ``TorchDataLoader`` 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 - :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 'train' 则设置其 shuffle 为 True , + :param non_train_batch_size: 非训练数据集的 ``TorchDataLoader`` 批次大小,默认为 ``16`` 且当 ``batch_sampler`` 为 ``None`` 有效。 + :param shuffle: 是否打乱数据集, 默认为 ``None``, 如果传入的 ``ds_or_db`` 可以判断出哪个是 ``'train'`` 则设置其 shuffle 为 ``True`` , 其它的为 False 。 :param sampler: 实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , - 默认为None, 当其不为 None 时, shuffle 参数无效。 - :param non_train_sampler: 非 'train' 数据集的的实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , + 默认为 ``None``, 当其不为 ``None`` 时, shuffle 参数无效。 + :param non_train_sampler: 非训练数据集的的实现了 __len__() 和 __iter__() 的实例化对象,其 __iter__() 方法每次都会返回 dataset 的一个下标 index , 默认为None, 当其不为 None 时, shuffle 参数无效。 :param batch_sampler: 实现了 __len__() 和 __iter__() 的实例化对象,,其__iter__() 方法每次都会返回一个 List 对象, List中的值为 - dataset 的下标 index ;默认为 None,当其不为 None 时,bacth_size, sampler, shuffle 参数均失效。 - :param num_workers: 当 ``num_workers > 0`` 时, ``TorchDataLoader`` 会开启 num_workers 个子进程来处理数据, 可以加快 + dataset 的下标 index ;默认为 ``None``,当其不为 ``None`` 时,``bacth_size``, ``sampler``, ``shuffle`` 参数均失效。 + :param num_workers: 当 ``num_workers > 0`` 时, ``TorchDataLoader`` 会开启 ``num_workers`` 个子进程来处理数据, 可以加快 数据处理速度,但同时也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 :param collate_fn: 用于从 dataset 取到的一个 batch 数据进行打包处理的 Callable 函数,其值应该为以下三个: ``[None, "auto", Callable]``. 
- * callate_fn 为 'None' 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, - ``TorchDataLoader`` 调用默认的 torch 框架的 ``DataLoader`` 自带的 `default_collate_fn` 作为 callate_fn 的默认值, 其无法处理 - :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 - * callate_fn 为 ``'auto'`` 时,`TorchDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 - 此时可以配套使用 ``TorchDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 - * `collate_fn 为 ``Callable`` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 - dataset 的一条数据;该 Callable 函数还应当返回一个对象。 + * callate_fn 为 ``None`` 时,需要注意的是此时传进来的 datset 类型不能为 :class:`~fastNLP.core.dataset.DataSet` , 当 collate_fn 为 ``None`` 时, + ``TorchDataLoader`` 调用默认的 torch 框架的 ``DataLoader`` 自带的 `default_collate_fn` 作为 callate_fn 的默认值, 其无法处理 + :class:`~fastNLP.core.dataset.DataSet` 的dataset对象。 + * callate_fn 为 ``'auto'`` 时,``TorchDataLoader`` 使用 :class:`~fastNLP.core.collators.Collator` 作为 collate_fn 的默认值。 + 此时可以配套使用 ``TorchDataLoader`` 的 ``set_pad`` 和 ``set_ignore`` 方法来设置 pad_val 或 忽略某个 field 的检测。 + * collate_fn 为 :class:`Callable` 时, 该 Callable 函数应当接受一个 batch 参数作为输入, batch 是一个 List 对象且 List 中的每一条数据都是 + dataset 的一条数据;该 Callable 函数还应当返回一个对象。 :param pin_memory: 如果其为 ``True``, 那么 ``TorchDataLoader`` 会在返回数据张量之前将其 copy 到 cud a的 pin memory 中。 :param drop_last: 当 ``drop_last=True`` 时,``TorchDataLoader`` 会扔掉最后一个长度小于 ``batch_size`` 的 batch 数据; 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 :param timeout: 子进程的输出队列获取数据的超时值 - :param worker_init_fn: init 函数,如果不设置为 None ,则将会在每个子进程初始化时调用该函数。 + :param worker_init_fn: init 函数,如果不设置为 ``None``,则将会在每个子进程初始化时调用该函数。 :param multiprocessing_context: 多进程的上下文环境 - :param generator: 如果其不为 ``None``, 将会使用 RandomSampler 去生成随机的 index 且会为每个子进程生成一个``base_seed`` - :param prefetch_factor: 每个 worker 提前装载的 samples 数量。``2``意味着在所有的进程中会有 2*num_workers 的数据被预取。默认值为 ``2`` . 
+ :param generator: 如果其不为 ``None``, 将会使用 RandomSampler 去生成随机的 index 且会为每个子进程生成一个 ``base_seed`` + :param prefetch_factor: 每个 worker 提前装载的 samples 数量。``2`` 意味着在所有的进程中会有 2*num_workers 的数据被预取。默认值为 ``2`` . :param persistent_workers: 如果其为 ``True``, ``TorchDataLoader`` 在迭代完一次 dataset 后不会关闭所有进程。默认为 ``False`` """ diff --git a/fastNLP/core/dataloaders/torch_dataloader/mix_dataloader.py b/fastNLP/core/dataloaders/torch_dataloader/mix_dataloader.py index 6b9b9f4d..bd5cd176 100644 --- a/fastNLP/core/dataloaders/torch_dataloader/mix_dataloader.py +++ b/fastNLP/core/dataloaders/torch_dataloader/mix_dataloader.py @@ -101,6 +101,19 @@ class MixDataLoader(DataLoader): """ 针对以下四种情况提供的 ``MixDataLoader``, 目前只支持 ``torch`` 框架的版本, 其中 mode 的取值范围为 ``['sequential', 'mix', 'polling', "Sampler"]``: + * 当 mode 为 ``'sequential'`` 时,``MixDataLoader`` 将 ``datasets`` 的序列或者字典视为一个混合大数据集, 按照 datasets 数据集序列或者字典的顺序一个 + 接一个的 sample 完所有数据。 + * 当 mode 为 ``'mix'`` 时, ``MixDataLoader`` 将 ``datasets`` 的序列或者字典视为一个混合大数据集, 然后根据用户输入的 idx 序列随机 sample + 混合数据集 datasets 的数据组成一个 batch 序列返回。 + * 当 mode 为 ``'polling'`` 时, ``MixDataLoader`` 按照 ``datasets`` 数据集的顺序, 先从第一个数据集采样一个 batch 的数据返回, + 再从第二数据集采样一个 batch 数据返回, 直至最后一个数据集采样一个 batch 数据返回后再从第一个数据采样第二个 batch 数据返回,直至所有的数据集都被轮询的采样完。 + * 当 mode 为 ``"Sampler"`` 时, 该 Sampler 是实现 __iter__() 的实例化对象, 其功能是每次 iter 时返回一个 batch 序列, 其类型为 List[int]; + 且 Sampler 必须将输入的 datasets 视为一个混合大数据集, 其 index 范围为 ``0 0`` 时, ``MixDataLoader`` 会开启 ``num_workers`` 个子进程来处理数据, 可以加快数据处理速度,但同时 + 也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 + :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 ``None`` 有效。 且 datasets 上所有 dataset 的 batch_size 一致。 + :param drop_last: 当 ``drop_last=True`` 时,``MixDataLoader`` 会扔掉 datasets 中 每个 dataset 最后一个长度小于 ``batch_size`` 的 batch 数据; + 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 + :param ds_ratio: ``ds_ratio`` 是控制 datasets 怎么组成一个混合大数据集的重要参数, 其取值为 ``[None, 'truncate_to_least', 'pad_to_most', List[float], Dict[str, float]]``: + + * ds_ratio 为 ``None``, 
datasets 数据集序列或字典不进行数据扩充处理。 + * ds_ratio 为 ``'truncate_to_least'``, datasets 数据集序列或字典会计算得到 datasets序列中 dataset 最断长度 ``mix_len``, 其他数据集会被切断 + 到最短长度 ``mix_len``。这种切断不是物理上切断,``MixDataLoader`` 会根据 sampler 不同来采样数据集到指定的最短长度 ``mix_len``。 + * ds_ratio 为 ``'pad_to_most'``, datasets 数据集序列或字典会计算得到 datasets序列中 dataset 最大长度 ``max_len``, 其他其他数据集会扩充 + 到最大长度 ``mix_len``。这种扩充不是物理上扩充, ``MixDataLoader`` 会根据 sampler 不同来重采样 dataset 到指定的最大长度 ``max_len``。 + * ds_ratio 为 ``Dict[str, float]`` 时, datasets 类型也必须为 ``Dict[str, DataSet]``, 其 key 一一对应。 ds_ratio 的 value 是任意大于 0 的浮点数, + 代表着 datasets 的 value 数据进行扩充或者缩减的倍数。 + """ def __init__(self, datasets: Dict = None, mode: str = 'sequential', @@ -119,55 +166,6 @@ class MixDataLoader(DataLoader): num_workers: int = 0, batch_size: int = 16, drop_last=False, ds_ratio: Union[None, str, Dict[str, float]] = None, pin_memory: bool = False) -> None: - """ - - :param datasets: 实现了 __getitem__() 和 __len__() 对象的序列或者字典。 - :param mode: mode 控制 ``MixDataLoader`` 运行模式。 mode 的取值范围为 ``['sequential', 'mix', 'polling', "Sampler"]``: - - * 当 mode 为 ``'sequential'`` 时,``MixDataLoader`` 将 datasets 的序列或者字典视为一个混合大数据集, 按照 datasets 数据集序列或者字典的顺序一个 - 接一个的 sample 完所有数据。 - * 当 mode 为 ``'mix'`` 时, ``MixDataLoader`` 将 datasets 的序列或者字典视为一个混合大数据集, 然后根据用户输入的 idx 序列随机sample - 混合数据集 datasets 的数据组成一个 batch 序列返回。 - * 当 mode 为 ``'polling'`` 时, ``MixDataLoader`` 按照 datasets 数据集的顺序, 先从第一个数据集采样一个 batch 的数据返回, - 再从第二数据集采样一个 batch 数据返回, 直至最后一个数据集采样一个 batch 数据返回后再从第一个数据采样第二个 batch 数据返回,直至所有的数据集都被轮询的采样完。 - * 当 mode 为 ``"Sampler"`` 时, 该 Sampler 是实现 __iter__() 的实例化对象, 其功能是每次 iter 时返回一个 batch 序列, 其类型为 List[int]; - 且 Sampler 必须将输入的 datasets 视为一个混合大数据集, 其 index 范围为 ``0 0`` 时, ``MixDataLoader`` 会开启 num_workers 个子进程来处理数据, 可以加快数据处理速度,但同时 - 也消耗大量内存。 当 ``num_workers=0`` 时, 不开启子进程。 默认为 ``0``。 - :param batch_size: 批次大小,默认为 ``16`` 且当 batch_sampler 为 None 有效。 且 datasets 上所有 dataset 的 batch_size 一致。 - :param drop_last: 当 ``drop_last=True`` 时,``MixDataLoader`` 会扔掉 datasets 中 每个 dataset 最后一个长度小于 
``batch_size`` 的 batch 数据; - 若 ``drop_last=False`` , 则会返回该 batch 数据。 默认为 ``False`` 。 - :param ds_ratio: ``ds_ratio`` 是控制 datasets 怎么组成一个混合大数据集的重要参数, 其取值为 ``[None, 'truncate_to_least', 'pad_to_most', List[float], Dict[str, float]]``: - - * ds_ratio 为 ``None``, datasets 数据集序列或字典不进行数据扩充处理。 - * ds_ratio 为 ``'truncate_to_least'``, datasets 数据集序列或字典会计算得到 datasets序列中 dataset 最断长度 ``mix_len``, 其他数据集会被切断 - 到最短长度 ``mix_len``。这种切断不是物理上切断,``MixDataLoader`` 会根据 sampler 不同来采样数据集到指定的最短长度 ``mix_len``。 - * ds_ratio 为 ``'pad_to_most'``, datasets 数据集序列或字典会计算得到 datasets序列中 dataset 最大长度 ``max_len``, 其他其他数据集会扩充 - 到最大长度 ``mix_len``。这种扩充不是物理上扩充, ``MixDataLoader`` 会根据 sampler 不同来重采样 dataset 到指定的最大长度``max_len``。 - * ds_ratio 为 ``Dict[str, float]`` 时, datasets 类型也必须为 ``Dict[str, DataSet]``, 其 key 一一对应。 ds_ratio 的 value 是任意大于 0 的浮点数, - 代表着 datasets 的 value 数据进行扩充或者缩减的倍数。 - """ # sampler 为 dict,则判断是否与 datasets 的 key 相同 if isinstance(sampler, Dict): for key in datasets.keys(): diff --git a/fastNLP/core/dataloaders/utils.py b/fastNLP/core/dataloaders/utils.py index 4f8fa743..4a648b99 100644 --- a/fastNLP/core/dataloaders/utils.py +++ b/fastNLP/core/dataloaders/utils.py @@ -14,7 +14,7 @@ __all__ = [ def indice_collate_wrapper(func:Callable): """ - 其功能是封装一层collate_fn,将dataset取到的tuple数据分离开,将idx打包为indices。 + 其功能是封装一层 collate_fn,将 dataset 取到的 tuple 数据分离开,将 idx 打包为 indices。 :param func: 需要修饰的函数 :return: @@ -115,7 +115,7 @@ class HasLenGetitemType(ABC): class OverfitDataLoader: """ - 实现一个简单的迭代器来模拟实际的 dataloader,从给定的 dataloader 中取出部分数据,来让 Trainer 实现 overfit 的功能; + 实现一个简单的迭代器来模拟实际的 dataloader,从给定的 ``dataloader`` 中取出部分数据,来让 Trainer 实现 overfit 的功能; """ def __init__(self, dataloader, overfit_batches: int, batches=None): diff --git a/fastNLP/core/dataset/dataset.py b/fastNLP/core/dataset/dataset.py index 0238a65d..f91bc930 100644 --- a/fastNLP/core/dataset/dataset.py +++ b/fastNLP/core/dataset/dataset.py @@ -1,7 +1,7 @@ r""" :class:`~fastNLP.core.dataset.DataSet` 是 fastNLP 中用于承载数据的容器。可以将 DataSet 看做是一个表格, 
-每一行是一个 sample (在 fastNLP 中被称为 :mod:`~fastNLP.core.instance` ), -每一列是一个 feature (在 fastNLP 中称为 :mod:`~fastNLP.core.field` )。 +每一行是一个 sample (在 fastNLP 中被称为 :mod:`~fastNLP.core.dataset.instance` ), +每一列是一个 feature (在 fastNLP 中称为 :mod:`~fastNLP.core.dataset.field` )。 .. csv-table:: Following is a demo layout of DataSet :header: "sentence", "words", "seq_len" @@ -11,7 +11,7 @@ r""" "Third instance .", "[Third, instance, .]", 3 "...", "[...]", "..." -在 fastNLP 内部每一行是一个 :class:`~fastNLP.Instance` 对象; 每一列是一个 :class:`~fastNLP.FieldArray` 对象。 +在 fastNLP 内部每一行是一个 :class:`~fastNLP.core.dataset.Instance` 对象; 每一列是一个 :class:`~fastNLP.core.dataset.FieldArray` 对象。 ---------------------------- 1.DataSet的创建 @@ -65,7 +65,7 @@ r""" 2.DataSet 与预处理 -------------------------------------- -常见的预处理有如下几种 +常见的预处理有如下几种: 2.1 从某个文本文件读取内容 -------------------------------------- @@ -97,10 +97,10 @@ r""" # 将句子分成单词形式, 详见DataSet.apply()方法, 可以开启多进程来加快处理, 也可以更改展示的bar,目前支持 ``['rich', 'tqdm', None]``, # 详细内容可以见 :class:`~fastNLP.core.dataset.DataSet`, 需要注意的时匿名函数不支持多进程 dataset.apply(lambda ins: ins['sentence'].split(), new_field_name='words', - progress_des='Main',progress_bar='rich') + progress_des='Main',progress_bar='rich') # 或使用DataSet.apply_field() dataset.apply_field(lambda sent:sent.split(), field_name='sentence', new_field_name='words', - progress_des='Main',progress_bar='rich') + progress_des='Main',progress_bar='rich') # 除了匿名函数,也可以定义函数传递进去 def get_words(instance): sentence = instance['sentence'] @@ -145,8 +145,8 @@ r""" # DataSet 的长度 len(dataset) - """ + __all__ = [ "DataSet", "ApplyResultException" @@ -255,34 +255,31 @@ def _multi_proc(ds, _apply_field, func, counter, queue): class DataSet: r""" - fastNLP的数据容器,详细的使用方法见文档 :mod:`fastNLP.core.dataset` - """ + fastNLP的数据容器。 - def __init__(self, data: Union[List[Instance], Dict[str, List[Any]], None] = None): - r""" - 初始化 ``DataSet``, fastNLP的 DataSet 是 key-value 存储形式, 目前支持两种初始化方式,输入 data 分别为 ``List[:class:`~fastNLP.core.dataset.Instance`]`` 和 - 
``Dict[str, List[Any]]``。 - - * 当 data 为 ``List[:class:`~fastNLP.core.dataset.Instance`]`` 时, 每个 ``Instance`` 的 field_name 需要保持一致。 - Instance 详见 :class:`~fastNLP.core.dataset.Instance` 。 - * 当 data 为 ``Dict[str, List[Any]] 时, 则每个 key 的 value 应该为等长的 list, 否则不同 field 的长度不一致。 + Example:: - :param data: 初始化的内容, 其只能为两种类型,分别为 ``List[:class:`~fastNLP.core.dataset.Instance`]`` 和 - ``Dict[str, List[Any]]``。 + from fastNLP.core.dataset import DataSet, Instance + data = {'x': [[1, 0, 1], [0, 1, 1], 'y': [0, 1]} + data1 = [Instance(x=[1,0,1],y=0), Instance(x=[0,1,1],y=1)] + ds = DataSet(data) + ds = DataSet(data1) - * 当 data 为 ``List[:class:`~fastNLP.core.dataset.Instance`]`` 时, 每个 ``Instance`` 的 field_name 需要保持一致。 - Instance 详见 :class:`~fastNLP.core.dataset.Instance` 。 - * 当 data 为 ``Dict[str, List[Any]] 时, 则每个 key 的 value 应该为等长的 list, 否则不同 field 的长度不一致。 + fastNLP的 DataSet 是 key-value 存储形式, 目前支持两种初始化方式,输入 data 分别为 ``List[:class:`~fastNLP.core.dataset.Instance`]`` 和 + ``Dict[str, List[Any]]``。 - Example:: + * 当 data 为 ``List[:class:`~fastNLP.core.dataset.Instance`]`` 时, 每个 ``Instance`` 的 field_name 需要保持一致。 + Instance 详见 :class:`~fastNLP.core.dataset.Instance` 。 + * 当 data 为 ``Dict[str, List[Any]]`` 时, 则每个 key 的 value 应该为等长的 list, 否则不同 field 的长度不一致。 - from fastNLP.core.dataset import DataSet, Instance - data = {'x': [[1, 0, 1], [0, 1, 1], 'y': [0, 1]} - data1 = [Instance(x=[1,0,1],y=0), Instance(x=[0,1,1],y=1)] - ds = DataSet(data) - ds = DataSet(data1) + :param data: 初始化的内容,其只能为两种类型,分别为 ``List[:class:`~fastNLP.core.dataset.Instance`]`` 和 + ``Dict[str, List[Any]]``。 - """ + * 当 data 为 ``List[:class:`~fastNLP.core.dataset.Instance`]`` 时, 每个 ``Instance`` 的 field_name 需要保持一致。 + Instance 详见 :class:`~fastNLP.core.dataset.Instance` 。 + * 当 data 为 ``Dict[str, List[Any]] 时, 则每个 key 的 value 应该为等长的 list, 否则不同 field 的长度不一致。 + """ + def __init__(self, data: Union[List[Instance], Dict[str, List[Any]], None] = None): self.field_arrays = {} self._collator = Collator() if data is not None: @@ 
-429,10 +426,9 @@ class DataSet: def append(self, instance: Instance) -> None: r""" - 将一个 instance 对象 append 到 DataSet 后面。详见 :class:`~fastNLP.Instance` - - :param instance: 若 DataSet 不为空,则 instance 应该拥有和 DataSet 完全一样的 field。 + 将一个 ``instance`` 对象 append 到 DataSet 后面。详见 :class:`~fastNLP.core.dataset.Instance` + :param instance: 若 DataSet 不为空,则 instance 应该拥有和 DataSet 完全一样的 field; """ if len(self.field_arrays) == 0: # DataSet has no field yet @@ -454,10 +450,10 @@ class DataSet: def add_fieldarray(self, field_name: str, fieldarray: FieldArray) -> None: r""" - 将 fieldarray 添加到 DataSet 中. + 将 ``fieldarray`` 添加到 DataSet 中. - :param field_name: 新加入的 field 的名称 - :param fieldarray: 需要加入 DataSet 的 field 的内容, 详见 :class:`~fastNLP.core.dataset.FieldArray` + :param field_name: 新加入的 field 的名称; + :param fieldarray: 需要加入 DataSet 的 field 的内容, 详见 :class:`~fastNLP.core.dataset.FieldArray` ; :return: """ if not isinstance(fieldarray, FieldArray): @@ -472,8 +468,8 @@ class DataSet: r""" 新增一个 field, 需要注意的是 fields 的长度跟 DataSet 长度一致 - :param field_name: 新增的 field 的名称 - :param fields: 需要新增的 field 的内容 + :param field_name: 新增的 field 的名称; + :param fields: 需要新增的 field 的内容; """ if len(self.field_arrays) != 0: @@ -484,9 +480,9 @@ class DataSet: def delete_instance(self, index: int): r""" - 删除第 ``index `` 个 Instance + 删除第 ``index`` 个 Instance - :param index: 需要删除的 instanc e的 index,序号从 `0` 开始。 + :param index: 需要删除的 instance 的 index,序号从 `0` 开始。 """ assert isinstance(index, int), "Only integer supported." if len(self) <= index: @@ -500,9 +496,9 @@ class DataSet: def delete_field(self, field_name: str): r""" - 删除名为 field_name 的 field + 删除名为 ``field_name`` 的 field - :param field_name: 需要删除的 field 的名称. 
+ :param field_name: 需要删除的 field 的名称; """ if self.has_field(field_name): self.field_arrays.pop(field_name) @@ -512,11 +508,11 @@ class DataSet: def copy_field(self, field_name: str, new_field_name: str): r""" - 深度 copy 名为 field_name 的 field 到 new_field_name + 深度 copy 名为 ``field_name`` 的 field 到 ``new_field_name`` - :param field_name: 需要 copy 的 field。 - :param new_field_name: copy 生成的 field 名称 - :return: self + :param field_name: 需要 copy 的 field; + :param new_field_name: copy 生成的 field 名称; + :return: 数据集自身; """ if not self.has_field(field_name): raise KeyError(f"Field:{field_name} not found in DataSet.") @@ -527,10 +523,10 @@ class DataSet: def has_field(self, field_name: str) -> bool: r""" - 判断 DataSet 中是否有名为 field_name 这个 field + 判断 DataSet 中是否有名为 ``field_name`` 这个 field - :param field_name: field 的名称 - :return: 表示是否有名为 field_name 这个 field + :param field_name: field 的名称; + :return: 表示是否有名为 ``field_name`` 这个 field; """ if isinstance(field_name, str): return field_name in self.field_arrays @@ -538,10 +534,10 @@ class DataSet: def get_field(self, field_name: str) -> FieldArray: r""" - 获取 field_name 这个 field + 获取名为 ``field_name`` 的 field - :param field_name: field 的名称 - :return: :class:`~fastNLP.FieldArray` + :param field_name: field 的名称; + :return: 一个 :class:`~fastNLP.core.dataset.FieldArray` 对象; """ if field_name not in self.field_arrays: raise KeyError("Field name {} not found in DataSet".format(field_name)) @@ -549,17 +545,13 @@ class DataSet: def get_all_fields(self) -> dict: r""" - 返回一个 dict,key 为 field_name, value为对应的 :class:`~fastNLP.FieldArray` - - :return: 返回如上所述的字典 + :return: 一个 dict,key 为 field_name, value为对应的 :class:`~fastNLP.core.dataset.FieldArray` 对象。 """ return self.field_arrays def get_field_names(self) -> list: r""" - 返回一个 list,包含所有 field 的名字 - - :return: 返回如上所述的列表 + :return: 一个 list,包含所有 field 的名字 """ return sorted(self.field_arrays.keys()) @@ -575,8 +567,8 @@ class DataSet: r""" 将某个 field 重新命名. 
- :param field_name: 原来的 field 名称。 - :param new_field_name: 修改为 new_name。 + :param field_name: 原来的 field 名称; + :param new_field_name: 修改为 new_name; """ if field_name in self.field_arrays: self.field_arrays[new_field_name] = self.field_arrays.pop(field_name) @@ -589,13 +581,13 @@ class DataSet: new_field_name: str = None, num_proc: int = 0, progress_desc: str = None, progress_bar: str = 'rich'): r""" - 将 :class:`DataSet` 每个 ``instance`` 中为 ``field_name`` 的 ``field`` 传给函数 ``func``,并写入到 ``new_field_name`` + 将 :class:`DataSet` 每个 ``instance`` 中为 ``field_name`` 的 field 传给函数 ``func``,并写入到 ``new_field_name`` 中。 - :param field_name: 传入 ``func`` 的 ``field`` 名称; - :param func: 对指定 ``field`` 进行处理的函数,注意其输入应为 ``instance`` 中名为 ``field_name`` 的 ``field`` 的内容; + :param func: 对指定 fiel` 进行处理的函数,注意其输入应为 ``instance`` 中名为 ``field_name`` 的 field 的内容; + :param field_name: 传入 ``func`` 的 field 名称; :param new_field_name: 函数执行结果写入的 ``field`` 名称。该函数会将 ``func`` 返回的内容放入到 ``new_field_name`` 对 - 应的 ``field`` 中,注意如果名称与已有的 ``field`` 相同则会进行覆盖。如果为 ``None`` 则不会覆盖和创建 ``field`` ; + 应的 ``field`` 中,注意如果名称与已有的 field 相同则会进行覆盖。如果为 ``None`` 则不会覆盖和创建 field ; :param num_proc: 使用进程的数量。 .. note:: @@ -603,8 +595,8 @@ class DataSet: 由于 ``python`` 语言的特性,设置该参数后会导致相应倍数的内存增长,这可能会对您程序的执行带来一定的影响。另外,使用多进程时, ``func`` 函数中的打印将不会输出。 - :param progress_desc: 进度条的描述字符,默认为 ``Processing``; - :param progress_bar: 显示 progress_bar 的方式,支持 `["rich", "tqdm", None]`。 + :param progress_desc: 如果不为 ``None``,则会显示当前正在处理的进度条的名称; + :param progress_bar: 显示进度条的方式,支持 ``["rich", "tqdm", None]``。 :return: 从函数 ``func`` 中得到的返回值; """ assert len(self) != 0, "Null DataSet cannot use apply_field()." 
@@ -625,26 +617,27 @@ class DataSet: modify_fields: bool = True, num_proc: int = 0, progress_desc: str = None, progress_bar: str = 'rich'): r""" - 将 ``DataSet`` 中的每个 ``Instance`` 中的名为 `field_name` 的field 传给 func,并获取它的返回值。 - func 可以返回一个或多个 field 上的结果。 + 将 ``DataSet`` 中的每个 ``Instance`` 中的名为 `field_name` 的 field 传给 ``func``,并获取它的返回值。 + ``func`` 可以返回一个或多个 field 上的结果。 .. note:: - ``apply_field_more`` 与 ``apply_field`` 的区别参考 :meth:`~fastNLP.DataSet.apply_more` 中关于 ``apply_more`` 与 + ``apply_field_more`` 与 ``apply_field`` 的区别参考 :meth:`~fastNLP.core.dataset.DataSet.apply_more` 中关于 ``apply_more`` 与 ``apply`` 区别的介绍。 - :param field_name: 传入func的是哪个field。 - :param func: 参数是 ``DataSet`` 中的 ``Instance`` ,返回值是一个字典,key 是field 的名字,value 是对应的结果 - :param modify_fields: 是否用结果修改 `DataSet` 中的 `Field`, 默认为 True + :param func: 对指定 field 进行处理的函数,注意其输入应为 ``instance`` 中名为 ``field_name`` 的 field 的内容; + :param field_name: 传入 ``func`` 的 field 名称; + :param modify_fields: 是否用 ``func`` 返回的结果修改 ``DataSet`` 中对应的 ``Field``, + 默认为 ``True``; :param num_proc: 使用进程的数量。 - + .. note:: - + 由于 ``python`` 语言的特性,设置该参数后会导致相应倍数的内存增长,这可能会对您程序的执行带来一定的影响。另外,使用多进程时, ``func`` 函数中的打印将不会输出。 - :param progress_bar: 显示 progress_bar 的方式,支持 `["rich", "tqdm", None]`。 - :param progress_desc: 当显示 progress_bar 时,显示当前正在处理的进度条描述字符 - :return Dict[str:Field]: 返回一个字典 + :param progress_desc: 如果不为 ``None``,则会显示当前正在处理的进度条的名称; + :param progress_bar: 显示进度条的方式,支持 ``["rich", "tqdm", None]``。 + :return: 返回一个字典 """ assert len(self) != 0, "Null DataSet cannot use apply_field()."
if not self.has_field(field_name=field_name): @@ -747,7 +740,7 @@ class DataSet: def apply_more(self, func: Callable = None, modify_fields: bool = True, num_proc: int = 0, progress_desc: str = '', progress_bar: str = 'rich'): r""" - 将 ``DataSet`` 中每个 ``Instance`` 传入到func中,并获取它的返回值。func可以返回一个或多个 field 上的结果。 + 将 ``DataSet`` 中每个 ``Instance`` 传入到 ``func`` 中,并获取它的返回值。``func``可以返回一个或多个 field 上的结果。 .. note:: ``apply_more`` 与 ``apply`` 的区别: @@ -767,9 +760,9 @@ class DataSet: 由于 ``python`` 语言的特性,设置该参数后会导致相应倍数的内存增长,这可能会对您程序的执行带来一定的影响。另外,使用多进程时, ``func`` 函数中的打印将不会输出。 - :param progress_desc: 当 progress_bar 不为 None 时,可以显示当前正在处理的进度条名称 - :param progress_bar: 显示 progress_bar 的方式,支持 `["rich", "tqdm", None]`。 - :return Dict[str:Field]: 返回一个字典 + :param progress_desc: 当 progress_bar 不为 ``None`` 时,可以显示当前正在处理的进度条名称 + :param progress_bar: 显示进度条的方式,支持 ``["rich", "tqdm", None]``。 + :return: 返回一个字典 """ assert callable(func), "The func is not callable." assert len(self) != 0, "Null DataSet cannot use apply()." @@ -808,10 +801,11 @@ class DataSet: def apply(self, func: Callable = None, new_field_name: str = None, num_proc: int = 0, progress_bar: str = 'rich', progress_desc: str = ''): """ + 将 ``DataSet`` 中每个 ``Instance`` 传入到 ``func`` 中,并获取它的返回值。``func`` 仅能返回一个结果。 :param func: 参数是 ``DataSet`` 中的 ``Instance`` ,返回值是一个字典,key 是field 的名字,value 是对应的结果 - :param new_field_name: 将func返回的内容放入到 `new_field_name` 这个field中,如果名称与已有的field相同,则覆 - 盖之前的field。如果为None则不创建新的field。 + :param new_field_name: 将 ``func`` 返回的内容放入到 ``new_field_name`` 这个 field中 ,如果名称与已有的 field 相同,则覆 + 盖之前的 field。如果为 ``None`` 则不创建新的 field。 :param num_proc: 使用进程的数量。 .. 
note:: @@ -819,8 +813,8 @@ class DataSet: 由于 ``python`` 语言的特性,设置该参数后会导致相应倍数的内存增长,这可能会对您程序的执行带来一定的影响。另外,使用多进程时, ``func`` 函数中的打印将不会输出。 - :param progress_bar: 显示 progress_bar 的方式,支持 `["rich", "tqdm", None]`。 - :param progress_desc: progress bar 显示的值,默认为空。 + :param progress_bar: 显示进度条的方式,支持 ``["rich", "tqdm", None]``。 + :param progress_desc: 如果不为 ``None``,则会显示当前正在处理的进度条的名称。 """ assert callable(func), "The func you provide is not callable." assert len(self) != 0, "Null DataSet cannot use apply()." @@ -838,10 +832,10 @@ class DataSet: def add_seq_len(self, field_name: str, new_field_name='seq_len'): r""" - 将使用 len() 直接对 field_name 中每个元素作用,将其结果作为 sequence length, 并放入 seq_len 这个 field。 + 将使用 :func:`len` 直接对 ``field_name`` 中每个元素作用,将其结果作为 sequence length, 并放入 ``new_field_name`` 这个 field。 :param field_name: 需要处理的 field_name - :param new_field_name: str. 新的 field_name + :param new_field_name: 新的 field_name :return: """ if self.has_field(field_name=field_name): @@ -852,10 +846,10 @@ class DataSet: def drop(self, func: Callable, inplace=True): r""" - 删除某些 Instance。 需要注意的时func 接受一个 Instance ,返回 bool 值。返回值为 True 时, + 删除某些 Instance。 需要注意的是 ``func`` 接受一个 Instance ,返回 bool 值。返回值为 ``True`` 时, 该 Instance 会被移除或者不会包含在返回的 DataSet 中。 - :param func: 接受一个 Instance 作为参数,返回 bool 值。为 True 时删除该 instance + :param func: 接受一个 Instance 作为参数,返回 bool 值。为 ``True`` 时删除该 instance :param inplace: 是否在当前 DataSet 中直接删除 instance;如果为 False,将返回一个新的 DataSet。 :return: DataSet @@ -875,11 +869,11 @@ class DataSet: def split(self, ratio: float, shuffle=True): r""" - 将 DataSet 按照 ratio 的比例拆分,返回两个 DataSet + 将 DataSet 按照 ``ratio`` 的比例拆分,返回两个 DataSet - :param ratio: 0 1, f'DataSet with {len(self)} instance cannot be split.' assert isinstance(ratio, float) @@ -906,9 +900,9 @@ class DataSet: def save(self, path: str) -> None: r""" - 保存DataSet. 
+ 保存 DataSet。 - :param path: 将DataSet存在哪个路径 + :param path: 保存路径; """ with open(path, 'wb') as f: pickle.dump(self, f) @@ -916,10 +910,10 @@ @staticmethod def load(path: str): r""" - 从保存的 DataSet pickle文件的路径中读取DataSet + 从保存的 DataSet pickle 文件的路径中读取 DataSet - :param path: 从哪里读取 DataSet - :return: 读取后的 :class:`~fastNLP.读取后的DataSet`。 + :param path: 读取路径; + :return: 读取出的 DataSet """ with open(path, 'rb') as f: d = pickle.load(f) @@ -928,16 +922,16 @@ def concat(self, dataset: 'DataSet', inplace:bool=True, field_mapping:Dict=None) -> 'DataSet': """ - 将当前 dataset 与输入的 dataset 结合成一个更大的 dataset,需要保证两个 dataset 都包含了相同的 field。结合后的 dataset - 的 field_name 和 _collator 以当前 dataset 为准。当 dataset 中包含的 field 多于当前的 dataset,则多余的 field 会被忽略; - 若 dataset 中未包含所有当前 dataset 含有 field,则会报错。 + 将当前 DataSet 与输入的 ``dataset`` 结合成一个更大的 dataset,需要保证两个 dataset 都包含了相同的 field。结合后的 dataset + 的 field_name 和 _collator 以当前 dataset 为准。若 ``dataset`` 中包含的 field 多于当前的 DataSet,则多余的 field 会被忽略; + 若 ``dataset`` 中未包含所有当前 DataSet 含有 field,则会报错。 - :param dataset: 需要和当前 dataset concat的 dataset - :param inplace: 是否直接将 dataset 组合到当前 dataset 中 - :param field_mapping: 当传入的 dataset 中的 field 名称和当前 dataset 不一致时,需要通过 field_mapping 把输入的 dataset 中的 - field 名称映射到当前 field. field_mapping 为 dict 类型,key 为 dataset 中的 field 名称,value 是需要映射成的名称 + :param dataset: 需要和当前 DataSet 拼接的 ``dataset``; + :param inplace: 是否直接将 ``dataset`` 组合到当前 DataSet 中; + :param field_mapping: 当传入的 ``dataset`` 中的 field 名称和当前 dataset 不一致时,需要通过 ``field_mapping`` 把输入的 ``dataset`` + 中的 field 名称映射到当前 field。``field_mapping`` 为 dict 类型,key 为 ``dataset`` 中的 field 名称,value 是需要映射成的名称 - :return: :class:`~fastNLP.core.dataset.DataSet`` + :return: :class:`~fastNLP.core.dataset.DataSet` """ assert isinstance(dataset, DataSet), "Can only concat two datasets."
@@ -966,7 +960,8 @@ class DataSet: @classmethod def from_pandas(cls, df): """ - 从 ``pandas.DataFrame`` 中读取数据转为 DataSet + 从 :class:`pandas.DataFrame` 中读取数据并转化为 DataSet + :param df: 使用 pandas 读取的数据 :return: """ @@ -975,7 +970,7 @@ def to_pandas(self): """ - 将 DataSet 数据转为 ``pandas.DataFrame`` 类型的数据 + 将 DataSet 数据转为 :class:`pandas.DataFrame` 类型的数据 :return: """ @@ -1003,23 +998,22 @@ def set_pad(self, field_name: Union[str, tuple], pad_val: Union[int, float, None] = 0, dtype=None, backend=None, pad_fn: Callable = None) -> Collator: """ - ``DataSet`` 中想要对绑定的 collator 进行调整可以调用此函数。 ``collator`` 为 :class:`~fastNLP.core.collators.Collator` - 时该函数才有效。调用该函数可以对 field 内容的 pad_val, dtype, backend 等进行调整。 - - :param field_name: 需要调整的 field 的名称。如果 DataSet 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 - field 的 key 来表示,如果是 nested 的 dict,可以使用元组表示多层次的 key,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); - 如果 __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。如果该 field 在数据中没 - 有找到,则报错;如果 __getitem__ 返回的是就是整体内容,请使用 "_single" 。 - :param pad_val: 这个 field 的默认 pad 值。如果设置为 None,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 - field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 None 。如果 backend 为 None ,该值 - 无意义。 - :param dtype: 对于需要 pad 的 field ,该 field 的数据 dtype 应该是什么。 - :param backend: 可选['raw', 'numpy', 'torch', 'torch', 'jittor', 'auto'],分别代表,输出为 list, numpy.ndarray, - torch.Tensor, torch.Tensor, jittor.Var 类型。若 pad_val 为 None ,该值无意义 。 - :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 pad_val, dtype, backend 等参数失效。pad_fn 的输入为当前 field 的 - batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。pad_func 的输入即为 field 的 batch - 形式,输出将被直接作为结果输出。 - :return: 返回 Collator + 如果需要对某个 field 的内容进行特殊的调整,请使用这个函数。 + + :param field_name: 需要调整的 field 的名称。如果 :meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1**
个元素。 + 如果该 field 在数据中没有找到,则报错;如果 :meth:`Dataset.__getitem__` 返回的是就是整体内容,请使用 "_single" 。 + :param pad_val: 这个 field 的默认 pad 值。如果设置为 ``None``,则表示该 field 不需要 pad , fastNLP 默认只会对可以 pad 的 + field 进行 pad,所以如果对应 field 本身就不是可以 pad 的形式,可以不需要主动设置为 ``None`` 。如果 ``backend`` 为 ``None``, + 该值无意义。 + :param dtype: 对于需要 pad 的 field ,该 field 数据的 ``dtype`` 。 + :param backend: 可选 ``['raw', 'numpy', 'torch', 'paddle', 'jittor', 'oneflow', 'auto']`` ,分别代表,输出为 :class:`list`, + :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`paddle.Tensor`, :class:`jittor.Var`, :class:`oneflow.Tensor` 类型。 + 若 ``pad_val`` 为 ``None`` ,该值无意义 。 + :param pad_fn: 指定当前 field 的 pad 函数,传入该函数则 ``pad_val``, ``dtype``, ``backend`` 等参数失效。``pad_fn`` 的输入为当前 field 的 + batch 形式。 Collator 将自动 unbatch 数据,然后将各个 field 组成各自的 batch 。 + :return: 返回自身的 collator; """ if isinstance(self.collator, Collator): self.collator.set_pad(field_name=field_name, pad_val=pad_val, dtype=dtype, pad_fn=pad_fn, backend=backend) @@ -1030,16 +1024,14 @@ class DataSet: def set_ignore(self, *field_names) -> Collator: """ ``DataSet`` 中想要对绑定的 collator 进行调整可以调用此函数。 ``collator`` 为 :class:`~fastNLP.core.collators.Collator` - 时该函数才有效。调用该函数可以设置忽略输出某些 field 的内容,被设置的 field 将在 batch 的输出中被忽略。 - - Example:: + 时该函数才有效。调用该函数可以设置忽略输出某些 field 的内容,被设置的 field 将在 batch 的输出中被忽略:: - collator.set_ignore('field1', 'field2') + dataset.set_ignore('field1', 'field2') - :param field_names: 需要忽略的 field 的名称。如果 DataSet 的 __getitem__ 方法返回的是 dict 类型的,则可以直接使用对应的 - field 的 key 来表示,如果是 nested 的 dict,可以使用元组来表示,例如 {'a': {'b': 1}} 中的使用 ('a', 'b'); 如果 - __getitem__ 返回的是 Sequence 类型的,则可以使用 '_0', '_1' 表示序列中第 0 或 1 个元素。 - :return: 返回 Collator 自身 + :param field_names: field_name: 需要调整的 field 的名称。如果 :meth:`Dataset.__getitem__` 方法返回的是字典类型,则可以直接使用对应的 + field 的 key 来表示,如果是嵌套字典,可以使用元组表示多层次的 key,例如 ``{'a': {'b': 1}}`` 中可以使用 ``('a', 'b')``; + 如果 :meth:`Dataset.__getitem__` 返回的是 Sequence 类型,则可以使用 ``'_0'``, ``'_1'`` 表示序列中第 **0** 或 **1** 个元素。 + :return: 返回自身的 collator; """ if 
isinstance(self.collator, Collator): self.collator.set_ignore(*field_names) diff --git a/fastNLP/core/dataset/field.py b/fastNLP/core/dataset/field.py index 6bb94416..e9795885 100644 --- a/fastNLP/core/dataset/field.py +++ b/fastNLP/core/dataset/field.py @@ -14,15 +14,14 @@ import numpy as np class FieldArray: + """ + :class:`~fastNLP.core.dataset.DataSet` 中用于表示列的数据类型。 - def __init__(self, name: str, content): - """ - 初始化 FieldArray - - :param name: 字符串的名称 - :param content: 任意类型的数据 + :param name: 字符串的名称 + :param content: 任意类型的数据 + """ - """ + def __init__(self, name: str, content): if len(content) == 0: raise RuntimeError("Empty fieldarray is not allowed.") _content = content @@ -36,18 +35,15 @@ class FieldArray: def append(self, val: Any) -> None: r""" - :param val: 把该 val append 到 fieldarray。 - :return: - + :param val: 把该 ``val`` 添加到 fieldarray 中。 """ self.content.append(val) def pop(self, index: int) -> None: r""" - 删除该 field 中 index 处的元素 + 删除该 field 中 ``index`` 处的元素 :param index: 从 ``0`` 开始的数据下标。 - :return: """ self.content.pop(index) @@ -60,10 +56,10 @@ class FieldArray: def get(self, indices: Union[int, List[int]]): r""" - 根据给定的 indices 返回内容。 + 根据给定的 ``indices`` 返回内容。 - :param indices: 获取 indices 对应的内容。 - :return: 根据给定的 indices 返回的内容,可能是单个值 或 ``ndarray`` + :param indices: 获取 ``indices`` 对应的内容。 + :return: 根据给定的 ``indices`` 返回的内容,可能是单个值 或 :class:`numpy.ndarray` """ if isinstance(indices, int): if indices == -1: @@ -80,16 +76,16 @@ r""" 返回长度 - :return length: + :return: """ return len(self.content) def split(self, sep: str = None, inplace: bool = True): r""" - 依次对自身的元素使用 ``.split()`` 方法,应该只有当本 field 的元素为 ``str`` 时,该方法才有用。 + 依次对自身的元素使用 ``.split()`` 方法,应该只有当本 field 的元素为 :class:`str` 时,该方法才有用。 :param sep: 分割符,如果为 ``None`` 则直接调用 ``str.split()``。 - :param inplace: 如果为 ``True``,则将新生成值替换本 field。否则返回 ``list``。 + :param inplace: 如果为 ``True``,则将新生成值替换本 field。否则返回 :class:`list`。 :return: List[List[str]] or self """ new_contents = [] @@ -104,10 +100,11 @@
class FieldArray: def int(self, inplace: bool = True): r""" 将本 field 中的值调用 ``int(cell)``. 支持 field 中内容为以下两种情况: - * ['1', '2', ...](即 field 中每个值为 ``str`` 的), - * [['1', '2', ..], ['3', ..], ...](即 field 中每个值为一个 ``list`` ,``list`` 中的值会被依次转换。) - :param inplace: 如果为 ``True``,则将新生成值替换本 field。否则返回 ``list``。 + * ['1', '2', ...](即 field 中每个值为 :class:`str` 的), + * [['1', '2', ..], ['3', ..], ...](即 field 中每个值为一个 :class:`list` ,:class:`list` 中的值会被依次转换。) + + :param inplace: 如果为 ``True``,则将新生成值替换本 field,并返回当前 field 。否则返回 :class:`list`。 :return: List[int], List[List[int]], self """ new_contents = [] @@ -126,10 +123,10 @@ class FieldArray: r""" 将本 field 中的值调用 ``float(cell)``. 支持 field 中内容为以下两种情况: - * ['1', '2', ...](即 field 中每个值为 ``str`` 的), - * [['1', '2', ..], ['3', ..], ...](即 field 中每个值为一个 ``list``,``list`` 中的值会被依次转换。) + * ['1', '2', ...](即 field 中每个值为 :class:`str` 的), + * [['1', '2', ..], ['3', ..], ...](即 field 中每个值为一个 :class:`list` ,:class:`list` 中的值会被依次转换。) - :param inplace: 如果为 ``True``,则将新生成值替换本 ``field``。否则返回 ``list``。 + :param inplace: 如果为 ``True``,则将新生成值替换本 field,并返回当前 field 。否则返回 :class:`list`。 :return: """ new_contents = [] @@ -148,10 +145,10 @@ class FieldArray: r""" 将本field中的值调用 ``bool(cell)``. 支持 field 中内容为以下两种情况 - * ['1', '2', ...](即 field 中每个值为 ``str`` 的), - * [['1', '2', ..], ['3', ..], ...](即 field 中每个值为一个 ``list``,``list`` 中的值会被依次转换。) + * ['1', '2', ...](即 field 中每个值为 :class:`str` 的), + * [['1', '2', ..], ['3', ..], ...](即 field 中每个值为一个 :class:`list` ,:class:`list` 中的值会被依次转换。) - :param inplace: 如果为 ``True``,则将新生成值替换本 ``field``。否则返回 ``list``。 + :param inplace: 如果为 ``True``,则将新生成值替换本 field,并返回当前 field 。否则返回 :class:`list`。 :return: """ new_contents = [] @@ -169,12 +166,12 @@ class FieldArray: def lower(self, inplace=True): r""" - 将本 field 中的值调用 ``cell.lower()``. 
支持 field 中内容为以下两种情况 + 将本 field 中的值调用 ``cell.lower()``, 支持 field 中内容为以下两种情况 - * ['1', '2', ...](即 ``field`` 中每个值为 ``str`` 的), - * [['1', '2', ..], ['3', ..], ...](即 field 中每个值为一个 ``list``,``list``中的值会被依次转换。) + * ['1', '2', ...](即 field 中每个值为 :class:`str` 的), + * [['1', '2', ..], ['3', ..], ...](即 field 中每个值为一个 :class:`list` ,:class:`list` 中的值会被依次转换。) - :param inplace: 如果为 ``True``,则将新生成值替换本 field。否则返回 ``list``。 + :param inplace: 如果为 ``True``,则将新生成值替换本 field,并返回当前 field 。否则返回 :class:`list`。 :return: List[int], List[List[int]], self """ new_contents = [] @@ -191,12 +188,12 @@ class FieldArray: def upper(self, inplace=True): r""" - 将本 field 中的值调用 ``cell.lower()``. 支持 field 中内容为以下两种情况 + 将本 field 中的值调用 ``cell.upper()``, 支持 field 中内容为以下两种情况 - * ['1', '2', ...](即 field 中每个值为 ``str`` 的), - * [['1', '2', ..], ['3', ..], ...](即 field 中每个值为一个 ``list``,``list`` 中的值会被依次转换。) + * ['1', '2', ...](即 field 中每个值为 :class:`str` 的), + * [['1', '2', ..], ['3', ..], ...](即 field 中每个值为一个 :class:`list` ,:class:`list` 中的值会被依次转换。) - :param inplace: 如果为 ``True``,则将新生成值替换本 field。否则返回 ``list``。 + :param inplace: 如果为 ``True``,则将新生成值替换本 field,并返回当前 field 。否则返回 :class:`list`。 :return: List[int], List[List[int]], self """ new_contents = [] @@ -211,11 +208,11 @@ class FieldArray: raise e return self._after_process(new_contents, inplace=inplace) - def value_count(self): + def value_count(self) -> Counter: r""" - 返回该 field 下不同 value的 数量。多用于统计 label 数量 + 返回该 field 下不同 value 的数量。多用于统计 label 数量 - :return: Counter, key 是 label,value 是出现次数 + :return: 计数结果,key 是 label,value 是出现次数 """ count = Counter() diff --git a/fastNLP/core/dataset/instance.py b/fastNLP/core/dataset/instance.py index be455938..b721472e 100644 --- a/fastNLP/core/dataset/instance.py +++ b/fastNLP/core/dataset/instance.py @@ -1,7 +1,6 @@ r""" -instance 模块实现了 Instance 类在 fastNLP 中对应 sample。一个 sample 可以认为是一个 Instance 类型的对象。 -便于理解的例子可以参考文档 :mod:`fastNLP.core.dataset` 。 - +instance 模块实现了 Instance 类,即在 fastNLP 中 sample 对应的类型。一个 sample 可以认为是一个 
Instance 类型的对象。 +便于理解的例子可以参考文档 :mod:`fastNLP.core.dataset.dataset` 。 """ __all__ = [ @@ -15,9 +14,9 @@ from fastNLP.core.utils.utils import pretty_table_printer class Instance(Mapping): r""" Instance 是 fastNLP 中对应一个 sample 的类。每个 sample 在 fastNLP 中是一个 Instance 对象。 - Instance 一般与 :class:`~fastNLP.DataSet` 一起使用, Instance 的初始化如下面的 Example 所示:: + Instance 一般与 :class:`~fastNLP.DataSet` 一起使用, Instance 的初始化如下面的代码所示:: - >>> instance = Instance(input="this is a demo sentence", label='good') # 请补充完整 + >>> instance = Instance(input="this is a demo sentence", label='good') """ diff --git a/fastNLP/core/drivers/paddle_driver/paddle_driver.py b/fastNLP/core/drivers/paddle_driver/paddle_driver.py index f604994e..cccd87c8 100644 --- a/fastNLP/core/drivers/paddle_driver/paddle_driver.py +++ b/fastNLP/core/drivers/paddle_driver/paddle_driver.py @@ -159,7 +159,7 @@ class PaddleDriver(Driver): :param only_state_dict: 是否只保存模型的 ``state_dict``;如果为 ``False``,则会调用 ``paddle.jit.save`` 函数保存整个模型的参数,此时需要传入 ``input_spec`` 参数; :kwargs: - * input_spec -- 描述存储模型 ``forward`` 方法的输入; + * *input_spec* -- 描述存储模型 ``forward`` 方法的输入; 当 ``only_state_dict`` 为 ``False`` 时必须传入,否则加载时会报错。您可以通过 ``InputSpec`` 或者示例 ``Tensor`` 进行描述。详细的使用方法可以参考 **PaddlePaddle** `关于 paddle.jit.save 函数的文档 `_; """ diff --git a/fastNLP/core/metrics/metric.py b/fastNLP/core/metrics/metric.py index e2dc3dda..0f904041 100644 --- a/fastNLP/core/metrics/metric.py +++ b/fastNLP/core/metrics/metric.py @@ -20,7 +20,7 @@ class Metric: :param backend: 目前支持四种类型的 backend, ``[torch, paddle, jittor, auto]``。其中 ``auto`` 表示根据实际调用 Metric.update() 函数时传入的参数决定具体的 ``backend`` ,大部分情况下直接使用 ``auto`` 即可。 :param aggregate_when_get_metric: 在计算 metric 的时候是否自动将各个进程上的相同的 element 的数字聚合后再得到metric, - 当 backend 不支持分布式时,该参数无意义。如果为 None ,将在 :class:`~fastNLP.Evaluator` 中根据 sampler 是否使用分布式 + 当 backend 不支持分布式时,该参数无意义。如果为 None ,将在 :class:`~fastNLP.core.controllers.Evaluator` 中根据 sampler 是否使用分布式 进行自动设置。 """ def __init__(self, backend: Union[str, Backend, None] = 'auto', 
aggregate_when_get_metric: bool = None): diff --git a/fastNLP/core/metrics/span_f1_pre_rec_metric.py b/fastNLP/core/metrics/span_f1_pre_rec_metric.py index 07a6cd56..b264f93d 100644 --- a/fastNLP/core/metrics/span_f1_pre_rec_metric.py +++ b/fastNLP/core/metrics/span_f1_pre_rec_metric.py @@ -212,7 +212,7 @@ class SpanFPreRecMetric(Metric): :param backend: 目前支持四种类型的 backend, ``[torch, paddle, jittor, auto]``。其中 ``auto`` 表示根据实际调用 Metric.update() 函数时传入的参数决定具体的 ``backend`` ,大部分情况下直接使用 ``auto`` 即可。 :param aggregate_when_get_metric: 在计算 metric 的时候是否自动将各个进程上的相同的 element 的数字聚合后再得到metric, - 当 backend 不支持分布式时,该参数无意义。如果为 None ,将在 :class:`~fastNLP.Evaluator` 中根据 sampler 是否使用分布式 + 当 backend 不支持分布式时,该参数无意义。如果为 None ,将在 :class:`~fastNLP.core.controllers.Evaluator` 中根据 sampler 是否使用分布式 进行自动设置。 """ def __init__(self, tag_vocab: Vocabulary, encoding_type: str = None, ignore_labels: List[str] = None, From 417a27f6c9dcfe365caa5ff65c8c8ae660a44124 Mon Sep 17 00:00:00 2001 From: x54-729 <17307130121@fudan.edu.cn> Date: Sun, 3 Jul 2022 07:38:08 +0000 Subject: [PATCH 52/52] =?UTF-8?q?=E4=BF=AE=E6=94=B9Driver=20=E5=90=84?= =?UTF-8?q?=E8=87=AA=20kwargs=20=E9=BB=98=E8=AE=A4=E5=80=BC=E4=B8=BA=20Non?= =?UTF-8?q?e?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- fastNLP/core/drivers/jittor_driver/jittor_driver.py | 4 ++-- fastNLP/core/drivers/jittor_driver/mpi.py | 2 +- fastNLP/core/drivers/jittor_driver/single_device.py | 2 +- fastNLP/core/drivers/oneflow_driver/ddp.py | 4 ++-- .../core/drivers/oneflow_driver/oneflow_driver.py | 4 ++-- .../core/drivers/oneflow_driver/single_device.py | 4 ++-- fastNLP/core/drivers/paddle_driver/fleet.py | 13 +++++-------- .../paddle_driver/initialize_paddle_driver.py | 2 +- fastNLP/core/drivers/paddle_driver/paddle_driver.py | 3 ++- fastNLP/core/drivers/paddle_driver/single_device.py | 4 ++-- fastNLP/core/drivers/torch_driver/ddp.py | 2 +- fastNLP/core/drivers/torch_driver/deepspeed.py | 8 ++++---- 
fastNLP/core/drivers/torch_driver/fairscale.py | 3 ++- fastNLP/core/drivers/torch_driver/single_device.py | 2 +- fastNLP/core/drivers/torch_driver/torch_driver.py | 4 ++-- 15 files changed, 30 insertions(+), 31 deletions(-) diff --git a/fastNLP/core/drivers/jittor_driver/jittor_driver.py b/fastNLP/core/drivers/jittor_driver/jittor_driver.py index 542b39f9..ebcd7bfd 100644 --- a/fastNLP/core/drivers/jittor_driver/jittor_driver.py +++ b/fastNLP/core/drivers/jittor_driver/jittor_driver.py @@ -55,7 +55,7 @@ class JittorDriver(Driver): :param fp16: 是否开启混合精度训练; :param jittor_kwargs: """ - def __init__(self, model, fp16: bool = False, jittor_kwargs: Dict = {}, **kwargs): + def __init__(self, model, fp16: bool = False, jittor_kwargs: Dict = None, **kwargs): if not isinstance(model, Module): raise ValueError(f"Parameter `model` can not be `{type(model)}` in `JittorDriver`, it should be exactly " f"`jittor.Module` type.") @@ -67,7 +67,7 @@ class JittorDriver(Driver): jt.flags.auto_mixed_precision_level = 0 self.fp16 = fp16 self._auto_cast = nullcontext - self._jittor_kwargs = jittor_kwargs + self._jittor_kwargs = jittor_kwargs if jittor_kwargs is not None else {} # 用来设置是否关闭 auto_param_call 中的参数匹配问题; self.wo_auto_param_call = kwargs.get("model_wo_auto_param_call", False) diff --git a/fastNLP/core/drivers/jittor_driver/mpi.py b/fastNLP/core/drivers/jittor_driver/mpi.py index 47e9279b..2e3d42c2 100644 --- a/fastNLP/core/drivers/jittor_driver/mpi.py +++ b/fastNLP/core/drivers/jittor_driver/mpi.py @@ -34,7 +34,7 @@ class JittorMPIDriver(JittorDriver): parallel_device: None, is_pull_by_jittor_run: bool = False, fp16: bool = False, - jittor_kwargs: Dict = {}, + jittor_kwargs: Dict = None, **kwargs ): diff --git a/fastNLP/core/drivers/jittor_driver/single_device.py b/fastNLP/core/drivers/jittor_driver/single_device.py index be8ef1b9..eda11660 100644 --- a/fastNLP/core/drivers/jittor_driver/single_device.py +++ b/fastNLP/core/drivers/jittor_driver/single_device.py @@ -37,7 +37,7 @@ 
class JittorSingleDriver(JittorDriver): :param jittor_kwargs: """ - def __init__(self, model, device=None, fp16: bool = False, jittor_kwargs: Dict = {}, **kwargs): + def __init__(self, model, device=None, fp16: bool = False, jittor_kwargs: Dict = None, **kwargs): if device not in [None, "cpu", "gpu", "cuda"]: raise RuntimeError("Parameter `device` should be one of [None, 'cpu', 'gpu', 'cuda'] .") super(JittorSingleDriver, self).__init__(model, fp16, jittor_kwargs=jittor_kwargs) diff --git a/fastNLP/core/drivers/oneflow_driver/ddp.py b/fastNLP/core/drivers/oneflow_driver/ddp.py index fb992bc8..4a285856 100644 --- a/fastNLP/core/drivers/oneflow_driver/ddp.py +++ b/fastNLP/core/drivers/oneflow_driver/ddp.py @@ -46,7 +46,7 @@ class OneflowDDPDriver(OneflowDriver): 任何当前有多少台机器的信息; :param model: 传入给 ``Trainer`` 的 ``model`` 参数; - :param parallel_device: 该参数无效,**FastNLP** 会自动获取当前进程的设备; + :param parallel_device: 该参数无效,**fastNLP** 会自动获取当前进程的设备; :param fp16: 是否开启 fp16 训练;目前该参数无效; :param oneflow_kwargs: * *ddp_kwargs* -- 用于 ``DistributedDataParallel`` 的其它参数,详情可查阅 **oneflow** 的官方文档; @@ -57,7 +57,7 @@ class OneflowDDPDriver(OneflowDriver): model, parallel_device: Optional["oneflow.device"], fp16: bool = False, - oneflow_kwargs: Dict = {}, + oneflow_kwargs: Dict = None, **kwargs ): diff --git a/fastNLP/core/drivers/oneflow_driver/oneflow_driver.py b/fastNLP/core/drivers/oneflow_driver/oneflow_driver.py index 17777358..29027738 100644 --- a/fastNLP/core/drivers/oneflow_driver/oneflow_driver.py +++ b/fastNLP/core/drivers/oneflow_driver/oneflow_driver.py @@ -48,11 +48,11 @@ class OneflowDriver(Driver): 您可以在使用 ``OneflowSingleDriver`` 和 ``OneflowDDPDriver`` 时使用 ``OneflowDriver`` 提供的接口; """ - def __init__(self, model, fp16: Optional[bool] = False, oneflow_kwargs: Dict = {}, **kwargs): + def __init__(self, model, fp16: Optional[bool] = False, oneflow_kwargs: Dict = None, **kwargs): super(OneflowDriver, self).__init__(model) """ 进行 fp16 的设置 """ - self._oneflow_kwargs = oneflow_kwargs + 
self._oneflow_kwargs = oneflow_kwargs if oneflow_kwargs is not None else {} self.fp16 = fp16 if fp16: diff --git a/fastNLP/core/drivers/oneflow_driver/single_device.py b/fastNLP/core/drivers/oneflow_driver/single_device.py index aec4d0e1..84d77d14 100644 --- a/fastNLP/core/drivers/oneflow_driver/single_device.py +++ b/fastNLP/core/drivers/oneflow_driver/single_device.py @@ -29,14 +29,14 @@ class OneflowSingleDriver(OneflowDriver): :param oneflow_kwargs: """ - def __init__(self, model, device: "oneflow.device", fp16: bool = False, oneflow_kwargs: Dict = {}, **kwargs): + def __init__(self, model, device: "oneflow.device", fp16: bool = False, oneflow_kwargs: Dict = None, **kwargs): cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None) if cuda_visible_devices == "": device = oneflow.device("cpu") logger.info("You have set `CUDA_VISIBLE_DEVICES` to '' in system environment variable, and we are gonna to" "use `cpu` instead of `gpu` device.") - super(OneflowSingleDriver, self).__init__(model, fp16=fp16, **kwargs) + super(OneflowSingleDriver, self).__init__(model, fp16=fp16, oneflow_kwargs=oneflow_kwargs, **kwargs) if device is None: logger.debug("device is not set, fastNLP will try to automatically get it.") diff --git a/fastNLP/core/drivers/paddle_driver/fleet.py b/fastNLP/core/drivers/paddle_driver/fleet.py index 6668d577..137aa9db 100644 --- a/fastNLP/core/drivers/paddle_driver/fleet.py +++ b/fastNLP/core/drivers/paddle_driver/fleet.py @@ -152,12 +152,12 @@ class PaddleFleetDriver(PaddleDriver): parallel_device: Optional[Union[List[str], str]], is_pull_by_paddle_run: bool = False, fp16: bool = False, - paddle_kwrags: Dict = {}, + paddle_kwargs: Dict = None, **kwargs ): if USER_CUDA_VISIBLE_DEVICES not in os.environ: - raise RuntimeError("To run paddle distributed training, please set `FASTNLP_BACKEND` to 'paddle' before using FastNLP.") - super(PaddleFleetDriver, self).__init__(model, fp16=fp16, paddle_kwrags=paddle_kwargs, **kwargs) + raise 
RuntimeError("To run paddle distributed training, please set `FASTNLP_BACKEND` to 'paddle' before using fastNLP.") + super(PaddleFleetDriver, self).__init__(model, fp16=fp16, paddle_kwargs=paddle_kwargs, **kwargs) # 如果不是通过 launch 启动,要求用户必须传入 parallel_device if not is_pull_by_paddle_run: @@ -195,17 +195,14 @@ class PaddleFleetDriver(PaddleDriver): self.world_size = None self.global_rank = 0 self.gloo_rendezvous_dir = None - - # 分布式环境的其它参数设置 - paddle_kwargs = kwargs.get("paddle_kwargs", {}) - self._fleet_kwargs = paddle_kwargs.get("fleet_kwargs", {}) + self._fleet_kwargs = self._paddle_kwargs.get("fleet_kwargs", {}) check_user_specific_params(self._fleet_kwargs, DataParallel.__init__, DataParallel.__name__) # fleet.init 中对于分布式策略的设置,详情可以参考 PaddlePaddle 的官方文档 self.strategy = self._fleet_kwargs.get("strategy", fleet.DistributedStrategy()) self.is_collective = self._fleet_kwargs.pop("is_collective", True) if not self.is_collective: - raise NotImplementedError("FastNLP only support `collective` for distributed training now.") + raise NotImplementedError("fastNLP only support `collective` for distributed training now.") self.role_maker = self._fleet_kwargs.pop("role_maker", None) self.output_from_new_proc = kwargs.get("output_from_new_proc", "only_error") diff --git a/fastNLP/core/drivers/paddle_driver/initialize_paddle_driver.py b/fastNLP/core/drivers/paddle_driver/initialize_paddle_driver.py index 552fc622..e059e91c 100644 --- a/fastNLP/core/drivers/paddle_driver/initialize_paddle_driver.py +++ b/fastNLP/core/drivers/paddle_driver/initialize_paddle_driver.py @@ -38,7 +38,7 @@ def initialize_paddle_driver(driver: str, device: Optional[Union[str, int, List[ user_visible_devices = os.getenv(USER_CUDA_VISIBLE_DEVICES) if is_in_paddle_launch_dist(): if user_visible_devices is None: - raise RuntimeError("To run paddle distributed training, please set `FASTNLP_BACKEND` to 'paddle' before using FastNLP.") + raise RuntimeError("To run paddle distributed training, please set 
`FASTNLP_BACKEND` to 'paddle' before using fastNLP.") if device is not None: logger.rank_zero_warning("Parameter `device` would be ignored when you are using `paddle.distributed.launch` to pull " "up your script. And we will directly get the local device via environment variables.", once=True) diff --git a/fastNLP/core/drivers/paddle_driver/paddle_driver.py b/fastNLP/core/drivers/paddle_driver/paddle_driver.py index cccd87c8..0ba0dc1b 100644 --- a/fastNLP/core/drivers/paddle_driver/paddle_driver.py +++ b/fastNLP/core/drivers/paddle_driver/paddle_driver.py @@ -70,13 +70,14 @@ class PaddleDriver(Driver): :param paddle_kwargs: """ - def __init__(self, model: "paddle.nn.Layer", fp16: Optional[bool] = False, paddle_kwrags: Dict = {}, **kwargs): + def __init__(self, model: "paddle.nn.Layer", fp16: Optional[bool] = False, paddle_kwargs: Dict = None, **kwargs): if not isinstance(model, paddle.nn.Layer): raise ValueError(f"Parameter `model` can not be `{type(model)}` in `PaddleDriver`, it should be exactly " f"`paddle.nn.Layer` type.") super(PaddleDriver, self).__init__(model) self.fp16 = fp16 + self._paddle_kwargs = paddle_kwargs if paddle_kwargs is not None else {} # scaler的参数 self.auto_cast, _grad_scaler = _build_fp16_env(dummy=not fp16) diff --git a/fastNLP/core/drivers/paddle_driver/single_device.py b/fastNLP/core/drivers/paddle_driver/single_device.py index 267c10bd..86994b79 100644 --- a/fastNLP/core/drivers/paddle_driver/single_device.py +++ b/fastNLP/core/drivers/paddle_driver/single_device.py @@ -53,7 +53,7 @@ class PaddleSingleDriver(PaddleDriver): 关于该参数的详细说明,请参见 :class:`~fastNLP.core.controllers.Trainer` 中的描述;函数 ``auto_param_call`` 详见 :func:`fastNLP.core.utils.auto_param_call`。 """ - def __init__(self, model: "paddle.nn.Layer", device: Union[str, int], fp16: Optional[bool] = False, paddle_kwrags: Dict = {}, **kwargs): + def __init__(self, model: "paddle.nn.Layer", device: Union[str, int], fp16: Optional[bool] = False, paddle_kwargs: Dict = None, **kwargs): if 
isinstance(model, DataParallel): raise ValueError("`paddle.DataParallel` is not supported in `PaddleSingleDriver`") @@ -63,7 +63,7 @@ class PaddleSingleDriver(PaddleDriver): logger.info("You have set `CUDA_VISIBLE_DEVICES` to '' in system environment variable, and we are gonna to" "use `cpu` instead of `gpu` device.") - super(PaddleSingleDriver, self).__init__(model, fp16=fp16, paddle_kwrags=paddle_kwrags, **kwargs) + super(PaddleSingleDriver, self).__init__(model, fp16=fp16, paddle_kwargs=paddle_kwargs, **kwargs) if device is None: raise ValueError("Parameter `device` can not be None in `PaddleSingleDriver`.") diff --git a/fastNLP/core/drivers/torch_driver/ddp.py b/fastNLP/core/drivers/torch_driver/ddp.py index 47d9cbb5..28670071 100644 --- a/fastNLP/core/drivers/torch_driver/ddp.py +++ b/fastNLP/core/drivers/torch_driver/ddp.py @@ -249,7 +249,7 @@ class TorchDDPDriver(TorchDriver): parallel_device: Optional[Union[List["torch.device"], "torch.device"]], is_pull_by_torch_run: bool = False, fp16: bool = False, - torch_kwargs: Dict = {}, + torch_kwargs: Dict = None, **kwargs ): diff --git a/fastNLP/core/drivers/torch_driver/deepspeed.py b/fastNLP/core/drivers/torch_driver/deepspeed.py index aedff1e9..2fc6e96e 100644 --- a/fastNLP/core/drivers/torch_driver/deepspeed.py +++ b/fastNLP/core/drivers/torch_driver/deepspeed.py @@ -111,7 +111,7 @@ class DeepSpeedDriver(TorchDDPDriver): parallel_device: Union[List["torch.device"], "torch.device"], is_pull_by_torch_run = False, fp16: bool = False, - deepspeed_kwargs: Dict = {}, + deepspeed_kwargs: Dict = None, **kwargs ): assert _NEED_IMPORT_DEEPSPEED, "Deepspeed is not imported." 
@@ -251,9 +251,9 @@ class DeepSpeedDriver(TorchDDPDriver): if not self.outside_ddp: torch.cuda.set_device(self.model_device) - # TODO 模型过大的话应该会导致显存溢出,但是不加的话显存会占用rank对应的设备 - # lightning里在之前通过broadcast_list广播了log_dir所以没有这种情况 - self.model.to(self.model_device) + # 不加 dist.broadcast_object_list 会发生设备在 4,5 但是模型会同步到 0,1 的情况 + # 原因未知 + dist.broadcast_object_list(["test"], 0, None) self.configure_ddp() self.barrier() diff --git a/fastNLP/core/drivers/torch_driver/fairscale.py b/fastNLP/core/drivers/torch_driver/fairscale.py index 02dda6a6..304f0bfa 100644 --- a/fastNLP/core/drivers/torch_driver/fairscale.py +++ b/fastNLP/core/drivers/torch_driver/fairscale.py @@ -35,11 +35,12 @@ class FairScaleDriver(TorchDDPDriver): parallel_device: Union[List["torch.device"], "torch.device"], is_pull_by_torch_run = False, fp16: bool = False, + fairscale_kwargs: Dict = None, **kwargs ): assert _NEED_IMPORT_FAIRSCALE, "fairscale is not imported." assert not dist.is_initialized(), "FairScaleDriver does not support initialize distributed by user." 
- self._fairscale_kwargs = kwargs.get('fairscale_kwargs', {}) + self._fairscale_kwargs = fairscale_kwargs self.fs_type = self._fairscale_kwargs.get('fs_type', 'sdp') # ddp, sdp, fsdp if self.fs_type == 'fsdp': self._fairscale_kwargs['set_grad_to_none'] = self._fairscale_kwargs.get('set_grad_to_none', True) diff --git a/fastNLP/core/drivers/torch_driver/single_device.py b/fastNLP/core/drivers/torch_driver/single_device.py index b59aba64..483dc257 100644 --- a/fastNLP/core/drivers/torch_driver/single_device.py +++ b/fastNLP/core/drivers/torch_driver/single_device.py @@ -41,7 +41,7 @@ class TorchSingleDriver(TorchDriver): * *gradscaler_kwargs* -- 用于 fp16=True 时,提供给 ``torch.amp.cuda.GradScaler`` 的参数; """ - def __init__(self, model, device: "torch.device", fp16: bool = False, torch_kwargs: Dict = {}, **kwargs): + def __init__(self, model, device: "torch.device", fp16: bool = False, torch_kwargs: Dict = None, **kwargs): if isinstance(model, DistributedDataParallel): raise ValueError("`DistributedDataParallel` is not supported in `TorchSingleDriver`") diff --git a/fastNLP/core/drivers/torch_driver/torch_driver.py b/fastNLP/core/drivers/torch_driver/torch_driver.py index 60bd4147..a748aa32 100644 --- a/fastNLP/core/drivers/torch_driver/torch_driver.py +++ b/fastNLP/core/drivers/torch_driver/torch_driver.py @@ -51,11 +51,11 @@ class TorchDriver(Driver): :param fp16: 是否开启混合精度训练; :param torch_kwargs: """ - def __init__(self, model, fp16: Optional[bool] = False, torch_kwargs: Dict = {}, **kwargs): + def __init__(self, model, fp16: Optional[bool] = False, torch_kwargs: Dict = None, **kwargs): super(TorchDriver, self).__init__(model) """ 进行 fp16 的设置 """ - self._torch_kwargs = torch_kwargs + self._torch_kwargs = torch_kwargs if torch_kwargs is not None else {} # 因为 ddp 和 single_device 的混合精度训练的设置是一样的,因此可以统一抽象到这里; self.fp16 = fp16
  • JEb8#^WyOcnE_v`AmT$lo#M5@Z? zsb_DlC8ghJ>tc{?g5I>+(K0ODBwxQ;j4~CiR_x`Lm*cWf_v};!hg~|ZfU&kaEOlsE zb$LhqC+hChStaY9K5XLWAPa*ttkHCid256#hWqV~R(nn9d}TOP5leC*fVuN$+nylp?xnH9Bv#;d(_ zxQ{?FzXEz4rsT6EM|oEhGy&ROe;B0dc?PSKB)sfm77oUDlcQy`RX-IxwJu3`l1@rD z^TwG2yR-q1{&DV$LR{Q|0?#c_0ITHWtjjsBDM`_6tnAQ=Yh;|*a}){!0m$;D(H4u` zY`v97eO}TpZc#O<=-DgvFYkOC%X#Pzxc%FWgX3JC=#mH~wk%6XH33DQNm@)D{;>QS zC(ayK=_yB0!LPS5h$(wt@A&kxWxvk9eXfD;tyOh$X3(g2Y%G<|>9<0Eo$RL?Xb;lb z9S}@hO?}G0Df69{KE6BWeBbKbeSV16Ad9IIt@Nvl>z=$b2S4-45Srwu6WttveszOv zoJMjblhhq_yZV>~YpT`HR~`Rzjl3ev;bet~ls`rc1x!-dwWDSX1(`+V?fK3PF9enT z3Z zK?tn6i>w)p-B5_U-8qvv8wRxS@%6FJ1yD{~9F$jYrea(PM%DXseXcT*oR3fR-tG#C z(9YQ4O14Y6%wA9W)8#M=-@mg*IH@Hl07vcxTv^mCLGJp-o9e{BA9MXd0>r&T9d%^d zWAha5-9KE@sDuE9VF>hDumQiBhLcRecLrYg5*?3!`WQ^&aN`9t@&m=V1QXHx(e}KF zq(T=y`m3wz;!;4Un2-!R8UOl0`>~>&GClqq`Egn7`jVet#K{w|k@gY|un96V`9ykf zQmx{jZNj!q_857#*O2%0?vB-<10jDmoCVXpc1ONm?<#=N-P=Jy8&geZ{ z`Lot2>Ai~@tSHz%%o5RT;7(6-j11p2p(q7bi#~dYtaKJZG21G7a$pY9s5_P;3lL=s z$Ab$c@w+LG z${Z@?nQL#4h`!3w0wiK-wv@pZThOPVo>ee^%YZ$eL7CKwzX<~!(4H`mMUIR8j zI{Y3nrna89&Ejz?@c9UjOy_cwpO_EhSD|texUVl`sdKx$!}*gCYODvq)-`-YzC(zl zQ5D*Ks5$1gLAvN_Z*M&p)hJj}*mwAVPnA|*Urtis*y063p2Gi)F-T+m>x8L_^jT7m zh$_faB@00*1npl*r)}?bVYaLo(P})>!0$Db!ZHZ5xL)vJ@V9_s?=8=PIT<}8QMegK zc-iIrc%kGajpE1%8NBlk4wsQ{Ykz}}vD>k705>hbx*tE^-rkQWJp9&WkQQ9$($_F3 zs@Ks58$5bidXbNbhj;}Y{{z3}mctE%8TM|dJl~{ju7yCgdTd%OBM{%odP(`KG(Z6f zwy$d9%kbu~vc(Fn?ZqCPz6?%>A!LD8&JazSyK%TdM&PoiHFj7D^S|OFcr^ds9MbNv z=Xj>CxzL^;??_GY6_7LCF4I*e=B|1VAgr&{-&S@?Cl!oJbiT`wzv}UWcwCAH>mLRj zg(3gus)Srz=Nw{Y)pkGGGt|I}XtWg2vw?@+~#i_?HXMA#W>Lw$N(AqE2Z zdlwfqo(*So(tWri@V`WpM!tq9u`beRZ~*w}x9V)oXRd{saQfRgL>_J_x3wy0&h`9< zZ>&(o>*O%+1N!a*!unsnDkV2E2ZwD)nv4{b!l<_;lS0BWFK(yBu{#b|@k_p|qD7sN z-`~vL2e7QaV0foCN@av?usDy93LqBaE1`O%JE`75{dc&kZUn+!?9jxB zmspPnAZx84=@*SbQn^R^{mU?HXR~0>T&a@!yI0>Dfaq{(X;1PN?ZoDq@rCYl!mV1b z-1WSth=PbY7JIcf=inW%Pcc1!wdsYZtp}9+2xbr}bSZo3>w-Vr`P+`4au zQ}&1h$>N&y7C9wU)+j;-weqcI3Qs;(R{G@A1Shb#ybE9u#@}@B1IFqOOeRexGP$i9 
zu1QkgXVBDh7z?JV-m$64NM$A_f*+#tWzPcBo8CEy^nv)3ONT})5-Us44Jvly?hF_F zF!bCXu)SfRI8IOXqr5X>33QdLj;r3;{;r4t{D%G6=GX}<$jWG+w+9<+B$>R9lg5i$ z#Lpj}7Rs2k4mS|T>=GnD9?xxb`zGT~`3(`Yc1z;C`<2PfsN9f{vwUE0vfCpCx-D^PnX`uU z#ojmLE(_j&tj=6QBA_>o2S1s{x@3MPqRR;qV1k(hAychna?q3ob3|zPHru#uK^VGi zw2}g%Gd;7A#FW929%-kRqF?={iH$i7*pOBj@h%hahEHAeijjFc-0>Gh z69B{wl}R-3qa!9xL7t@FR)JK>Ry@D6%oC$=SmKU(fv$TTpzicp*X&x0n zU&Dwe;l6U4c3f19sj8@a7lhNF_IUGNw-JIUO5^HZ;hejvvE*;LWJc1=!hq=XLe2@R zuW@lb=vx^1Yo$9!YQRxfd+hqiU`6iC*l||b;Ulo4zjbU9er^b1I42G;1A%bEbQmai zbLG|;#Cy+jiClK&`bS7tKW}h!*@51V4C>cHH)G8Hx&{GrMp|KNuyw`u9$!OMQqZ#<5gcn1Se02b8L_@3$#>MhU%T`UDrq%=W@$ zs^cmhCK5bZ=65)8%TSK{%L0Fn^IdQE z4?Zr9`Po7}gR(CB|0E5r2ZyK&?62IO901BV$K{)`&DRCh_@4^)x8MJppinQPf)zJ` zg;Ed>NpBhG*u?y&8O7FLOZ5B^9?&WDg`s4Pb;sLjgWQ#u{hn#q^fv zxOa&0DB<_d_&-siVwFTUHqWlNE(=(umqyMuMqFH#z!0B9DfSWUlPuqOlX9m)yt)wYjZy^4?`cKp8_vg7EF@VC=otd|&<->p{@EQS z1-J75@(m@La@qHZ=wB|25Y4&sQ6eJ+>gkY|Y|t z6Q-GfY6(&w9Q7kFvm%4I1_?TO47zv*H91xzR;!pQ{$7#<9d*|YVVvC=j}Xx_1i zg2hJw8>R8Um!jb^c9g}-Xp#O8@#sX*<(Y0?3q2T#(<&>G;Xhy{P^(X?7A3`|2rlem zf7Z&M;0-db zm#3=ks_zg?GwI-Gfgj_l7+O~^MM=PJ{}ElLZY0KBaxQ5-{F3VOxsNmJ8k9ewvjhmCi1y>XQ%;zL~OTM#6L-3&i6 zT|4-G)oHm>Dq|SaxmC=6&2!uCt_mL#LhVyyF{LkV4xEm>_I+?jcVB=bl!}!E(qV0x z0N@xNL2oOJCko1w;)Ni`#9T3MIbG&)YCt&6kkOfqW>>IT-t9n3=I19i6e9gu+zJW z1=9<}x^kvgnA;vXIPXN&dEGt3e{45BNS`TlHyyZn&vPa`Q>0*qb3gjK z)ZcdUMGUntZjhJ-JFf$~S!D=<{5rNT@Iwa9FT64-#9z95T&9k+I{@M#FG9fppySUE z0s&m1c>8TXE$6t${GJHDU^jZdX~)TLDX>8ttCLoO9*BH{Rl}M6c`hvp;^K}+W58nJ z!ug!40p%y*05FF<<_Jw~N^d`6kGH=38|~duxNCp7t0mBg(eqtsCk~;`-P|Mebo;Cf z8Se&Y%f8G4yHfk7)=~0oo}KCvd+WAX@sMtxupne%{XCne@L2c$fRHG^#GKsiz-qS-r(_gTzyGx zW_LJmWGmhRzP+X?*!5uswQu*&>Y5e%@;??nyNd(9_e1d2OSsKxKOHaq1bSVr0H>x3 z?&fG&&Iivwjpo?IFK*bCAN#Z_fUsp%|6l=HP=DQwC6cwY>^wtMgUvS#6AVDOp^0tw z0iXHBNAa^u-hNjX>~P(Z0L;9{&2?k;bN3zyK#emTScd&vIN73Gr6Z+83$f%GD&`Tc z>yAA}a#N~^_C+@nu`yx*%vu7kF+7}_GW&;4qp+Xh3PVxTpC3u0?v#O__a8PF()iiN z6W4C;UeCVq@;wbC*CJm80!)Z?R;U(NBj@?9gNe*i!qfel3d8dtE2Cq?wY!T4f{-rw z+?vo}aUoaPc!f5y#MaNqCpl$&5GV*d%z|cZ? 
zhHt>PJ<-lsM}B?+AFv7Kfj5uwUrt1R&84aO0-pR~%^9(4B6%ntZK$V}3qf%}@rr6^ z6tPaA$%k-fvM?;Vmx;pB=JlBet{nf$ejn_(@UBBnk3x~V*2Vza9yRWQBw&*1*!K`@ zx}4Y}LdnRa2$81oj0dWPPVwvRitvoLI;)-#zk*5^zRMR0rS)Oo*3LNSVK8ghFXO6zs7DD9M6HDo~LipC_R?)RBw+`#-t_#YjkBlJP zicd(4EMurdVd!Y{Y?$6SfzHRo#DeY@xFzK`kJkmq4)s;(4I=!)V@|E?PKi(tAAtOd zFrIKbgMOvZZgTl(Zk2LSp-FIgE{r5|7F%YnhvAfSYVthvc8$*hdfT7B!$pC*R9m<8 zg#gAWq+9{)bqn@-t}i%!y$>leO|sw)3cF&Z+%sTTb8Nc$NWX!%5ql%LL|svXIRH0s z;MWmrNgGVHd;Czc8}bhm8*sZSc65_f)TED$&=Lu4Qtf8=lcqYSm_%P>qsf|+7CcL2 zYIs;O(eF`Wn)x^VhE5H$G8I?44AFbfv1Y) zoEAl=FNKcll=Ul`MjO+|PF)Wls4CB0Vrx5wB;+?32L6~31r|nPtneupxH;}&s^|0@;n%%-u@U-=j zjUZPG@fTdZ_~95(zfran9z{p|<=dFRnr>8kZp4#G1Ywjj zr3;);v?*r9r&?m=OIjH6rTGqoS_`tCxe}bq>5!hhx%fKa#9*$K1FIBekiyf|RiM{v{LUPHaNSkxc7@f&wCZV_OO(Uq}x!>m?!2En>Le4Z)hddNDsUIFHtiJc=n9qYVcrfKWnHJq??j%Ohc}3Kuaw>vMV3B2WMlwH`0%$m_ntGEyT+bo#eDUAz#?b5um49vSR4 zl{UY?f=M2S7={5P`(EOXg>Q&ofpqc{2;~qmGo%qwYVu;TXhk;%n|QYG%f$H1dVQDZ z0LO1$3Igfu2t}#uA{U&Rj@!px1l?X>`g4=jqgLeZ7=e0^-%Dos6A#ESwg@i%TU z%69AO>-YD)jg{akFGqPsUgpq><-Q+6H}6aY4{lQRRFF17-~HGCy+BN~(XSF-o#VOG zAw|08=^cKlFUpA#=|5lw%Cl)sp5fzFUP*VjG?aKCN_jta&sSWQ-FA^CuP&meqK9`p z^Fw?@K#-UXpkJL(EtPV5bLE;@OJ^}{m(LIr@@d-aABR?~AB?s9;E;rGdM}`Vf$!fx zh=*{W0ZqnnspUJiL)EA!O#m5EEe!!AyNmaIG3bV>S9!gU{MW*^2A83!y7oJNYVUZw z&+x)@3Hn}Bmgnb#n_Llm&)DrgBT)mi| z3%J8mr+)3N)R^8j4}8|~{XWJ0@| z9Nrq1Gdj(H=JCB}cr^5iVH6ZLI!Nw(N_;u2x5du95~e;OT}09k`zbuRySpH_>~;^Q zm}3}^=V4*85$h45T1fyIhVm#lRH)YX4dIv%ZjITy{8^ki8fPLm@xTeIr%6+?mhaPg zUwn|%c%-X%u#a!Vt~Q=xWxCAGLWnj^s(IG{Yc9uaUXZt*&HZ`xz)tjDk5avQK6K-9 zRD1M_PJn9@sm7kV9d`~c_?h(ia3M6u+U)z4WLX1zH9bE@kDsXL>XTAXpJ!SlLabog z-DXXp$40x_z7A|ITkr54s+H+7>~Q&^%`o;#g8^f*m13pdK%-N6pF%wKlYdTm>=%7g z7>$sqJkK+Low|k zy|`&N8m^7@kOF(jGVWpY=DhVpMHI7@D^bY@5?btAjo$<9E-cR0Jl;gUK}o|?Cd6r) zTn)g8g`|&O8>dS`>cn=d#C(#7_7E|T-zLX-L{V4GCn;arYY9>OcM(Keu`CwNuq>MI zdS_+$QGaZQGVW=T6>NP&D7R0Zh=n!UbY*=c%0z1Z;g`Nxh;hn&7AXOpOH%GL_cMei zf25|wdTRxWJ`nG?jkMyDttLU56MhAp%uE5N-4fHAv_bJ*W0}xlkx>37Y z51PZ>E7Wr8DF>SMwLy?&yR>kn?X0234B55nonRQbbC)s|XFbt>bpcsnoZagNStcgE 
z3zGFF`VSeyD?5!VfDU3zcWc{WWU;r;~n z=bfRlLxB1QqGF(`EWrjxlHswu6+Vtm(^LJo|TYVZ}4X&d?aJ zOw0Xbff6qA{QhvJ>%mUGdf@YwI@n^CS3`FZ4SN+LF{z_Y`d^uWteSJ){V8vQScp=u zF(!5TZWod6M5vzbTg+0x%*w|BLGI-y$amYFbS!LFl|mbj7Ab+gJ%DwBPCHvzZb$et zqwOpo-wVw~31s$`K%a}-o~$vg>#+fNHPXeP?j{Y{<0?~jQiDWVN#Xfm$$FHzFb(4v z-?6YY+RIXH^41@f<7I^4VZawoa)~9itg$6is(A6N+10;0Dy|2X&_XHjEur@5NMvXZ zVil9x=wrdn$|C1+^MO8Jq8`n&+=-ahCb4<7?AB6lc{J(!f7#w%4j^)Hp#Bkq=k3!5qm_asCKDO+Z~ zj+7C#MUd;LlR*I`pi>W&a4#K%c(mFm!qs5f47LoI2*hu%V0}f1(_@p$+IVpGe7$7{ zy+^!HxTh5wThWyfB}lxln~M|oym|(URLSLO+l#%;B!LAJ^J|ItN*LP$dgf5yW~z8> z)bR04{k3k)Ul$X&HCQjreP3O`2TK)_BZatJ>5p(l@x{9{CZA_iQ}z8lx2q8`#&S|H}P~2ydj^P-AUc^2DOjXw4KZOdeh&&v3Os zB*i4^g_0)78&SXZ@f2~u-3B>W$_9;TU8n*=!J}4Yu}WHx6hnv$2|~Y}+Gq&2Q*`3T(n3JZX_dMUH43VC-q{ERzxN-XT;2`sIwL}`)cX=kY4Pm!fE4%&ZIq! z!^oDJnAS=<2^U*BRA2Sa0R=_L zOi^#Cphjw0SI)@IMaO0kF&P#p0vUkntt91k``VWyPw8jR!eQ}tb}`Cto1r~;oZr;H zKMu}^XFg1DZWUz@>LI@9*T^v%786-De9QuJ26@rC!@1Km^|EM`e!ww|7H0CdJDVz1 z5m6`l{LK^JU7GR$WwipyBBP~DL}|c$4a$A-%D|g7IE@&AMNW*bvx`K!M}pzQ9riRi z@Z@@d)-v;F-58gc_cOI?Y}t6TD1sMko^{IjtFqmQl;6Jtv%WmXPovc+Cl&5^p^l5`0$)$YIQjS$C*yV{XtmpPx6j(DWJI5JE~2OfG(Helem6K5sJZ&(K=d??na{*8@@x+(Om7#`*qJ61{seZETQNsG52 z-UYt?aO_daj1lgVOG1aFuP9jGt6{%p@l~l>CF8p7a;;fjjny(!-FkpKq%1*9&iJfh zE)_?3q!a0j3Qr(z6gNT02~kS^QlTIh0X}Rz9NvJ{$;lsW{c&Y?(<&N`VDF?yJVO1S zTk4D&vT^R5x)+h<&24Q^bi$l}2&pSE{vf9u6Hdwl5u@K1L_^z%XjJCWBopVSo>Wya zGDIT1#8T?Mj3w9X!%TywZmR=N)e&MsRW*T6Gx(pt_-0zumsb<;ZDz zbWstC!+C{k(P{%?-}t;1Q5wf(mJN+1t7ekFrp`0n!kz=7>?Dkbe;pB{u zh0$v6_(3=-A_db;jp~`bTxDOOQxK*|x09r~XrKK;2nSwaHC3$V5MTAVE5T`Ddlu>G zU9OVk`d^M`XCMO3yT8}E(eLi>lscX@5ye4rgr(10heRa-w)@U*X!MzaQAlC?7T}S$ zwUi!jkkUvX&>{v$N9Z!-6r-ZwLRdK2rDleTSe?Gi#PH*a>b5*Q>AonTJtII5yAM9W zGVwIm89q_SAKtopb>fzuwuC z3FN53cN6kO{^0U6s&vW_)oKUyT0JVEfnEs)7+)I}ZF|0vw5`JC>;I`4qEAWrNZy1@Y4MOPfnZ$0!P?k6Wlp-bM>oBj_l>O8C)bXYDjEMi}yA1IO2#;lN0 zoa_M{YaXm})g|~eG>}N6iar0Ja0#}0zp6I#8xN%_VTm|=LDidq8pRj4vTAvylUD#9 zZl#93F}YQbSH59q_AQejfzF)r>pk!B6whM8DHkQ0ZeEHDiCX^#LE&pFFq{5#8(YaZ 
zX?x%u%s^H5V=qk8NZO{p$rH80)UUo6t%NXorcH<>?)Q5mRLvnn3)6VKP^tvN#5D&s zJ^*q&&^moUg)csz5KOl7e#xfp*g8D)$Zu5X(naPm+=?Fq0CpP!*k5Ods(QGH0!=;c zfxI~U7PftB6mC;LNbW~%EBv&;{e0AA-;$=mEF?*v(Ed~_!+JhG@m?KGg*cXd#q0HO zAd&smLE-DWgLtykjQ)1Bze@h6>xF@(JOoKFvu7Fr`C9KJ8~W&`()fpby`{h#Hy7v1 zeqG!wg|Fzn_FK0YR&+xNFhKOUgWT(1+EkWfdEWgUM%?6^vm zoZ&p@ZEqI9+Ci_G?sO??us^!JCD4+Q!^tuY4Ay9&0WzC7um4@n!+5F8>+~HaN2OpM zTcD-#_C0BH@1VTK!I7MVlz=`>Rpd5cSPI=GD=^hdJaZiXD=$3yUt zL6n*Aok)aYNBM?V89Z+cFm)JokvIL73c)iDPn=(K+7WUH=i=%W%{^NNcTTIoVSWf{ z2mR2EcG|AwWbh)6IbJvmr}CF$`A9d^sgv$2g;!(!d1RJtyybub+`euxtbJ8otwtt6 zn{p5o6^*=gRLlyPW#tGWVFAfwEC1a_bj&|a2(XCnO<1b64!|Hi9ukn>B9&||Amn@( zSIv3KYIov#p>D9TFLv*A&!~E89S6@Ac+}{R#vHg_17J%s@-Y*C756e2uJ_bTren(T zoRey#azknHK!05i2$NbZ764m2v@-I+ZD-2Nw^v)4>OLvbn9{g56(yx`n-m2@s@vZy`>p}(um^kHSql2vm+<?Ob5V2~+1i$#Z>*?6?EzGGg7E_)Y#QW71x$6KnkD$GO>+Q_zg9$#-W=6_ z9JkL4#Ztg{XxInm702$D%ViR{yh$E!@_~HFZw98_BKr3VGTCoRJFDukLC zMy0DRx=BwTUm=6Y>uVombiQCc@AciTEPqhpQbB#5jWW$y41I_FfFId<$^o&oK5**4 z(cmC)Hp@R@uacA2xJ41=^xQwx5evN?@#`UjKZwE%ZKn2bLveNzpvqp(pcszT2y}=6 zro#thICW+aqa&AMMtaEPsR%=c>`ObMKujyUH3WTcrs4E1`xa|Un6)_kT0gVKdmsr- zn7f<@sqHMztAQU7{NAn%CqLxWrqf-z~Y~2I=H&_JY zi(NRsl&ct6#_XL_D^R<@%q7?fQNkqcucyi_L+ut&QpF^-P~!%5SlHQCqvd zj{SQ4jAf-;#1BBT?=ONtlm6){EB{6sg!8uT^}rizr6#fHOT^kI0QPZflRIdKaRFPP zo0>eII})bGcyHLIIt%X%4Al#nxD-Wyn~v1|hBs`d%_?1DK@XH&3By26Z_;$hs>!l0 zOo6UDNQu%(-2Op_V!Qz&0?-oSFxEQU0^Ze_21zX0AMswNlW3zj?VhXe8Zw8)!kLgG z-5N6Kr?R+iW8XZd;C>#vx{h5-bDIT9a zE-;m^!FD)>lgpS^oVFaRtx92`GGu*4cGG5)lCIltC2cAjO;%s`8SmFPlzlhC@qcnz z8xl%QYKqMzM!_lz0%WuvmmuUX4RFU?V@QzVL%M(@3_3TutD5E>MtR0c&Fs1#U2mlLjao(IF#?t6QyK=1{@=(iAIPU-I1W zr|Wt6m;2;RT~AM@uX*sxq8Bio84oS=9oT5H&Mfd(Qi7rjcKNYSS5JS?{eB?Hx{z7d zx9e1&jQ>R6<_3>=_zy<#9T-^*3N96bhK=Kgd2WrdEMb}53biaXoy6+60CyF-C(5XF zt<%+QRjrk@Ker_xn9FjS`{8L`rrWCdv}LcSz^e=UuAMpe1$~fv0CUfqFt9+|&BfZ5 zrJA28-~%<wz{S4-b1%R$CqVz?T_^( zs+LFZ^2!p6vo|A$)((tI0g>#3eiUmrg|WV~dt56MUHp0&YW>GRUgM#Nm3dS$1Y?V9 zW;E=^V*VZL=Yw0iyDl#aV$E+E{Jje!s_*n_nL~P3Cp~??7@{(6gXB}+s$xc$j6OI8 
zq@Ws0O&^kL({F9m&spn&6u;6aS21H%o(~O*F&!RW^BgppB|ww|)+Ilm$%=+w%Jp2* zC(QAaVKVuX3jcAqp^_lykOec9V?RE~Cr`wAhEzc_)zZN+-(Xp3>f0;mhxDAwwq@Xs z+(SW4g#o9JCg^maj6{Wr`_c0!6JKi-IS8g&qw&kf+J=)OfMox{uT-zX#Xapjd%=_y zEtuKVpq(@#(-nJc?bkR3(tN=g#R9R7ZX$lG| zjjfKHD0DQE#iLUy01;Udg0c0;AnFV{gUu$SNK&qGuTtj1+9&jf%E6SAf6-Wa`!ut7 z)jPU6Zia0oYJoRV_koJvjuQ~M*M~BBTG-k(WM{bF2_e|N`2{#9HBG4%xq_#mt_#+ggM6;rMhg%i+W@Bv%Gvn!v-A2~aao?eFzxy6O{GN5uCsGrfv()=h4 ztJ1rJg0IsrsxS1X_+p>q@c+VT1x$FT!iRn097z&>ZiilBC%e_AY-W@3`=`r*-71K0 z9o1i9M~pYL2v8>fA}uP z1Wsu5S?oBCf4fRt6+sK%9r=JVx`#!s;1PmM;p~7o8qT305$-7C*P1Efw&&yz*(h57 z0hoC|o1qZ7EaFKD*tfWxu?>D-UMpwfTB+(;49(f0urn|T5!Q=k6^rY;2*74pugo`e z$n5^xgQjdL;5TKe^t4Z(klq>>js;aRhs@$Y7e32w^MWUiMN~<>yQ!m4I>{e=(PaQHG>QHkS$Y53dIEI7F`dO$-M<-|vFK%~W$)mMH7X{9r)qFIqb#P(Yima5AP8 zwbkNLmA7MSWw-zW-!;)t3VBa7sZ2o0>G5$6@;9baASgcnL_ic3mR;+h)F==BST<-~ z%84rAWpzC%7?F&F0*a-H$epN<|k%q*_;A6DI}p<$5ZVQ z`afVmX`%cg#vX?pfuDdH^3e1DdM>4;DTUwfv+yq{pw;hx<^n^i8u~5q`&*mHb7B(; zOvCMef$zXSVOmpuUD6-a>n*&YNlW}9|H0*jEo#MFFQrQXA(`OF*JM7 z^TVpcAG@NVOXLCV_Mo&{h?Gr-^~Pwdv+Dm)@ea_H1x>r~gp*`q+csum+qP}nwlmSp z#I|kQw#^e8_vC%Q@BaT?Yp=7;K4*9D?&|8Qr>d)p_R*7^Ng}u{^Fy+#V%TPBc3l1| zWg4Ul!QjSabyaZQ!6}8$_v#I7cQPJW)ss+UB69tJ`3Xr<;PKDnsptjQKa z&J!XgmEKUN-k!pY!pGC3k{>Bq)PCFwlLNISy$*a_dEAc z253o)rUd#S3i7>=x^D%wG^tcDPAFg}H?ML#vwqB;t*gZvD-s`1_>ZgLm9SW1Tu#mG z>XNB>%{II{jT1+}au1@At+OTspNQTtk-w{0Sm%WrZXRxi&UvET*%YaE^dY`;v79)n z8{@Wbf>S5!ltm`q6=`F-maj0S)-e9!FPIfMh^unC2&jsVf>V2x$sf}b%)1_yYq?x< zb4%C+Pcm=`UO>arlB&E+6|Z<588Li&yz7{?dUnZLkWm#gSq985?wkg|maUb?j-l55 z3Mor$aQ@o>y~z5sK8hAplF$jfqgreC1+Ga7!MpkvyCcUQSmEnO7$ONrP&dfhsKg4YEo`=hD~1u>NeJ;O_yfZscM|V zVA1~3C5?YLxw%%rjAgdjmIB4cl^WBb^k-b$5H^4iDou;ie7BCmqspE#5^(SmWv{^4 z{mKvOAu3fdbsd8@KC$f$F_HAy8Khj-hf`+w}#DZQD))u#bs>>>DzrJ=C=L4?u+DhS^!Ui9@7U*SXJF^|D&3+T+T# z;gC$|1q!gH18+c-lI|!lt)Ajx6LW#RKod9zRIZfsKg`yk?;}zNty7aB29OSs2iPf2 z#_I3;TUQYW)G2kCs#O~Ahelt6988N+>cQ$SshXI{)J~5C4yp!s*pW!j)$vZK@^a|r z)R(EUqK&g)(dXeYjN0MU%JCu5!fjPRCMzl#_3;O{uw^ZFa<=wT}GJRAw 
z*7QDXfdnf>>hyLOwao7g!STs~x{c%C0e{L56ZJNO3$ajwqT0!sob^tcNwn8CE(Y0+ z!__wNd|=Z0V;%3e#0^}V$%XgPzrbvFGXozC@-~ju(6ZM9=8>6m?%Lat;=z)t$rL%V{3g_i^Kd6(j}nr7_I<*8E9LPZ(0^*wuDIMVVXR-E!KIb?e`V%-URL9~ z?BgM#`+vI0*XVX*vsQsQo~}yKTTml^`!QV`iZ?_?x=>32XH5(6=k|HV@|xsMsbbj! zrR)N8Jnm(wyr%iBdj@H80LMkk%&W6-5Hx>V1<4G*ZiS63|SLPn&m6 zmO~aEAT6YM1@uhp5eS5*bAiaNmO4v2^AQxx5@l^CIyj#2d1Yx5uN#Kcb?__Y&On7_#h29 zwfa0))#8=tM*er|=^J)=cb_^7-wX?++N7Je;!7*vN0aDH>*x7|nFU^wL6E5L_x-B^ zGs%qG6qm7KF2bFele3R<1G@c0@N$;n$%g2Rh7!f^#*ZgMk`W)Nhsg(c@l!-6XS_J^ z!{pHFNB{7Y{wl8XUQ2<-BlNXKPIb;A)VIDv#+hH^zAX-X838 zo8_eLn`G*`?YKV%qd%hJ%=KAo8KZ#S^phkgTDCGt1jqRF>Vu0!%V2fQ1g?ao^DmZDa1qPZdQvD#Ih1w(d zDRy9Leb3ZPn=H@o6r#E0v6&loG~A3zYt>I2ppBk;(@%HH8zvw9yHV#Gdi}1I`+NPgaLO$Gc)hg>jvJ;jvAN~ zkMJe|x|sQl;>F`vNez58ip*ENg_vl^pHa`rEF&{PZHvF9WhpqI~{IR4@N@Qvkl9Felf?efT&y}`vp%HWlqz+Th_(tLM*W=!|S#Z@~i zh@j{p-8XRCexP3hSlnskj5kW}#Hs#kH6YpKyZQ%E{?6l8gez$^zhF$IQ63zbFQ%|e z&DbSDp3#bp4I*nCm;>eaWCBw0{&Avq`mUV(PkRO;0;y`~A!nlqfk@m5a!hsWR3@d7 zneQ#GJ)hgbd?`>d;sixcM$oJp9;g4Byzy|PuAJ^<>BYpZ9Dn-J_V6~3S<^ZsM=311 zC+e5m-Zk)g0l;8XyMPik`cw-@&)Azk{Krdh|AB+z z*sKnNkc>c32g5K5`X6iuetmu2?DxqF3IJKccMf}R^V&W zzp#J?4-c==9tq5;6%hVPvsUC65fhW*dgm~)vWl85kUHAMM-sfrD=wyV_w*DbMW%Z1 zaKGNQcXdtKf{FvS+WDVU`4H$dFx87Yv#>vIw+e#dJ*fEjv1U`5R2%(DhZ}8f;xRN>P!NmBCR3Km<^6mJ#s-ujVOS z739H1lo5YqQ?l|4BO-z)AZrSOQpInMnZ9_?CzQU+s@14o^(j zB;BnngO4l7%gawwo}d7qn+z!ACxyxtIF9uTIp7=Y^KG{(5o3_{-DRz3x#*EfWX;*| zqH*wBzUOdB@tuYw0f(q zQA1qFgP*aAaOu(>_Hei4esIs=@U!R_c}E=#3$geNGPuA`a#RU^MByOHcQde$CRYSS zN5?0sT5Bm=Xjs)t@CZ;_US0VyvdKxyJR*hWo*}82vnM3zz~<2D6K9!KH2)Y6`v!VA zZ;k5{fqkvhKg}nYx>+xnzPD+iBDY1^O$bz)frR0Mp~OI5FlVfNaN7=X%M=z*wjBy^ zH{IwioK%E31TXj4{$;+Y9rOx@GsIAe`2?H(91f%js5N@f-JLU#>2IQVXnV7XHW215 ze{pkjmvX11rVbG(l_=lKZsAKU>ap8*aus^>;R^|t*hCRRpGR6$1d%p(K( zS)xXqS}{*1%>IdUvXdMtH-sobe7ezQ%`)!j=!k7tV*B7AL^gwS;q|4I5u<>Rl?bD@ zJ2t$U2C7_>&KX(m=}Gl~aGHdT2|rbG9SIvL@-*_hk1Ds}oH5i1GQ6F}MO7Jc?*9N{rG1Zeo zu-fXyO5v$($({+J&9OG`~(u(9>CUZ37`2 z%JDA%*!sic`|`BY_f9X$;#CS`x=;)T-E!Ay#znPeqUuQS9~zY~CCZ~}sM6d^t$l>pj_6LX 
z2=Ierqt>{thIcaSis@VA2XEddV%qqDvbpJRViZJ#GW+Sv&U^L=hOd(dQS#^4K0@~GQK**_+G$AWU&N)8B)WpH2>q4t$e5*hZ{%T7YST|9m5eBC~lh7 z?y`ekd#C3mmu(s^N z{l54Rgt1m2*By$Lcr`z$`(&y)tpcf%kb6HaBi?L9XAAd=oQH7Pn<(f9b>Z8_4Xr^Z ztXdi7{vAdMVLL;OyW0I0X&tM#L|TYeH5xDYNEas#x=d76J5>$T39l+>hoGJL0`53# zGo;QfOlahY4*l$iH_YUfp_eE3P?Fb*3HHa2y(7M<$;ry=^_Ol@`l?Y7qSP@YJUkhA zpwK#nn@cPCzS%>AA64@_oI$CSNWHw#e!mKN9eQYdT*x2oQ|f=c4tGa z>(cU>daT!v>EM8yyJip8RIJuVg#GD_G1}wCXZ>cWZrD!#Sp+EBwZ+Adz-RPfy)s3b z1t-<;$Ivm{x7wgrEvtZ)Fc;|5-Mz$HiWCFFOOzNH0R*`1^14imm;FVro_o(`qj0pL zwLFEifBnlR;gUQEi>R6X9h7iLvBZ%2kvsWTIO*YYjL-)Sv7l8VyF z@`e~m5^-l#JSI4d{kZMyen5L9Zm$2Eqz4pl_rd=aijf9(dvBGeko4s+RJ~pg`~~!d zr{BPvF-Z8bIG~Lg$#DCqbU1A6Pw zKzY#Md@F0qL-WiK^pqzpzpY2z;<#sEVSMrgbqh$*M2qhzcI9BMFF58 zJaw}7W6)sl#01<%tIHlh2*~jA`8zx_d5!}9{@>hCkCyWFWW?EBVOj(C)IS}p4@gc- zwI?~JDj-q{wi+osUnI_aHx_P8Wc#m`IE7TNE^B^7iAd?rOrj+`fo|18qG4^uEnLl3hLP*URqN$LaoM|SB_O{KlKEcz(NEA zmY0{y^yI~s^-XlzTn|V@fY<^zKscvA*;27Lu1S#xJ;_`uXRLGhgtKk(o&KHaGSolv zZo$?*bz!fy>|jhgoVWtU)>g%9&`Rw;gV!5gC4tJ-Al{=c&?RCs`oM|Hb7??=Q4Hp} z!9&I`u{uQ-&qx0=kqoCmV#beMrTFNE%k1u6s_;jv`)}pxXe_7`d9C9aM%-$vi`Axr z{mY$&@1>&{A|j&yWCqu55E@VcvtwWZ>S0?vcsJeKUGQEe79U$nf8Y4GX-54BQHjG* zYPIiM zMMJN26U4Wga5!A!jASax_;E-&@9D@F0*S3b{X0=4*)|m)b5<-$Z_zg!xU zbq-W^W3$6!q2{41B_ci;th+&%RGJH%z*}7|{2ReOg8Ch@b`!&T70&20D%p$g zMF~1T+G5_GD$vh;{HZ_3i|%A6=;-Cj^J=oiY+L>Aq}PJLCJeq9??zN65PpnF$w>#E zZ~>NGy5}N)*K>>W8MUYvD|S>Z?i_a+J)E}X@4I;R2c0@dc#%;oK(j)0DwF3KesyxxF6fCr|8bH z$Dx&YtA^cv$V``Wa#!-rUWqf3 zr`cbe*DE1j{woe1C>nOWVMT_itGoM_P)0plS?`~*hU=Ppw!}!7r^hKrMg*bFf+3r^ z%)-&@WWTdmT`-IaW(vUZ>CyWY8mZ3$Wb;25;H~T`{7C)%M~r*F@Uq@s_>M_6723+T zkj>3axK1FhoDpdJCq5g38HJtOmpzhyqNS9C-0b*== z{sS8X6@P-d(}TLUw4$q$>#l}}L%E_G@9TwmRniYY8a~U7^ z#m7sJt<;KvX8dKPc;F^%miAwoYi54>91^${WCgKb2vvQ42ew3o62EaEfP0Q*Pt5gp z0AS1CT!~&N@!T0ok^q9;xFD%OWWL*6Rz?F5*Y5(h1YW{pA+0#TFF(Pg2Q-GMA=lsE zb{2;(*OHP@Nl8hU8#gvKW-7GUHUog8xu3XS-0%nP0DJELm`&yZKuc+Ry~XM3`dUC) z8I=zmhzb|Um!%7uL#{Wc{z5;u)R|#s4l<4eL_y(W!B;yWiYhN5q;i{oNt1*8e7vjp~G2U 
zYg6|)u$lI43}p-_sKe}`bD;jmb;w|Yb?|+3IRN1#ThC@hcT3b?M(l~ki*I^t z^*3}l(k1!x{y(TVl4mdH7YQUq{toL+Qu5mdEV_=X90bDWplzKxdqdKuMA{7)cUQe> zp`o9}C;d~Sx)^?_AWThuUSDQ%jrDI6jcDxQeQO;sJeZZOa<)n1XK?pQlHnS<>AxW_ z)GAtRy#JBvD4+Xlxv``9OtAS3>s+VWjW~28btB7w;gy-bOW}g1bKyDP`uGkF=nj<% z%Jz3$IeHkPSQ+7c>|FLTV#Ly~CG6g0@!A44V=(SDE`3t1cw0x>-W#mKu)1${FR48j z3w{;aQGBSNFTk*8*1oQoPxr1zKpOe*xb%tilmW()cNL>jSuBiozBeI{@xE4N)+D}9 zFK%8rGbH*v=$Uw%|6hbl$r}{Fb zvMW7Ht%mr;NlmsQN1zhXK;+!iF1?b*cqz1w=#^96tMz5&EJiz3&?(YRTxOJX*PCD{ zUsX9fB3|v3KUAeZ<_|onGhXFP4pfh+AShmuh*yAhZ~x&I6rrwvRti4IIkfg$mc;u! z_?diai3%jdzT(HuDG{x=Jvdg0$~kNmYYoAynB5HR%XhB{O`Noxnskx#kR2J*%nFz| z3*9LPjU7WCmxLzGJb0c;u2kvDakNHv{>wbYJe)eob6lC)H|E=NFmblIM?DMhI&I$@ zJq|rC8Kljtsw`UP;8|cdxm&nH^)l7?qD3 zP{!vZL-Op~RY;q4=z^Trl;Ihm$a-j*01z~=XT8Qy^^w5_3677ug@9srIatMV&}BaG zAZcOe?NfU(^*r&A`1C&Qu~vhh5p;E`)U@1)LHmieyF-?l$qLSfK%16(ENo4!`)Mp! zP~X(?`R$Q{Rtfv8@gs<&NYQ#I^Z|aTH%#>E>|D;`b6ko5F9fGG?B?uyEScebp-`id zjqW8(SH5#X^+gmVJHS9yot=5s!zu>FYIGLBE$gr=^&1WC*83D*;{_Q25Ugs<&f_=* z*X$&W;rlKvNUr;K)dZZJ=c{j8!r0B*(M(jMkxyIbb^(R$;nGC9f>Bl#1Oj|tb?3$N zy`QXJ@6G+I_0XT2_k+H_jTdpO1czNk-NQJXhMQnQ&-cW#9BW1Iqw7q^XlUbAfcGQN zMIX0**J!Lay$+6n#)(TW1!M57U$r#&oo#$wnVBliBC9l61ZHK`Z>OVIct#L? z{|Zp3Z)y(R_QR>U5Teh5B#c^b&&snu5RfUD-v2iifTGKgSC4hXG~Mp`g47i^ybjMO z?Bg9bK32u`vwA)_3b6^?$TB0SH|j>LdOJ9)=!9)xC?nHV@K) zAa$<*_wp^*dDro+HMldq4C6K5v$>9DQf6fU)&?HS#ct)X{hi;cmHeHVc0g|F?cztA7x#JdqLuCE zO!@J;OzFE}+^P70;Rx`ge4NAzGg54JYus{;=D_!T(+{;t>#(Y+;fQ=4oRVgkQ-T;! 
zJEc-hsbP+(2}{h9>-rpW9S>t!XX|8IX?&kNdW|P7RhGtGfJ2Zy9;zxb)zJ{GV^(Jj z^!vEQ?R=*jbd65lD2K(+2hhkF^Vij8t$c%3{p{6W@9eYR} zTgB&8X5&Rvv^SnBd7-2frhk`x!~CL+%|Kz;SW!-y$M^mL9B-n{Q0N`Z&@Do0@=W=x zIdBW(cR+)+hFzce;mP%uRw1m$mb6IjH34MH-1+q0RYqF8*QUj}nF`9ICwHh%9xS@v z*VVNK?%6Iq*gljMMC&9E%&L@+OW2k!4!R<`_q9CrS2H@E?@LFI<36~04(~n;9vxtu_iySS*7_Qa%0UNi=&mk9-`i0H^>NJ-#}BPhAj`m4iY7Kw8fB&d`!(8z!SYPug1-R;Rw4xzcOoT zrxsgb+32OZU-#)G<5JYIzq1)AYv|;bGPf$~>@J}^WsJ4)%gF-hQI^OycTSOui=`Ml zcuL!&c_&Bian2r;C0{9I0b|A{A$P46V(vJq*@{>bD|5e+XI@)s&0~t`drr;E+mY2< zt{M6xa6y_7qI!)UQ<7&?7RXYZT%da_>Jn-4y50(Mqv3Z)m*koqRks@&yxheCztzc* zh)lMmNJn8e$5WMbCaM|&%;J{0WBxxNDUc*kgZlrwy#gw@0A=EyU1G$SFI@{nybf^s^a6t+ zbL_t7sL0F>_P*z-=&5pegC{>b>R1$IoyO#x@(lpuUeBoTS+Zaq20)*7O2D8>K%d@J z&NbBNe|`Kj+|z>wXTPtHvChM`C)xC$!f_C>_QP0xE(dDys2DkD^a|a*xqaC2HM4fh zag_@jpP2@>V-$9vprLnC@Uu$?7b5 zV8MiPC0_1*fQ4*3a*<&jshU-hv*&uZ9_f8Vc<39Kn%cg4;5eREKh^5`5$Eayr{LvkC~MCGU(b@! z@Arz(J?SO8T?K(>$qpc(y>ub@@T7!qI|Nr_2cvCSzV83F?y_zB=5gnDyjOk*Dn-3X zuz+Os0_dqR@hMsO;u?GG0#6cr9}5~)1}uc+-;(+0_mDu7!c!EIiwg(HtihNY1nTz+ zu%6ua9G6XQh>ZCkm`yjBp=Isc7H-Sh@qx`hPNrL*NZOUOo+vBZ*F%ZELz^MTQy>Tr z4Nud|*6t4w@%9B4w;p~{pDf~`+$2sC@phm_`oo9AJi1yN(B1b8W1&o6)^#i=T8n16 zJ}~5BrcPxFwQ{3w5Q9?ZQcFXKbNGS$H6^GA;6hdv))JPE#8L4$4wj zv2$)Bzh+MzYQ%LdW@@+otA=?04Q9Q4o2nFC)A6&{OX-!cS;j581t7p}4?NyPIFDXK zw-fqpa|OyW*~tURll6u+(nU8IK8)XNaw@&ahx~GY0W`>MX2?B>9#Y;y z$U4>tDJh|BD11Vt(Xs7#qngB;hVTfq%Z*xlTk>W;ViD|TD*f6x9MCRd3Bt+3s7=?I zD14$?zyuK&k7UQ)pFpB779ewivijmmV(~f^b|*UIB$y&2N27^uSDKhw*NmQ+9B?wv z2_imF@cYjTwut^t)xn#wa>wT2kuY9Wp8R1l__o@B7e3S#DP%N(rflRpF7f1g`7Jp3 zz9)kPe-O;bD@W#7pZn3sa)2-u{=<}(&&ZQELNC#;Zx<=y*kMe0y!}Yj8Rj&cYm*cF z6$xKDV$0}b*cIx86Z>)EUo4lW8~@U^-7f#BD)+nxxGv95Jre8JPTVd15^yWlJ*Rf? 
z0!2l=e}RnNlFj1=p9DG5&-rdo7>8NGn4CpnDuuM6+DYIk_n zSNfhUD(G+y2keMzK10WAYVJ;8y^k$B71X*n>fRZq`te)CU+6?!*SM_j0H@}z4}{;7 z8Qpw(e7$ZLl*{+HmtIT$Vtz4Vqb-T-SYFWAS`;J~T}oFsJA+uCU4?J^pVgR7tgnV6 zTU6rV9079?*G~?POsYJcs9ciP(Cl|9EXXGkc$t=G6V>6vngOnPSPza>7jw$$up8%cc*9N>W=r+9F3BZ7=scoQ?j$*BSVHduXs(CujJOl!ml%Z|hC)2Flv$fzwXC z!HXCq>KvxnF6RdIy6a`Q4|hs)gU#p-rJ2PYcpIZ~QbEmU-9!$hLkvn2Ge>xD7sPLZ zUt4Q+nKA>^DnEv<((4tHASG<|ux*paZj-`a^LCWO(;@^mVcYf%X~3cgk3uJtyQ?%l`- zH?VG>7f9QZL~oJZUx#JsBn2uL>{4f$V&cg6*d#eeP42!f5a{n!h1qj5fjt+9{&9hz z{4HDT##@1oB2A7$ipLqBFRMLsXX#n;{1Rhl0HRVNoM=5((0BUb4aXZok$4G^<7R>n@Dm2>UhXl#`LbQ`HIcKn{iXF9~eFtao4HE zJ0$a;I^2kesIU?RVZVFSl%@)AOK*aZalNliqu<3NETley;h>Ziw&~8_fc#2`tB{9n#SQUAMA;)iNyBwBw`v-&X|-u7ZuE5AUbSN zn6$725qGr|+EEvWG=8k*v}NYoE78aJ3db;tN^z8*4*?*HAoRd&Yw-O8O~o@q_j^!7 zB(^6C;k|jxw>|>O2ius%%U@YEKd^!*^swiev8X@L09}6geJNQ3YmcvL?7YsN$FQpG zM|2VKo{o@(=zjT0jyP(QLl&*>Io^X`S|OK1p_Fa6afo-ICnl~><=}3%|3p5G6F8F{ zZRNhJ;c;2d$7411Rq0{!x(iGYGAWc|m*^rn*bPYi>t%RWj6 z)w8-%0!UHZJ_atw3-~_{*G+a;Ub&T{W7t2W6d;-FKg^dl>wbeM)!VVbb6t={DCB%@ zi@&}0O_|J@spfubV|$Mrn;=U2g7OMr{|($pkH+0N@(IV_yc|GjT90#c)!}#WfRIXi zqfq5>r@>#tiagr%fzdhepK3m0Vz6XDpVW*E(h!sx4{SHGVtgI3+j;Cal&-vRjwB|{ z#J6uECAw@kqUq!n3d>Wwo_gBeQ=+7rvYPx@y6PSIcBcqK6r=d$8Yh~zzjfW0@OI;) z+3(x%?F<=od#8L>6Fb@qA19O~>TvtEu2(-EGXJ9yTnXTXua`+1DC9$}HWq8=Cfp=3 z2;jpFxS$*G4S_gAM%=DXB^8a_mT+Qx**D~F-?iIc?q@+5QsoOYp~6F2p!CuETD@L} zpJB)iBaHvXF<+O9C}}NLQ-EO$P`lK{ulhZU8SZZGoT{+^SUtcua0t)7q76FtdvCq= z);+U!d!c7}uSi>W(=k=0$Z1OGj6`W9kLTM6JVl%b^0PekciM@} z_vo-quOi=+!+t1du6vA@Vca9P&~Uf&$gBM3ZDJT2n5 zK?iK^Y<&|Fgh6}6&8$=lbx}of!j}RkBAe)lg+p0fm=X3pXX0%O`EV!6k z&hSNAO(DaE#&ja*KZ)6Z{N&}=6_P7wH4ZK=Ao~fUsY}ixdOd*FN*-L~dmezw3>UYLh!nx8C*)i?94UQi$XN|shlxiJC0A723Pf2Ro(DB z7Z)2`5v42N!vOmH`+BbW`4+}nI0x|fyrg;aPE+R&<>VJ|U593Yf*d^xZD^|C1`HD! 
zd@J0V;m5 z_bhj(Yv}dP=WmUbT0wG+=Y6q7G4579MdFR@Tuu#QZrfn(uU@{-DY!FH58G2@Jn8=^ zfI76_C3lH^AQWNq9glSay#vbYLwk3g78tPN7;i5n?=Q9jGgJF@x>j#r4=ql65x9vI zd4tCsETgjylz@s6SHD}D7b^`>5*{ZdXe%V>S>=e(?f;g<-d}*)t^9l$ z{^>)~n6;2g{<;;P8ogZSAWoM%OcmH%*+PGZ?e_V!RpaogVQwXFoxcQJD%$x1JvRZcs>>0CC_cqQC~oP9@FN&<^N2?|hwgRw)=HJd*76&Rr{q`^_z?JGiBM`XA7V zr-LWw9zyq;zR(}bKLJSXD0#x>{a{hW9~k2T0hrOz}H6?!a?(6svc3yslTL zh7w9U!N6E4XATmk7}Zj}h6|YRs2$w7E*n2@hNT6To(otAXXwui${MeA?z^xL) z1!?VHJ4W(kZOduOXES)ohn}cQ0wj?kCD1(-o)Y9pdD!I$<|>2b&z0$mpcQA*O~`2- z>472j06##{WjExu$TbD4rYv*ts6uZ5;P)0Tvt>PC*QS;O$V6Lq z%t3_gydR66?G-$xzBgwU(300*Q@iD9MwN7`vAaiq>bfKGU9ZLJ!hM6DW6|q7<3n@6 z<^oq*x*YV{3!FJRv0;CD1SwBLg1;=JM75Q7NB&G&51M*>?>%t9fw%VhreW6}qbFtQ z&TvC1-7FG9mvt#vV^f5M(WM!O(gEnDWX^}qeRixk`!$IQ_qP?jZ3`!Cn^(?gt*6UD ze*-o+4>cs@V2?WfKeJZ^ZUBDz#N8nTeE&2}hxm<;o(|4Es4mg8nSo(ha(HQG=6Hj^hgIDZ^KgI)#Fea@AV zPA$$kMWaq_2@}K-k;N;G0?nmMoj~XJ5%oW1u3OzLV3l)(7=56%BJB+qey($PP;ugU zL3l(^T=lsLIf&zSSq_VwY;%F+;Ya*_Xx?aW)Vz^Jf>O(@ILMF@FF(QOR($&YJwnjD zVA@>UfO+0E-w_YlS0e-s$8}G;5{uaaY0?anX+!>2Wi~s)U4I)#z3G7Y(t{myuxrVv zY^>v{^t0hixkC03TdFerEXqp8SG-Cmg5_?tft(FoAgSl0C6yUwR5d*1%ed-wP)0-R zMvod$JW1_YltHH%M|YJK!5%mLEkPFV6s1f}x24ud|50J-0DF1Nd&VKHeGmK4uQjcP z7J_xwWN6`xR;uv%!)#I0UZgE~=G%37DRQNAiFW5716DO$m3im5o1FIi*%#g{2#qbC zz%{+ceyJ?q0Jq$BLYB7>kB+R0fd<`XmqScik83M!_ba6I&o>P4tuDL_llyT=6e3sA zg+(TnWTjEx409~)tQ#TEion4ctEHlKpyE-U1`2D_I*6*ucQD}54U8IEjJ5=#i^w?? zV){4JNk!u`$UJDxJhTDGA6s(C_a?utqrk~GO-m=CYHy;_xoA@zyA=6*uz0d)? 
zbVRx4P=UJdc}98+^hv`%i(=ifxb^URd6%1A>lPxYz~Ts?bzN-_<25#kIsTV99H=K0 zzW2w19HKJ*m7d0t8D?n6<~I|GkO0jSVKhHgs-Sfu{WFeyC8L?#8l;gOf0}dnQL8Hn z-L75GKF3d=dMgW7)3^cg7>5Wm!zpP`Fpe6?XVq|5$5h(qLkm`y31lbzpiJ%qD{5;Qx}x4}1tOLc*p0Ytl*jV1Qn`|Tcw%^aQ#yWQEro=A8DWGXmINVh z+eUIA_8`q(lU$%_m6Q$;dXl)%Y&D5f4J_ljz}wJL?0|%;V2FjfKW)jxZ`cf5vV)v| zXe~}BvaUG(IaR{K1v1R;CdO7AFRW3e)I@q(qVp3*3s0AjL3g4FfZ#``nVc{uvvR|uKyGLD#M z5YpcP)BRK~$kALd>^1uYdr_{Y;{b0~KerqR>=yjZO2{WGV|i+AcOfZjWtAvPMBSH{ zF7>zd_v6=6c-of*I!Er!RYnAzzpSu+Pz3GOuyQi^!Kp5$m}J1Bem$Dpa|O~Wp}f^F zpJQI=2=E>;EZJn$rCdg{xn@l6r*UMBDUN)XYvJmZX2R_3JiAj#`%&(@k;iijG*TvYFBG7^!(@-}`7FnVmol z&HQbcQ27hFz*>m#VJdS8n@n#Frd>o}!15QhFI|pu71LcTR^lXfArfh!$VId;m34?z zAeUjiYHt0_Gncu3g%qgk`2!Q%9bHN&nGXf*N-WIZP7QjV&sR??Qzf*w$sa>gX9++~ z<)4}%9XUr2V4fY+3jnRtR_Uf^TxfMdMMjdJd`Pnwy6rXc({eI-OQnfvv$Kk*XlU16 zkU;@kY6Re6W-d!Cf8{Ar?Oj|#($a7M?1itdXM0OvkHJkSjLB>xo$$rO46)*1cg-sni7SoEnIybs&KePg^h~m`XCH z;uc-K8+eU|hHvE3ifj{fI0!(`Tv|i|!Q;RqA5ZLq3^#rNdw#0jiAja4vV`l{|HcBm zSP$0^?d0--g^-O<&6*`ac4vnNYHWZhs)ScS$m3#5vlG0Eqmc7?tV-vjQxIyoL2Eh% zDBX~yQf`;Q3ew1E)gMw+lACQvb_cpfdF7!G}(;w{MN^CQ?qYL3xrPDtjG5HZ@>G@E6mE{R-7q4qnr zk#t8c1a3qW847)?G=I}~PYw3Fe+G=Dd4z5bC8+q)?B8pY5=UZtvc)tNh<``P<#qo# zI6uGMKxcN2cypu&Ag-Eol8m#!U?+Q#>4;2{iz(IVwkg`e4o^*q>FVNFg{8xd;W(*Q zlaY{s>FVkN1K|P`OGsOY!bSaKblj+U@pQPr9X}bU=d_X%{qLEcy;n%h%z_C2kr@U> zU`cTdK$2pp8`SImh*S);&H#tNBCo$^>|2xR3A161Kq9k?OHdNc-S|}@Q$fkzfWn9C zFqEg&Iq+0mnD!ta&qd_et)330a?K$BcQ1_=!9~EL6Ut;qgg&c6z7z{AO3Y2m7zFEF zCJwQejy;tgqo0KSli-+kU~*D3xta-q9u-f_Pyu$;oOpVriXaIlxgxoFWq8V^a;e|3 zFC{%4*B1#=*v7WHz4q<3`6VzA zQ8{ipats$d_2y1|j`g+$P&zP*wS`{)pm-Atnp2Q!?i&v05~TaxVh3P9_YM#JuP8?s zO*2)O(K9AU@IV043&n7YtW;dfpM~Rlf{nPV>*8s!A*lXRv={ZUNrf|X4 zl;Ak`b*1?;b8^a{}VqjDB`62JohK= z*mK3|3e-Oq+T9)VMfm!;d;VZO0^X!_(ONrhAX!_{su@45YAT{R*ny##{ZrN30Dv5D z3k3IlogcH>@5Gl77!HZmnQsdQQi?JDQ#YZjQjeQo6-KmAe#`G$bD$oI-bGonAM~Sr zff&g-cjpoe;<0piCp-T%{NX7q*4L&$y!JPbA6~5`wlLl@G#n3{(}s8bhA&|D-yv}H zI4uqSUZD+?)sv9xf};-!dD%0FT)*mI=Ips2SDg{V4RjFolL>pf*t%q5Wrj#na1IIe 
zk-(QYS5{WqdEua-wwokuzZ?i(huZq@GC&&hTeBCWR--3tDqH6V9CMs#+y!G%SxxoM zKU+xSmDl63qMKw-pF`U*B-534=T3;mszMX+xy_f51mz&Mg!s2*eLqEiBNOnfL9-0m zM2{Ttg+3w)emRwt%BE8Hli`8_H=gdzv&Vxeh!Y@&t`@0Aj1@Vbr=_Oqk67QbIwG9L zApTVX6)M>yr)aL(6xj8;K&^s~nk6TkBBvGny69a={|l)|4nuGuhlIFK=j`YDS;+4H zq3WBWGwFhMC${Z3wllG9+qN;WZB49+Cbn(cwv&l7G0r>R`v0@ex$29??%q|qdRNu+ zRGB!YbdXL`2H|HR{!ZFN$GAlSZxV^mE;2n_6`LB{)FJI`(xERnhDGrwLEzsa-E~Px zrXMC?9Q8)?g=wGd>@uij|Hb9;XzTxz*0=!5=JqFl3L$>idKmH305rJLjGjJW!uvo3^plv z=2T4;wXhg7s*M0mE8t^ljj3Q$!`b5;l%G26Z(!1PYwDW3A_U;L|VvJ2#B<=O+x2&q>fHsc!1(jj>QkZtY)Cx0xzQqA?g!HW-02FN_m+PtEOq7HUFuJYF)>d_ zIE&Nku8f|6qdRe0FDM3)cJ#?jxKOGrASiK<-p!fzmn|^mvZyv zEh`61?wd9$xdKLhgP|P$Zfr!?u4;b7lKz;rc!z)3>voubg=2EdjDvD0JteiEn-&UO zL*UOHzl-XEDE3^NrSSpgwTmO|Vnk%^iFkb!Xi`@ipuk<)>xHn#h~<$(3N3-QgIa@2 z^Wg@%^e6p!9ZZ4tASqv#T-<3CY4F)y&GS{oMD3__VnVSPR0TY-$yLaostgZLcL(^) z2jnoRXrraiXu12x{);|4Y*f`0A<|%X57C$2Fze^namVhWD=^u*6XO{Pc~Icee7{>D zIIrJ}vgyeRwh`s#rHeG(#7_Hx*`A|3MJ%kb#WBxy`=6b^Us#M zbk(bt&K%_>x~2l_QX{x9a6$6P`+f@eWDvw4`_k=qmh7iUkUH5+9xb?qyy(~a`Cw^58s2OHel$^rhu&Lg)C`?K<{hTbzc zg!hEqhJg4BQx@tgLW0Xbw#cG6R17r2LPA1?Mf4%Q4VV!@B-Q&@7m$&apVoazFXa56 z;&C_*ogsiV2%q~wXTKggTax3-?1=fFmzpiED5ehl<68DY*tg?+5#DbY?@RaJ;VVgE zG-%3JDUCh==clB#>F}fDZQvr2Je!-F&xt2xp+lZb2PtC6Od@&)KO+o#DhyO?TF1Au zxwz6F{TM}w_+m4#L`=0Dx7D<%W&h zM2k70e>R~91jnx{d zclY;BQfZ(~+H!XX-v*skMpH^E@hv$~8wp>ia*<$zvdA%s=$}zaq`Mrs`;ZRwGh+ki zWy#FdVhT*Aw&T?*;tEF&TxfWp@uBgudVBdSfIg0mj|~C-=IIl0#P43yW<#Lk%FE+R zb?RcrBt#B2NuD1~aJrnU0wvRUUXK}(l!WTh^u54MbjGX1;ye1lE^2T131;4U zT(Xrp_&~_>jQ;Rh-^{`E4Fj~YgE;lY+Ao+8|Ew$~$4BPQ$Ke&r4qMoAcq+?pWpyKp zuGFd56=&z?(njg>;&cw`D*RsYj)wc=OqK6cmA1Zu0zR7CK`T#nI&*0Tcp-V?*Btv9 z#8f&6z55EF`00@>`_mI3i$&!f!KW7QOXsBFnRugD*OX=rbX^ezQa>#aeECdXtOUFPN!~7iRb0S zXMf0xT)vQ!h(-XZc=Ro<*K24D2tUvzjifii9hyyhNmfW1I?!{9j9RUV;A!&=LIgVC zhn_nmZY{K%EN+J2Bz9fCXyD;&vmM4rpr)OF1Z5|u{QimeBYxw$y^b8~0*`>WOD z_%r;i6eM_41=A)O{|D1A2gNpK28mt-6Tp3??{krmPE1lvw4;lAc$WA;OJ(h6<|54r z@AD{z{5JN`ka^K@Ir8B_>CMh@;5L=mk4Q#`(vI>aMMoBi6y6cQWK>^&DZOs7WR^1g 
z%xGAVrO;J#UY!g-q)YB?FqWj%uKMr=~RG$DXhf&ubzs>Q>$1XF}uT;P`WYJrQfaf!*nJ6aAUeQYs-*CghlXv z{wBY$=>C@styIWaAtXlNpNq_7g13=x0@88f&+Grt1*b5`Ws-wgupHT z;#RMaE-{C0XeJjT?wx)W@V9f=t?2UGbZ*U;Gu22uf*R4l&lQzpGB=_Y(eBr_KD%In z=s|eEE(rE(FAkmFE!=t+6K0<=+G3@Mk`uMY8GC;bt*~DpBrb0}Sj(`a(Io2|(LppBZslk5AayYfgt}LOjkaK z0*Si=Y?=um*cP9JZT3zTMVuW6u?01d)0bM!$oz z`@XO~4ofnbo`W|&rS_RR3SGqI-&kS9LJ#hCHlp=+(TtPGTnRj1O&Hv`r@|7p-*e(5 zLyDD9fp_ThIs9o`y=Tg>l5V43vQErSALSt-A;CkR76F1tu-f3=w-zy+xwy=6P;IIx zqq`oNogu|`q-ZBdU^WTxn&psV#ekf&_jGVzTTw3FqyXYv+^ATYUJ}@-s3=8UW!MHC zo6*e=0>7Eze${jkPbuzAHfz717MMbS7zsf=giNSOuHFKe^q4x!pn`r|(e@8m1+(0S zhGoEmIk`O6CTy`cG?7^EVLZ;CI2^KAWMuJDEbY)2=+Y$H!}gqTHG9_|kLOSpp=~xI z$iJXZ+)`(8ClABjAV_iL)tm=NGOLdy5zM<;eaB6U%Z|h&$bK=y{G-T869rlBF@Z(m zpigR|7Qle-DvKRth2Kh&Y3Tw499xF$>Dtr^f85cF^};LXNGdGwZDVc zaHK0ykK(=Yix*PLpTr9mjS$`Zm~sQYN9c0t@LG^jLKztC7`|9FC*h-jIikR0oK~o0 z`nIa_EDprD43qWUsv`9YymByX?mCl4+v`1|9I-kwl@lh4IG>LW6sffrP0$9_Q1Bsw@(26TLwzfc#M0alAv0);2m=#re6q7Q zSe|d@l5{nr=7~8je(l{=tEa{C_2Pr?V`8vmW+KTpgHsBH)?J0C2hCZk;ReTTmk_7u z@_^V=huS6Tt|L|BUP}*i3t&@)M)KebG1fsN94Dagd^gj^LO1gkJV^wJuFpVl&8(>XO-72LvK^GFNWIlLEOC4)nd2rTF(>qMcagI5rSCAx7mY&Wj}A;2 z_nWvDIbUM(GA;qH0zfeN>@a&-HgFmY24ZejxfSF@ zl|Guq@G3wLovmmvQpe(`7zK?K0x^qZT%<6VhCX^o8D3IfGpQ$41;+326@Xt#vkh$Q z(o@-KVkY52!=c#%+FaYoiepqS40N^PBrKS?c;KeOfT9D2jlI%^27$pue6=GoLpVI* z_DRjLaAqDjNLmZgGG2g^B|xoR=(n@WsbRQ!1B|&ZGupJ1;MZ6|%7sc&0aYitY>5w0JY}E0Wa{^LYYVK|L@>-Cm!)J&_EmaC^J|X4x@M> zB846Eq*xhKJ+eH`(@s){^LL@?TH|P4rN@cK`zrh-7LlB$;~O~lSYVW_{M%Jp)xcyG zo(ypv_Cz+u(Z{OZ=9aA|Dm+DBD;f#ZsuM9y5d81s=-m)w=q(-a67;t??i|qDq0d%D zO=y2L7SKhj8?}>OoV=f62&UFLq!n!7UeC98$HXj)p{cxmnkCDf!xFP<{oFd>YbY9z z=?%Sxj^TB)&?bZ>E$Bl`)Y67Zr$%ayUgGD5Q0Ow#qhkR=Fl}=f3|{V3WM&IBG^(&! z+o&3kgYD5YUa>NEN8(i1=?wv~I)ID>4rG(c0DbVBQ4&Z=U_{5#8kn&Spx)z? 
zU~n97X;vXe5uFgR<%}eSO=s?h$1>-H38skD;a(+?b+9<5dBJQz#5B?BQMgs!`Zzr5 zX+~E3o;56G=b|2=<2INg)n|zYIY?@9wl4Sw8`)h5ImQk1RmZ%tgJfejwsDhcIR?pK zh3&bVWiNm7Bz0fh3HK-VBGs%niNZ;A<-B1!d9InKi(2o*v?3M{6gU*#n8AYM_Y!ll z!{ol-B{AYi>F^ZkGF*ISdzg`g6rR+REMS$&Xq7CiEk8emtr_BZYg2jHCweME--gY# z){GPHdz9fyqGf4Dr>YaAj@W$<_pgi>^!iDDIM%->nKW3Ha=HuxV6CTnmo84_pI+BewfRVe^_40w3JGcQYOHT|Dhw|rxAc!nRv zebwTLV|boJIfe|oG%lE6Y}}{Vxpoc`6ClEd|d8@t^aQ#o)eci&`wMb4T*(yNJs!l z2vd;%eJK4ShI=0XHHNfKB!K?CH|WQ3@~V&_ksP3i@dtVYZA=gf8G97CiL`DTnaJ6? z`1A0-*7pzwa$^%cO{{EX*=bn{q(iNBfP_YyTD5JRAn0*|G0RAEv2FB5s=C%#a=T={ zQwT#R{@)c zo(;bL@mNKx#sxEYu^-jbx=*#OF#PvxjR)zuH0m<`tGVT2TSOS}Q}9$xS`-`|x%O9t zrl67%`WG}3%1u5}M*&FzRTS?ZUT`rwwxJ+&t3b&_Hes)sN0u)hjy2PTYYM#i;|mMj z9}Q3287>wU(_BxJ8FAqUAI*4YKg_n2gxpm(t-k7c;LcrSm8@wl5V(r}=tL~(2HR?E z+HgarD{uaA#7Dy)DSOS%{GOBh=)dA>xxD0$pKQndnBN{FFygdOcg?&GS-Arp&Ja8* zsc_N~TQtkA&*_f1mzZcvXWHmJiAn3n?=xt=surH24nsHd{P zSyP71#V=MiA*HzTfL+CX6IE1l!wXQJa~WEo;oDnS02%#aDYy5u*g$qc@picZyFN}%TBgF2w7=5^iD~OB%S`7QV&D?%0<|%MRz11u|IK zUvJmrmY}#|f-M1NOxOg`@<(L9fPK9j_FDvOC%~A3=THnG1qReOaUXt=XNzO8{VwHD z)2b<}W#vZvyQN~0o%wjb_;>s?z1B}fY|0W8Q>@p}RhBy?x!1U9FSBF2Z-$5uTEX4S zMkBlrIix0!$m`)80^xp<6&nHfDwf`^%rkd*dgPS1(V{SR&dYV#&m^AW=I@h*5;(j# zRIv&djP8&_&qr7?r9$(M@7VA_5ujpXh5?IDw7+*w?pnBt!otDbo|WYNI}3Cxe@c^; zlpM^EN@#b`z>CxK^6@#H+@nxoO!EF_qGk)M>3)T^S*ae1!D0P(A!P7e z^=gm5!2ZF3&LA)_?4+yO=f3s)c-?*AFhkh4Hl^7cRdk7XAr0j3#UkzH{ld3_)= zhA9YQVg?5|7aD^%u%?n!&}yv{eb3TG7NE(%W;QB%J0dpOXF_)4d3v&?o(I!AjGzh{3AAf zT%ypM8^>x<07*Tq@ta<7*?sO%TI5ib9#5J{TPun?0dnYm$VkY{7Z?1z2Qarn+9fC2a$||?ME_+=KC<8 z7H8BtB>w5b{AzvOSDRG?0=!Vq#c#v$Reos1KcP4_h=3ASuh^_|2Tr$ah!O+b_|h9H zGG6z}+n45N9o~6H)IH{>y$P0+ag}d zhx#2-VR}7q+o3Dk+vJZO$^akDpYoNeF>HC0t*YeO_kBe=-zVtDO>8KGGKBUAftxe9 zukhl9nvo+$|Gsv-51lzutKd3Wq{J*phN~!f)xHRpE@=Dje(+Q06aVJPyq4JIVxaZl zug~P$IjXuo)_80Sx9)l!sQV@>+7lGP@Z%c^&uMa**KvA9kps3&eWFrA_&d_}z_!@6 zanSEiFLVD75dO9Vs9<^|ZLD=6w+nCM5Cwx!gy6!tpVZaGuA_)@wCAk^AUS<8i47|U 
zNYm=7ZiC_D^7G6i7xIzN=e?Q_vGEcK7WkWpC1|tRNo~v!EJcKIO?I=!Ha@^RrD%bt7a@?(+l0mPA536<9I!eRvu|Xx8p|j7-DS5F3 zJh)(_7dd+y*qGUVmms`u&Q#m)pK!cxXl}@}Gee5u@q?Q!-FO|l z?lqqr0gv{t1#TySsYTsf$28}7^&n`>)9>H6cM^4arp@H~O0tju`|UO;`^r78zOT91 z+qWt}f{K%)J}7;O$PY_xr4ch%ULekzl=QK$)!RY*AvDhkQ1$r7mUf~yqj1ELq2#qd zNXCgfDUL|Q*ji~pWXLX*)1SFxuCHsFTQe#l(*xa~kSp|1_{7F={0oW_kx0x~n>l+~ zO7`XXWY7p-PFBPT3~FP%+L47j#zZP(ws{xcoLX9Je@?*ZP?hBzEtL6*WMzAR)03J= zPUq7~6v!sv^m!&@IO1`F9Pq33zh-;o`{Xs3Sju;b_+Y-QI*{D{pfa|8UmJg_Bjrp@ zCo!BVhk5yO{L7OW(dgwM{b5&&kvlTKs>l-9dq9Xz#%PKsv*O z&yI@MA0<{jpJLg#h>~C=(Iiq5rM9+3>mwIv4l{$BpgkclpTS>Opp{Zgc9Zy89}be6 zjzRLSj!v!nKeW(BAN!6>K24L zI4slnLv=1(O&9cv3RP(NBhD*5{bxks@#Mq}_z@0{$%NS)*(d2C9J)L%A z1f5p>pjeYL$T6v@C((B)k9(H7<>%z1{%F&Xb|Nj$e%YPGh8kUkeZqCABzHQp;C^8# z+nqquV;-#59>>#}gm@xCv3BsV#8rM@$tObxRPJ}9yD_dpl_ly|Q}#&sM@m8#W+e}Z zq}^RZRD4go7p9GMNdh{uqY8^oi6@NSuOXmPY{hfn4+Ej zYsvHY2`OP$>@&=jEYTOL2ySaw&$jhG)#WkMx8#*1Nk#|M?FOVX0yNnmFL_GFk4&LY z4#rfnN-kne`Q@0F(WZ0G`aiX8k3{s;hZPdxi2x)4RDzYX2iPZ)^W8+#+v%8cZ(BLi z4J<_B&#Owd_EmaFaHFR%}!;B zI^IP`!;lef*Byo(@kmAmW1sTn&N>jkF#MIM=OqTx5TZ%D3VkAk3B-PPzC$^6(*AA9 zkMLpPNdkZKM$XU_>I4mb>Bjcl2tT?!coW9=w6tJZt8+w#fr};gg_g3X_eb+`{xRXh!aJD!?0)oWXAAA*9#3|VdP#x{DU0_`&7J>)GLYKiJ5G6AZu{DHv3O01KHk3 zH^S}h2vPFw!AQ%$z0(g7Wo6XcyF2SkYc^r(7wc4_TkI$GDyIfaP&Wjb;V09jpPKvE zJE0Hf3t6I2&k5DZd6ZDBKN6t4$rHX)j*kp*k0^a2jU8T=>5blC_%F7;j~wnP=}8)D z(w!}o@W4DGk`EAlhN$Z4m+AvdbB5K>Ve3B}0e^Cq=v;qXL%$@yVr>symLI~Ewa6|wUWFS*_=A=T&w`%*iYuN`D&NkOTB065=cL@w8co_jjal))?5 zb5Ta|6C)(R6y<#{kkZ}JC)<>f3zA@<;32>e!#b5{BGX%87JjP_;J$wXToKo^dO~S& zQpJP}Kazu4bPtTJ!SFS;aWlC4C$|VlRmVP%Z0}`%&hdnSL;WRNFnCQdeRm*?l+zSB z|D$ggW8n7^>Uyy}b#^fBDvl~};8w7ZrTt6&G9I{pX3C&{pJJq$|@`C;zNg zX~so%Id6Zbsepm=@&_d;DN|Z`!C5k+DUlo9p0pu2cr^ARS@MOTKB==yA;H6qGvi55 zOU=awaq&ImTs%y!EmT&kJxjM5yLTLQhe9Pt)pVB?G0eMatfhc|;CF(~Z#wdh3PcJA z1sIa@z!^^X2<^K!6)-8$e(5`tay&D6NUF)adh) z>5SRiOI6X$ptq^Iy8>#9r9tuS%*vJ>c82o=dcEtDnKDB@OVaZrI%*NFWNuj}HA0h+ z!npJNXz^BjVs3fj;TkD5S;|GU6_?pjPjZxBGLtt$Mx761*?H7b3P6Gm%YcM5MQ^M> 
zui{Xp-ktbtk1v33Gede@pedm;4>R#I6W}~zVMRuHiMsZosZfCXLdTWV|ADF8HZy*% zq8f9!NNW=5FzQk7HDjhsUEGg$*rl}_SyCUjCQuR93~gyU6B=*C=ds$skvC>4C8%$ zL?0F>O3%CP%^;?IMe@!&e9L%}LM>|tTKD6JE7RQt|D?MKxFxts(Bt&;;qHXCt}^+c ziDT2d<0itQ3(~iU=G4`eLzXx7Miq*B#?7Yz6}5BJ)kAWU3CB>L<=!l!24EJi&B3Sz_g)$051vbOdRi zVR%S|CTP#?~T^0NsgxtZq#k7u9A{00}!xoxv7nYr`!vv>$>bLo;P{CN8v6Ld+ zgWdvX;079%w-O$$M4Lq8B} z^mj0GUIQL;iux?r(aCZ|VIMX-Y^)V{P)BKbq^T>0Soe)_>Ic!d+2FVel+3=rzhtNTinRwsV- zq}6@5MHfLg|35XyLJWlXzBlfxF7tU2i%Ue)zXX14Gk) zhk@GeAyOm3j}CQ565sCg#jtP;^dtMW=hcwFtzpO;!rCWOx4+)2{TIEUWI6noi5=JK z<9G?QpBJloY`kD`>IEw_l7nvKtP3UgyS*A)pKpIcb%J!dr>v3kDU+ttS=N`~*#3~_ zyMMgVXXV8l$CbJdK|839K{tv%;to1g>|oPvl(hsNBEjI$=K+(f-a+I3H?kPm;WAPz zUG0tb^WG_V=re+7zz9UFv402zwPJeK$|w$bgmf!mS9$EE^5C$z(9h&GOJ6c2nAYys zzJ%dn66v48laqHKiw`IK_}4sO9N{aUYpwBJ2+93ovHB8J#&XbJBar#+RQg1P$<#%la<=(l|Xr+O4`!8R!J zYr*MAqMZr2;SZi&()l$+=V4R`;{X248dK=dC)ejT%4mc;+(yvXy@TH%Gn4SY-g)n;yjma(>FYeFV)ZW1HUvFSw!jWLpBeNDf4_{8~Jb%i=CmrG~FDt6G zd*SwY?(X9;n`U!g{O#zAhlB+Fm?kl@K!ROO_dJs3MPVLPI#nyO!b z4mzrTbNp#)IUXFK>wu|j@iroa?P`TmOQ`7F|n zVJNNPS42d$&#NMW6?#E`@pIc8Ele@Glz~I&B0ZN<%W zzgD_H1ypwocGv}lTwm4GcJ$e8x0*8mGF#6bPG83#dT+vDh+dC?5{7)Z#tRD0Vrrz4 z(165v)h(UvNPW}$+I5fvmk!8Ka^*UFY`k7B>^!5IAU5>!pkKTRCKvKmq} z!L^wcMM+>x^t{qdP1RHUL#&$1ZdU>NYxfrgQb_YYZL9Ke_-9`=wsaeRYHHkmZ}+Kk zO9CVr6p{r%M#P44V6HM%e(h`d!1YNhJ843#eiQRINHYk`~*hiZA zB3nIu@QKuB*UnVsK_f+D%$^K3)|V`a$xgi%XgVJ)EerRENg~0&yMLian?b6b8rb5!TO6QaAQ?;GG5^d;mP-6&<& z9b8FHqA^uOicUQXMT^Gr-!`q;W@n$zFXLNYxO32 zA-!?E=57Dmo0Ez#<1v{ar3)(4xd?px?zi;Ev_2lr*2E86a_GEgY;^@!H-hn6?=5a! 
z>^JTt{TzfZ>7(9ng?-@rAp|Qd5qmg)khVTuPeB&%ICPSn%DRCq!-{P=ma94f>S`$W zmexMi=MDyaKVaH#Te(@-l-qSkbp7_}a9@$WpcBsU71Q^# zE#7FgCd<*KDZw8C^uVDPc!O@h_XF|WwiUnrO}S)RXOYF&&Yi#5Xtjg}{%$DIse{X0 zAX+N^0}2pZj(YwIq9NWSJmjItI}z)5tR(<)mB`r$K?^eSQiN$FXA;TcY@bOAY@23E zMxs!}%-D35AMY41qvOt@m_vW-A{;%;UFvta*CN)1g{7q_e&(Yvv|<7!+ls8h?f3Pn z5HUR2d((Tq+{H!)4njiTXog&B<3X}&5Du1h2Ce&cR!z=@lSc4cCsB1gHrG7e8fkU3 zhTo8svp3|!Y#SQUKGgRx2PovIq7B{Q@pbj6 zKehz#g&qA1bsDLJgWUt`?0a3Rh+U9;U+aN-TumFQo!4-GrXA(z?TXh%mtRQRD=5pO zjfoI#E?1cL-Z>n03k~21I&Y<2iI$Q4$lFGn6dp)$4&Mh{-D5tVBs{qH@#tE=__9lo zq-9TTr|^jOG$bGd*9Zn)U-HQ~T2cdb;f^$UJq#4GJzTh&e=j$AxRUNI+83}^tqc3-ltTHD8X)$HvYYL&b!evJ_H61oBfU8$Q8^zgr%~URW z3i=`Wt^c)tmb(Syr-+l>Oi!(ehJ4QJ2v!H{dI-aGtJlB0%8>PdT)e$_Ms)^`DGy+O zcH`z|l~i+jjB+fOXWA7)psEwjo&PV4dtV0wVWPaS?ldx3z>++e2K>f-FEZV%&y3T!7I zoGX17aPB&o5`JIMX#H0NwGz&VODN|#m)TzJHacrkvG;xV+JcQ457T+u&gK);ahmBu znKy`wlkI?T|Crun8zPBp^qY)Fbj+t$oQdPEI00vZqbG)Afo251c-~kZaL*fp&?OH% z(SLPciud0k_RjNWPQl@+HbPrCD1>#CX5I}9dZSc8uzxv14=0Q6KSEK8Jl(SJdiV0z zX*(TtEO~r3&^`YsiG&lod2kG;|I3S86*R-f+tiBXr_YX5^OdN>rL+MXf3WouZhT(9*mns`n8j2C8M7Vl?7&GZ^G2j0nyZ-$t>~=>nkjr@g;BdOlPc0Ft{{`T9t+2O zU0Ct9?tw>@LkefFJ_9}-fK}V$R6=Jg$>6C5s#`cn>h`mn-wkg2qg0^(y=LC=wD-mU zwznx4;Z}A?`NUU9iu+b%2|pKvg&INBESLD|<`pilLwd9hU-v+d`X8~uOHfoy*i<6| z&WdZ8{Arh{J-i((BP;K^LTRPJHib0f1p$y!)O@%HQV=!t^|a8XHUt3T(4++A?&1Q{ z?v%iN#TgMEp>FU4P}-gEzwHXtn{!U%mVeW`DTpU03Z0!HAc+;OK?Br0fu*h0Mq;1( zey+6aD_u-rqirF8K_Us-iqhF{%t2JNR1K;6Q+7m=!E)^IpbR6~i{}N3FjIX(c6!Xu z3lbb+*F7NcmaoM`Znc~2B452avEJJCm}&CTz;9=k6IDr;2ZgslsqT*>{kq#-uPN7) zsM|!?)<ro7qTWAL!uq6u-xib_xa z#O7`!YX8xJA>e8d+md%mODAi<2QAa`710Evf%p8<#a7d%HP_C1c^ux@{Ff1>ZwuLvm)f%Kk?LU#qYu6%7m9StXVRsH0ydu^Z=^0ZAK8D z06iX0pI1wP=jil)P-FG86*sHIRYJ5z2%DvpvcIr;0B@M<+2=$Wz6v$z#%~6=0_dag zZ@*sQb&fS}uMm`=BVxqj?en3pd>J1HEBn(Nc9c=B7!+4-G9xs)pRsti^Fpfi_L+{>tz@+1UYkX+36Pa z4jUb}eAL?BQiXVKobUqUwLNJ|<;s^PA%D$`-0UTGNJ@68CGPlu~J zKbc(>8ATs+L-QbeBCsf51?f^m$ffAxP=`}a8$FCU=?PfUaox~G{ zlz59&RN);*QsxH8QVc|A8?BCc;MxRNp_h#)0K{rU1!Wlr 
zA(wjMn@E4ZM{|0zgPG4@4~~Sguln1WopHKwNMN#O~ea)uCA*YF|9-Re|>O-ArZy(_xCSX zM*%8}&p5p)a3C--f{fotmV2)1WYVjRM5o(SIMd7mSW2N@hhIffn=w}&-@lO};*VtZ zA)*yh6JaUjiiNGAG2c2t+d0N8?Za|d=uT4JoGoKH5l^|6bF+>>XKv`gl$f4MsyuZ4 z&TG(xq7C7##CjF=T)9^qfOt(540-sc1Ei`CbGj9j1#iQ|n^R$=3V%mhP2e2;o`RQ1 zJ=XzHNQ8qx*s{~-`^$2*bVnE$Os1fOJ8-Wp^u(&Z(+Qh)VmlPZgYTYLOKhI2Fr2T>NrUzZ6~^;=HfbC>ve{2qf-DayK;o za%@dDF3NjGF?d&u!sGZeh!13i{txV>j1B6o#wLJ5t_w+fMmV$H^S@ECA=8C{0tRNO?{Z|U|A4rJZ5motN8bTt*6 zuUtEs@(*Hi+Nn6uXHCkKVOOgOQ==O#raHzD-+5plc2tA<$*0nd6Y~iA;k#kW{<$6Lu_dN(8fuu|f59?Muvj+5doJTvbc&YQAeKshS_l)Gf!LRztgi zv-#NHocu1PP|dlUmE1ci+jX^Ri1=SDKotJ=l{5bx@D+9S{vd)x4}mt!eN*k1Se0@qRyN%n<V9WIVC3k_O02W_QZ0HnA-ZbH6)r^yC$#Ys+i%gGXDNsmOw zjA$bXR3kfz!Uo@mO#}Hp4)f)YQYR!_>9f~8gn?&DLVGv}FJ(rEu5>hCe0+?qczSO% z*7nUr(bg;uv>iOKuM}y$W}zVpnecGvX=GZyB(us)8RkcIM@dt~fB2}d1#CI>qNCHc zg5W+=o6B?PVaw)Q6v)PB&ruYVQt@9cLDWX@VbQgCcZ*VnH1r{9b*)+_^;kNgBE6Fs zeCWD~eN^iGWa_;nscDq7R(?msO&Pxc;-giNrH02*AT0c80i`6K<)u=I-=y`}qyFx_ zCxK~evWSR_oW`1OKAf7r*SUeHN~d;I1J(9F?3Ja0qF7h$WXvKR0TI{iwoBYmO8PTN zTGY!ZD5l=Og*H+QIeX0Ug6|^_#(zc}I7cB&ZBczOh>LBCn~(}Sa1`?+S`-8heYPJg z^nuhkbiN>Ty)u*sUNHjZnkhk*j2PYebfFZYq8Zw}7?vd?CZ5a?XxK!sZOzU9vjzxB z8n%ON4=nsJMiK+2Wl;>!e^p4>HwkDU;2_MIGGV{pS&;=!k@$21;wGRh$v`!v?VQnN zI*k-tn+)yn%F-l3yC%WBJOFtWdGpGU3N9#{Lllx{^?^}TV8n9}>J5=;G7_6hfqj-z z{|ta_o$=iO(f-7W#ehhm?dAD@8mt7@PTIF)RXkdyxv(=gHp&WM-3ee2!W|5;1|-)H z3c-i5t`>e9Cj|*1XQYqk|F!XJ-EjZmfW%Uf?r)gb_V9yAA~a+MkHK9Pq(>&)9{Id5 zL$OZHJTDl61m;a#GVB_Y1V}J(_CHXg=}B5(V_tFgf0l>r6dp6XM|T)+DhDC+&yDhL z%rh=-S2&l0d(2upbR2jiRWWeKu-X7Vh06)p(*f2TwGdq`CRybvwNk75TFIZIY2by6 zx{&@Vh>NIjrU|MHBBgLydJKp>EKJyL`9Edqj$SenCNx#?iWmM@*fNUbkMkJx*8oc^ zg48RvU+U7kzkdpsB?wZW2#sSge@P>_XcN&Q2sEemXt!!)QYl$93R=|OZP3lnXoIr4 zZ#R?`OHR+{TZHfg`Jme(Fb5NVBNlfw3lgDqL)J`?*i@{97xmtFoFH=%2~$O26v6bd z(;l@n7KSO~F$feP5r-@!wp5kQyXyP@EQs{ab2`o!1(c{J%(T2LZg3yIL!aB((O*i^ zx|rh%F0iv^D51r=)U3BP)voxbvFh(``Otm*(hrf3hBC<5F_4bKLV|@*vgUAiyK%~H zJnRvo#^`0jh6{RuHD&Ok6i366uPySXaiEX`TtFoVurI9NjoVWkcIHdIFEdj17B5E*gLxd_sfOtRxG{K``rR)86CXt-z_(de$ZWn 
znG+a>Q6MW@z!=5rg#xb3I6MdUKydI;b%ee!O4?Rt4fF-dbVi z6Nxx?llV;w;s^!?E^AG`eQ__o0pb+1Tu3$@@A}4q3A=x<5V}=32V6TCf?Qt+G`=yP z?FHM9PYc?{isX^<)#%R+KoVUq5t({YD#2}6=ADVDT8J``>%!gk!;rRn1}C(sW+x6n z8D58~hWvBrga!>Ks;#-+Od8&TS;yAHf5T`phdG@KW_Ikq-__i5*>xn8iU&933poWo zkHIK4>6CYU@y}=!j3S}*9$06s4Y;8aC3vMi-2VgRKpMZk&LDPfTD@Gz z&?2wcgnlueP%D}G!e9u+F^Zr>>H&y_{(i*p8#Dv=4g##HALEjxJMNx43{^2bW|ZX@ z!|LV(PesKzWYeQCPY1b=FEoNKW5)T@1sL?eZMgD!ma|POMp&gUXH!d%nyqj0IXu)-#D=@``3BnSy`_CNCe#hl!5XCH==j|nni5e#6C@rz zSBUr+PlWoZTA-#3M#rIi?xDMs(1e9%*dT^WMi5aj#y#{DLPFeNJNF)zev=5j*BSid zHw~-jkAOHQ1-UXn%U2=~zph`86a5z8(P|C?*J0yYMcZew%DMJR`F6M0v69g`(B2Xe z>;>2SK-xc@%1O|XsA+K6-}515+#u;#tlY$4u~@1ZiH-)JlzLq&icBVhTrTJTEw0#i z)^2oA+N$wU(+GCl6W44tLTf68(V~NaEA-W9)pN#}i8I9LcWmXd01Z)-a7LJlGrm+% zNY&h{1}d3`f30bNwMC=3UOCvToMv4FVkri!B@kIl%U^BW^i3>Wp?!%e>tdRb2t=&p ziaQ|j0Fjixiex_ApA?mCZp(6LO=`-X{{9SRD~k12lo_qin*p7LIs=*_FM=xfftCNi zn0v31iJ;;}b2o(q9!fDZ+$;SJ*St@Rs}J33?C7~7ewjg2kV!@85$=xg09A*%hcp7B zOOS>)@GoYR@!K6bn^%l}{lvv$Zf+xp3{tm9JsFISfPGG~R_KVtY=B7R(fkvOEyz6h zIljuaL25jYJbibFBd^0a$y<18`7h`_dIoMCFNT)raQ43|aKvXe){hNBOFg2qaqXh* zvpVR19c9d9TpiRZId-nQr?somW?)W&c0x_VkKgB(GyDw(1GHK#|7tWE`C@k-GzV%} zC=?35D3wZRG#dW3rt_dRjgQKSk2AvTaxs{6$k%70*pP=3Q$9+$qPLbc`&v?w9PV-- zc*uR>sq}-lGLSRIeE280qKdU|ttE?EaiXwvzdP_}txK2Lw>50}1 zzU1kt1;*aR=72;OQ5SET$N()8Zgoqe6{{z@KsC;$mLj{*==3=fQ%d+~oS0Az>(>lM9q>8=Dk9XlVkyXg+#{fwXSE@B z*E(#M%*VeMhC!UZ1<9Mak0s%lu=piR&}ZP_JI`aS^jX}_T|HpJ)0mX>Hr_qh7f+1x zK}(EiR<2z{S`~@Mb~Ioxng??dv~ldlFEu>uQ353;B`7T|<(&ic^Ok^KORv}S#SS$x zsI$P`-5nkt9(>dRH9G9Z$JJ2KYC1}2`-}DY$SO-gjxG&FrW~MID}L+7LQE($X7L53 z<)0}fZU|6EAV|{%{%)b1!Yi-kKxrvz$-%4#(ftjHJ`r+b62zp`<<}H(a+TV00V6FZ z_!vS03Cyytuqb=Ors@re!mqMFbcQF`j`x5TqsffyQVX)nY%ocAB% z#3Bp))FOm<$e>ZmD(_8OjSA{0PzS-`zY9^=1P1|K!{0x~6CN5_3)HObL6Z;;?Cymt z`362`16@ww40PE7brkf9ba%Q>(@gJl8AL~Q#4s4;k&@*RJYt&EPg@LgSrKGzUd=cw zfk|tE)K|@E0i{SfcpTZ*a9GZNg7;1h!~9z!5fi4y_uL!=kp_MNp%^~;9-LkECccYa zfytqc@dQ>FO3R=(=wY@PP+C;X>6&hER|t2RC)w-fP*>0Hj7REz~KHpP!GivgSV(Zz{AN)ImYZl`B`^=H>=pUtj)}8itO} 
z0#^^tH5~&cvk{kzFCe8P5oMM_mtIRK25TA8O3(6zMjC{ZV7dI)oW?yn=5OI^c+4RZMKgMe>7$Ei;k9jxw!2RSk7!bhCGpgRW`;lC1KbH=KcBwkCtq>=^`3!&NTHzH6 ze79u_f=0~3%)vf_>SZzNpieywPh~~I!Vao)Fj^j+-h>&CZ^8CA=Rl?C98UU%=u}Pu zI$#9*yI>tw3t|r&=M$TmOeiWU;++F@89`fu){{CeXrapx{QUgj>FHT5y1-QfwBzy$ zJvSL77oJ91SrSaPrY8Y&H8j#7r8JQ%XAq?7f*7Cv@X-WTTb|Ke9OYouUVyA*50vJ= zT~K@1A-AMLUi>jEr9LpKM#1bp0y4SMrE+Ks*zW`VjplMjMp`lVyKQ66f!c^r%nc*O zIc5ZEEQs)yLE(^pgy9;R>L8%WDCrIW!M@5Cs#*JhZk;%+r#CKT>ToWt)Ty|zj64G$ z5zxC!?RlQgSw-AEq1!Oh?^!GMV)Sy8aYjX3{@pSd&7<5&AmVrRl5;wmQZ9qhtV7B9 z!|;e53W;3Um?QQch#ST_eBj<2(;w?y>wAM|Ey7^O4~E=;YbyRJjTnntBWj-555s0X zf?>j&REgE7M=941jg)ZhQu6Ji?c}2gn3I6vBy?8D$}4S&faG>br*aasb?i?i>5>8( z=AW6F37t;Y+SO_lnh)Xf<;y56EJRRH5L7A^?<{a!1Km{Y>$_>mDoI6R{;x3Fv@WW7 zOF)pNPexWo3ZgyY5$iXQo3s?XA=aGjcq-*!(n3YIne_{}X!8 z+hA39sZ5U9p0_2kM{@8ED}xai^UWwR(LUaulo1iKxt~y>-h^&`Hh5_im5u_YR5cIU zKTiHr?6iZxQ$xe;y?FzfQEt@GZ62vyj-4-ZYWV?;C{S~9j0)E9?~P_FGI9+_&()z! zUwe6;w>x`U#OU~xM-fPP2Z2SYfZ0N$RP`uM`VH!cI4Hf_Uo?%rAQSquGf+uI+;3h5 z71u7>KH5$Rb9^%x&8Aa13Fs4^XE*G{()-5n87$ihj)n(y5ai|MAvZU--6dV2ITDhS zli~00&o4uuj)JBdANGg}J~^Y&fHS$jAwze*g=*ds5L_xbfoxq0`Ui}Ik6QrWCTAlo z9OYmwI}c^iR!D3`Em&{YBC};dTKE=p`fFkIoB*OwG)WF^0sHTicj6fgIG1OI&eDh? 
zV$4Q*N-j)}KV^{^ZXrDo=-#*zV=F$3#0-DL_40Rlq9>X&;qP1vV!1YXxhX57fZE%A zX-m+x@idIx83Mf(=ojP3hsiRkKu5c14I3wCxu>$}_q&Gl!=`R>O0^r`I4FgF%lIIR}tyOl{$sw+vcK5&q2Fg`q(=a>}T`Q+wDu%E3~00h+>% zoT1pf9ST|ln($C#*o!io5eBa*{EivU$f2!J)h7)&&l%S`YvWISZN=wtV*eo&S&MP@ z$D{BaKMsKsCAwVSU67N2WPXdUza531E#M@e&?OS*@@?oTGQdNF$_OBa^JL`daWTWW zD5uCk6(1Vko_+^c8+Wx7BYXSc==makxqwip!$t@!l7P`!urE1KDdd&!X`v2Eoi+~z zC-*|`8wPby6l5y5R<3Pr%=%JjGm~L1$bdpBhFYWIodncDKwoLQ>`M(8jHW=Bt37=C zaOSkHpi?;s+8Xw!#5CUkeF~qRo-S~=0vqy5{iC1ggO6{ zeE8%Bl*PtCf60bxR<6R(sv02Yr(Iu(*h%PgP6BBg+UCf<#DE%t z)Swo=ouL?hgJ$5~L7al0`Y|q9y5sJ-!>SBBa~blBt#I?<2KS2Q+Vq9U)5+oEs}Xq9 z%{YI$0E6zo6&bH(IomWI+P322;>vaCbbxNm_;8d-O7R(iR}#Z}$Oz0B!X2No59#I* zOq<^Wm1dk-&&k&YHP?2zT^_nJ6zQe#^i@KTifGBh71bczIj9}ux#OYh_lZlud)N% zP_TbBHXa*}`=4}l5LEnC|2Oje-otZugh5>NDORue3AkylbM50ARKOXzO1Zh+=Z(19 zT+AqQ%0b}kAxD4aAZQW_nHb%}-EiV!u}jX!q6ior1-k*vI|)Sf&*$ZO82x*>gI+8u zUxq-9Be}`U{l=s)nl1cxzGkX}n6)ruUW6{=JU4;MAW?fjuJM9Q!@at3uj=aCjygIA zPNmt9mPZijFG8R2x}62|duClJOl3td(XAP|V%2fKZ>yzZiG&z(wUT!L(4_>lehP)W z+({sp^Yy0A4n7Y-Jq`jQl2nrqgHa#l59n;Xt;qUHCvy_ef#m4t3tPJymDmjrY7o0> zYeQq=W<2*v9zw&7ICtI~cm3xf^!)o>to*DLF%deXk_0?CzbZkEHTwYGTAzgB3+H2~ zuj9lbL+|mExfA{ZA~fH3k8$Rk&8h3u!kB{>mYqUoga`Cz@)286X+A)!gGiGG4rFz2&_v~Vt+(*4JcoZpbmn!_+*o+L=(KxHab52VKfX4f4|_w z5(5u4O;G5?vgtbs%|}3&bEN0!T^2<^@pf#rFMq1d06>%byU*V z@@)~z^A#AmLZb*wbO|i~YT@&FQHOzrZo6zKhDlclV>SW(kKZIjB8Nn#fLN~J9SOA1 zlm}4pT%D9E-Z3Dph#)90 zvhrvGxjk|~!s!s@Neb(W6fhAnOkxF#Bwz?aXE0;r?DhC#>jJ2h%!%BooCIwNN5g}L zF$<3(Xc$rOI6U-J2twR!IQQOCe3Ph$*O`CdH*GcMj}Rj#C6_b*%avM^_bb+~KY@M= z9-|--(%=kzbHp zT=gTcmEhPLyO1do3lW#K3XiKWEpYt%yIhFhN) zji@j=Y-f*R##h-e4%Fbc^BUa$${c9Qe@G8+e$6AJaj1N0(m}c_L2i-jbFt9w?Wti(Afw}9lPXZ4 zSHM?Ne-c%@(Oe!)P~j|~TO@MEH+2S3=K$RSfp-q@uU7u^@>0GXvz6bLnK}ySs{vM> zj{l5zEYM?gYVJzDe1w2D^J4k=IU}DM^)jv~WZbpo%5rB##TLm@NqJ-gEp{h>timBc zkLCZyM-GT9k7;PG%Ce{-ZsQLk7>u?Ga}u-?j)n()V(hA&48)d99Q^z%WZR@Lp3j56 zyBLwzjf3Q^r?C8&-WWaOR*V;GL3C&F-xWvTGkZP82DRJ(%7>D1?Gs{rILgFgw$d0e zZYFDoJ5^M8@(lYo`u 
zyz5C4XGQk+Ut`O$D9m2c3m#3;^{t{tW#K3XVspVyRBS`az7trVNgy@modU z>%q#kNob#F3()%6?+Ir~Lu@XCP1c;GPTX%oYk<_x4H6XN-2UIW-)mP`lh)(i4bzLW8|J3#i4f_76qys7aW0`ax{i(F04ScM-Nk#;Gb2r`4){gzaG&+)j%^o9(h z9ECDZcsoQbvRw}t4B>RDUke@F?pyIC%5=_cQPmPS8Xk5-soB_av1{FSNTx2p!bcv$ z?SuXJ8!QQ*fWzcl1fZ&_7|2)76t2i#WxrecgxFq_EV4OPM;s-;eL=3AK{>lNT zkCZH3i#iA>EFS@*BVk|fPmN)kd{(Yd;eyo%XG)dGF^E8eaf%x#C1w%ItP*Isq9;-q zA%{sMhgl+rRiXfqDY?l{4UwEHN)5yc4J0ZJq$)Q^)o$E-HN;BpZ`^f>RE|GNdLhr^ z!`Cg>;>*7W_c{GL{T}^3J&qnv&qL2g&r8owuS2g#uS>5_>p|;7>qYBF>q+ZdS38P0 z_5O-ZgySVDTp^xND3rWBnz|h~gVCAkR9Xu)M*cdR0jsrbD#O%hYL=Iin4mRD;j88h zhSDS)Jf00}xCQ4wc@L+C&BrZ~F$hzC51o~pP&9rB2px{m_ngJ5H@`#lipdDA_K?d8 zLuncG2KqF|fYPF3Xf$qcR|tO+M%&Yz0omOg0=={e2ALf~*N{u#spe!_l8$4?ieQVd zB4x|>I5%PvCUgl#kop)5`~zDByn=kuZ|rsWXZcS25_1Q}hB@2|?Rk9twG5N~#Tj$O zWhj9N5{(KPh4A~Ou>?S>bZ!l-34SOvg~D5&;nZIF&!%$oZEoUHmdnzkDnigmai(yfF!?RNMCX^MF!X$TxPsN_+ zHbXHAwVceoJ)x}lnnhcPVvCwPm!i@;YAP#0sYwoZAGdmUbg+kH+Mm!UM5Qp4X zAiUvXKlHol#;W%o12K8&z*=AHfA1Rfcl;n=EXLa}BaOkRJ7a2|7``KBVz~XSBnH>q zRP#FxLrglTEG1mqyefB_V2V`z(0L>&wN&u;y9B8b2wlYFc>PA_c?>ziSPLEroQ(y%77N5#R!Q) z4|k~+r4~))xjbatm?^g++2RESXZ%Y<5=gA&2?a$qQMIIomCGP%ycdGqQ9@W|W?oCIwN`v7Pk)M=qf{_K0m3qFrWZ(K9Tzub_M zgks8^{@AqPB_#Uv#F$%WAuv#Zm_EaB^yAm?f&ByQS{jhs9@ zCr-dL9}%?udZQN=h&mF}9xKM4q+B4ZD#4>oOW+%B#a~&pQgPU|GWMYBq;YA7zNeD#yO~!lPV0pt~dTK zNW-1i8+VMlf*1dD5IV66w?EPYV)rae8KU82C&Q5WLoofbJX|n&phu-HEyJY$8-kpT zM{w^h0#!&T);!|eQiDsyaUc>eL2`ig65g293oGy}&K_I}J_Us75LOJEfHRAdvHQV_ zWPOHjFh6D{_D&JP8<#o}f!y4qr5VxT}#fFeAY*k?I;o2j^C9@^Cn8K1A)p7tFzt2XuV)&pH8@ z&Pv~EFPwJyUY)*~cjThvI}zg#I0ap@s`JH|f7%kvw_TGCXoo>N3Ho5Ba#>OA?HCDC zl`;C;g4RiBFchdBdoy=@;Z*GRgO1PMT^HcYUDLOkhNG{X*6AC^{JF5^b%lwuaoEJP z{g%wc(U;G(RflvqO+Zm(Iy@-`lb#f@^mYh~WmwUC5Oyi}e-L>aYoLkJf57o60_2t8 zf~&jEdY3Jjjr$g)elredw$EDU(b45_iUlSKff#XG~9~z2s21=saA1J+P?bhZuk8 z5?r&#TzETvh@DTo5;yEV1JirZ7gFz4I`E*^wTA{?4AwwaWrzi1$u=Wz@S%m9Acm{q z@w!c!sBFw7vu9|KuGO3EJ&Wjicbe*ut$WWN8py1?$YqvbL^Zt&wl+ke4v-fh4I=4c z@gW#H7?x4Mg7&X;97SUQ_18$y9oD`qBSAD8P4*`ttZIsz0ThcpaqKd|1;5{Cj&G^* 
zo|^4yAo`nhBY|bmw~93>!C)}#GpdXP^Z{|)4`$^=bm{cWk^FM}1iVm?9{evzs_`Y#Ew#RoPg9Nwjb^u?9h3DOfg{j{O<8k;U*LU7doQadJ zzA5#GC|r0RZtVO+j6eJmbpEA?nI~P1o6tM7p4Om)&P-TA7ek>&#J4p;D4GnxJu+sD zbyb1Zv;)^FHqHw#i$l8BibH^GxP%I+>9aoxxkW*&=2bx5FdDZkJpnW_w6zn@aj@Q2i z_M5I_%$Q=+vy}EXgQ2vidLLK;98-(cuMO4tUFFYC!7W0C7kl{g4zaLY`a!zb`LqMJ!W01oesdtS*3wzArH$uxV)I=qIf)KfbM- z1BEXg(F$q0K`%AfC?AO+TDua8D<4vzB4>>Sx++6$CHGIb0dA)RkI&bBPSTBejIPkY zw`;eCY;Egx37Ayo?R`UrZD{{JzSM)t61TbBqpBu?>ZTa_GUhMvvv>rI!6Va}{o%ms zx9Dqnfb7Aj6E#GnE+LpZEsAZe8upl)@E%C*YlHzzOPH#VTazqSp;&>Ehl)&b6&ND} zS<&!+5B&>=Eu7Ab1Q=yTf?OljNRavvvmPv#(rqc)xeBc#tt+iJRRC#8RX(yc5~R0D zhcZSK=LQd=uKI*mfoN|=Z%83k<-?>yjEsR-%qNOiqAe^JJG*VsQ#))7FFTDzT1fpxxV-N60h@I`tiVnq>3j?|TkE5cReJ-?T_)w3F7u3mhxRjt zM9HURXGO=>x`-LOAS&Be0t$T0lYlYsSbeqWX%=n<8moxbL9?J(Gx$SaC*CZb4?I}) zpnapLst{xaLM#rqqT-CHI;0@!J7Xf;Q74=Zi6~zK@A}_$cZo0bWEl$D>%fr~cjtpE znqX#euwrtt{*^J(7*$3B`T(nK3}H-x*UZV93#0~`u|B7|OHt)J5{a1J1#}xapw9)X zB<%<-BZj3sS<4G7BY~xSWUCCOw@Iq|y%-TmXXAieTa37)8A@IY;ItXgZ!powISb{% z6)lF7PXx+IHJJ1zcur645FzdGT~jI6G&NyD-dc>y+h*oC9&Q@K`Pk4r1>+63zT0T3J6*^HI$X%oEj)YUPE}spdpxo?ZDI;y%!x-I+s-BX%iyprCF}(le zg$Vd_!;RlkHY`#z9$1P8Rm3gjgBlAgrGyUl2hwG$H>k@ytqYA4Ks!TAfwdHmROKTz zXkoU>hZTG$H4u$uh_GDnX+4~>3vxpd6fO$I8HDZ$!*Ind1M=Xbh!7$kh%rATl_+2i zCme1^dt1^W8@P7JB~tHK%bhUwUbXI1(@;R}6Ge$aZLUI`3>I|32~KiDaQPBB zrd|Gi8xE*JK#PT-#Y50z?1#Ppxx7ud$JOK8A{hR@rte zf|)Fv!H`3*-O>V=S;8?I=rD>cbrC2T0v+c2BzCPc1{7)_pyMMNAW*AW3+E6S2vV0b zSo*Pnp5qR*uib~WhQDi8f&O5QQN-ljGxeE(6+`*_a667lBibwkFWUBH(J0Yrk(D&5CAXRa+elrjcAk!kAjEHM?e953U zmWKmX_)??CS_)x$MuWa8AE~ppnUrIzO=w%lvWwKw2dzniY7oUtj6j!SFr2bk1|)`o zMmm$Y1DxyxuQ(wp9*BmEbb86`6rK)cEY)CQrpxPxB4)oG$lzG^VQuSptZGu>6`SFA zG$G(CfJ7_O6LJ!dXHyHPtaRyprd1#XZXpI+>-anDfNODSWI|ip6a*0$gg49UL zRMu1jfmN(~C=oo0T$>@H(6^C6C3Q`uV;X@dEy-fNH-}YL8(9M%U4@}3f~E^nvjfy2?+P>_0m_L_#0VPe;dprXhH4+~Z}tk7VQGbi}5Q>72pjWN@^ zpd}?`C5uPE7--sq_Qj%b4}yUpT`|@T-F!7;AJ9Vj3&r5b_(z1w@zALM7-uaqu%7LP zx$J-^zZDFP4E}XeR7kvGE=lN6nn{<=@qJdLt^Sd|8U}CH)7n=yW7D2wHrt!X5;IiD 
zoQ^oBnXf8mz-TuaBqXPX0ZHfJAa!vV0-WU4$&tPf+}{OLxIjtN=TyCACgg zeJlkpUHPz-j&$WCJ^yWg+a!}RX0?fBJn3i?AJLyQ=<($GK!Ye%f(o5iU=T&wAny%< zD9of&0hiLD7|AY06B0!i!IQ%266sV);Zx3!yQ3{q753D&u-dG@idf&;!HXuf4E1r+ z4z08uG&uM$oI(r^A#T1p#W;B{6N!nU$3hvhD$`-C zpmp;2hOQlQiBvCwbnhQ4D+fInsl~cJEw@xFLqejE>(Vp=B$Uzg8Ikln(!7J719$te zt?MK8T%?xN``6ljq#J97ibzZ~UD@q0rGf=3%oXTt(6}*fv$Td2feyDdL|Hrn#(-jt zq0HdZZH+_;ADF<(mNKj*;}K~ZIR@z#NDod5858mjq8K}&h)x(>{}fMNSK!ddctB4S z7ZjpMD-g`FM{)^latvv=&q&QCVD~qfWL69vK~k4CGJXq@v0IoHQG?3&FR+57aqyCH zo>vp$BPEZFdHiIYCvtqBW$q3~tQjq_SbHDLf*CSK3d~55L!?JUwTuN;3Y+c}kJQWv zofVIcfOqu&lvL#-UGcCUUa3l8dn|xt=}!5uiuIcTv%+Smn|0hwZDJWvQrjc7P0UV+ z^t6VOIpT2YIpD}3lNb3eikKyt~CB$fful+ktSK-pa@F)@Zj3}IfW-8Bg#+}|ncKeXtwn>+n8V1@7 z84Y3_jZPV@4$@~1!Y!)^23iCaE$A@n(6CPViiBdtX%-(Z^-1P*sK=kVL^e5bvZIcN z7&{)L%1A)1f{)&O5}G870>}cRW#_sg?2{iibqdMv88aNM#oRJ?54Var5gy+ zm7jFw!`jALV>Ou8(bP7fvh<{kWoi$rO;Q!Hp4ubbSd*zedQ!wv_rcm>$5u#F<+Iw! z8aSr6ed@iL{ysfjR=SZORk=^~EJ=T#-u~8rgw0-L4?PDo&7K{mRQP(|h%u~4B3ld0 z7+5-a5@>x}(%0_MnVdbR;ZIt5KZ0Ta8b3VCV+>wIln)>(1<*+9<gm$G_K&1-@K0BUw1rQ&QYZzXJk~h0JQ|}wM*r2SHm{x%7FDyO;Orw`Gkp4p!y=Wk%Nmt-gT2P?0!YR_=%0gCd`ROvZ=`yOE zVbpr#rjnjZge47Yr4ZKZA(X{jm=O2CCpfZ}nPPbnG9y85Lp@qPU$K>)qa>rsNH9`K z4R&JYp%ul93P-xqkv>b@n~@-0@km!X(iM{7R6NqXEmCL3Q`^E)lG9zft#-(y9H+id zl{a-pHoYB&t36mdfn5se(-UaS4KwJ+Gc$C$_LmOPi{%?S;>R^@~PN;&Mv6%W*Uhn_h043>?4oB?1Jt` z>w4o?c^HGvEwr}zL%i}~6tj;z1T(ztUv{obmugSP!N<)*S&k_O3`Vw? 
z&+g23XSds%aRx}=99MNT@C69D3I}C5!9YTnCbT)nAgJe~AXbZj7KMWb%uRY1^zIIkHW}pk)e_2(>6~G)vg#OhCP@N_Y#>!TQXbp)VDJjxyqOxv45`G}(2s!6DKltC9p)=gmW5}G{Y5R!u^k!#?QsO)05N^`2r%~}l0 zeQk(J!?UcQuTW2i4gLsf9xCkWO^M*uyz<3u(r)zVzOpN#qW)PSA^F+ zH2sOR;1+!O-enly>&&}|y{=h`_bwaXZcxxe8}Qzv*J8=_Kg2Q7xAE~^`y)Myf>2t5 z&{LP=hLiWf9}dYMs9IyeEnjx33xlzvkgeipDa$fZmRnQ-_3p8x1L!3?LA()Fn;X%| zA&30NUp@)tzTWRDy=@)7sV>BB(+hjK2X%yMYM>QYAYk*6COiD7;RTdNw}6@q9T}A7 zRGAyqD83D}K^1ywvv?b}R|O$=l9`9MycA>nc4Xd;HQW4{KGWZ$%(3m8uwq3UTvMlE z<|MbRXH?js<14-eyS3&aAMT7YcLy^RRS^-Wt{4;UDkR zqbEti|9cY;{B8|GMjWgE^faD)KWuB?6j?OzO`r`yIo3tyZtKL1133?!K)z+kp>Rke zeZ7J`@I=$(cVL(dGt39EB+I+8gF8YrjL&-1BkDwtG>NwPd7f zk_SyuFN)$}B&HNPvM?#ChPbR?gEIt$H^~2cpLF2IOv54Pllw@k!|UrbOgQgQ?A|W3 zibz8Qy59$%v)xw;v8Fb}oPOkcdpEq$I4rdr9oYBsgR!8D)N`jc-1~GDu>a1sWu}IL zO^z_8E223AZKX+MFm@D1ospoV(3|UVVHgYsgOLT%XcTVuz|QEmhI%&=NFoQ{5G^$r zD0G_T1c?D{*MF)o#{LL*|MqX#`+`ew>xm!Zw+p>pw_|jHDuA1-svyhqa94$qqtM9d zbWMIx6`*+i1F*+9mOphXo^Ke7%J>?rEjSauxo9RS%=_gvcC^r?^{ji^&fH;!P(lFIFcrsSpaATgSeugMUJ8_ zAcHgdsYu7G@Y;<_(Og*!dDV6V&OHmK&IMNf;aR*LtU!6V4#q(z;m76H4|FvA`|tS6 z%9%Lx;_1k5ms!B%QxC;tYqwilfU=ocSEtUaU~F9+qQEB&1$`HdG5pYbw7xlvF$Lbt zedH)(WQ}h6i9{Y)?>>6|a94xDU@#a11XaqaHf_R;nb{BUx`PfB+9JKDGb{8vM%c_z z94p>>7wdd`;QC!NctDumuskYHy9c-bZEsxs^V@O!vRg6V({)ouXX@+f(AL&A+|^*@ zDD7$gG#ycHpbVNE$`85-*H%=(r?0^ySG|Gd(R{2}UyS2#S&V)7I&7+!0Mm=!K*NU* z;i0d0$HkX)HW2ie_H0U=gjc^Wf(A!oxgtknfg#~N!N)MIBm`ckYP;SZ06BY+3o!rZ zKSaq`H;h&Pz&)>3LD?e@Yt{vD&}~Ox51|Fy>V%}sI-1{q3(tQ!8K+;7Y9L6ff9qxP$+icf0`(W*GRZ08gdOjb7#twVr7 zl<3q{+*!eEs9(Aa@75B9JhmN8(XlXu5-d1`$Fo;Hjn8-41&922chg7^TfP(z{LhDD z9-WQiZe-TBeuXFRS&tdNI39Zz_RVlXQ%uwIYGsaMMgqo;0y7dY7z_qu#6bsRTeoaM zG!kjg-Z413SSMm8B2Vv(wC9|4n5j1Ywj!@q2 zIQchk;=W7gpzAQcGs1+kao?@4V*VAE;I8A}#|?Y*_KR+7jbPO`_*6-ol@Z3G^3;(Z z$V*Wa)02h4=m}GiM#4*pi?#s+kUT`Me+$2VRlwq#e~xkd7Cf`@DbVLZxOMUmp--;dI-36e7XBq3j!Uj62EX||Y7J>a}8$q$tEIV*9)J+zjnYBh*Tk9}#+Vc#BIgcENr)XXQ~;Di;=%ydN1M!FJCbi$vPor}4*UL#Mq#DkHZU*<)?*pV1jMgq#l zZA~z;%kIKpFc^#+A{Y!}`?hU^w*Q#iW3n(4r^M%QE@d7id!B@I&ZUaEjw5{E;^Q+P 
z#%(WMfEUlGu&=BOQ*izRH{-SW*Wr=}j>W%!IcqR0J>sBC48q~AY11}s+GJZZP&PXL^d#W0-9!>mPfF(#>G3uBMLS$jN=Ki#<;g){fZDW{d9 z)HxNq&i@=w-}_s{_(B}=)8oMVzrmtCOUQli#;miKp#If0*rJTZ#CCmHNAuTj;D!Iy zL(3lvy!{NGDcc82ezceEs%haC2;n9;+zvQI8njR}gZa@I3=V@F!J#9sK}J&%`H+ns z&Ur9(|7j>NWhT$H<9>jdkG_M)zVlRl}1>?AN7nIzn%Vaxc_+R|bM zE6hYBgW-TNx}nM=b+s2o9uiTYozXCY8^45e;vV2dn_4qpfH_AVkn)8evrf1aT}*d- zorcRh>G$(+Y`jGvqDbl4qg=9+#6SS`r+hU>XYO zdVOjXpOO_ZIPZ>M;Hibb#x-}GivxeV z59}u_5HS6s2XMoy3-Rkq|Aa$tmqzTsAskZ?jcerm>gG9)(^nLk1%oOfLdKFlQ^>M4?v0QG_>q0q6K#A-YCps?m6J4V0}b;3x8U-A=fjPB@5s z)SJLsZa|F$k|3CI2>2KWaZMa;Q=KsUBlAhZwJ6Bnpde3jndgz7PfAycsc~pk?|WjZt71m)qT~k$~>YjwgX<#CQ@U zaO`#&?p*vl4*A2Cc;JksxO$f?x_@ydmOOYJUSD)GF8|x9y#_tZRleKMhQ>Cz$8CB$ z_f|#?U1#eS8QDRVW}1rI<#0gL^hBV$Pz0g1pMWo#08jZ8a3e&~RO6)HRVyGwo8fo6 z;PtrRaXL+<*&!vAW(s|tI&}%Mq(Tr9OD%*54`F2t!6F&TL>GC!j0~AUo(+x-QTSS5 z%Ix#HP0tB>KBc6$1Y=|)D=MCS^DCTr^q!E|l7rD@B%lwdOV3=?`-Th#gTcrt>S}9E z7w^GFRt;O}52axk*czeD`0}F+gsK5!?bjNi&0pf94>xyRrocH-K5Gu9beF(x=*r6^QGd8`%XaSYLCMOZ>___hYgKyg2RRqqH5jC*3{TIXc;vaqXSbp zrf(dHI0WYGx{xp&L}GEo>eiyQdM$+f3V2G#Lv;IxwZ0V7yQ-z$G!7WyM$^4MpD4;y zL3Uel2qejjG`(Y#ByH0*I&Dnbwr$(Sv~AnAZM%Eg#50Srr+1 z#YSvVeE@xeYD%52=XE=bl98i)tch7~Ln!haY(ZWB@F>($C=E4yZm z-U~%R!A9)bja8UEg>Bmlx$HH_xnlcu-sfq5=^S|Ga;*3;)qn5L^0r)NH(pQsk|2#y zU4t1t#>*;y1Se*v7aV3MVqVJaBKN_NR)i0e4x*yoDZutmX%$4tL3R^y02}R+gkTfF zgV5rMRu#iJTJc8PBZE4cDTuC}y&xT>cXHv{C+!ysLXVkQvFVH|@OLnI1wtQ|U>FOE z^TdUzQuGm+jHVKga=nsa3N=EDXTgQ#vKYVqtumPgo$&3&u!yFr4lFTzyQ|F|`H9%MGHl;2(nBicyx!%Mq(yk$ZoP7;{aI<>LrvMdr9O>l3UqVn)wxQ!;_WkS*vc7 zoH8khY8eH5^ueJ_lzV<$;AMem@G8O0?a_YF4GlZ!Y;S<-EdheROgL-~L_%0kBIL-? 
znA*n8VxI#8YVDocG0Uu!ebWb*)lV)h_TMOi5rI)*jRpKD@%zjs`MQOf_qWjD-f-41 z@HR35Z*G)}L^L84uCeWQ)0m70N7HJDUZ_zUP>~--bEgQCoszC%fC2-g#x>_Hq0yrO zrN6^A?Aq?`?j>dtP0Qz?wevh!9>sKIMS@VW$)8g%);G)UmGFBUi(oukfk_x&a{=-& zW_QHw-CyKVe>o%(4algAzxY)mV+NOM7w&?w*hf$mQ0+rgCAPu_nSPP4r z1R=a{5I)9be}&4(ZyzZb38%|S5Ko9w>va6Ek(hB(#v=#m+daedBGPz#+O!#%(BZxb z_jytOJG29f6Xd@EgfqOr>i7?EkSFYDg0-KF?+Q!|N;ur@Vn?rVq&_n`b5H_EQ&UrZ zb+t`vml;)WFV`?d#(AthAJJ~_xe#R%Asmk$Bxhck;Jnu5eM}vt0WrZ0#EyK#zR!X}Fu=9K@ z$o>fi{KQJf*ZGLfny7eBnb>eG=)R>3oouJ9iPhx5M&=R2DA^8AdbgLj{Gl6Top%2r z5ZmcWBj~VhfHX^t%*=3R1RJnq!s#|yZY4+S1q#6xMo-`fQmFaazl25%mqb!XWU{%@ zH(|3wT1Z#0yXJf*!=pkfUOl1NU4JG29)vrjq=btRnI6LqpAKijmNE~~rTx~eUGY{G z4QFZ%(Z;vb#;xnEOeNa9R?XXUI=4E`A%QgzJIRkq{j+)Igf>o-l7QvJzSxEkX;BtI zf>O0KMK{QfL2>VZp*5Oul{_WmNjI^Hn>sftx*+1obIc0(GpRaII};MJ6b2UGX=Oq7 zM>~ia0OKvpu>E{)bG#?)VAygZAZojaAM{l6Z0k*z#O@t%`IP$cXr7L z+iRvn9OMGlcZTahVi7sysEtx|3H5zU|pPSq8s9$rR&s3lah;rdBrJv0@czff=-f zXPo-9L-{Lv`d$p`LtK1O0r9vju3Kv(Q;d3;MgKK67&6s$p#_F`oArL}kYXj{LrCX* zX5W1g?;caxH0`HHzc(PXVT3Gk%d%gXw@|FGayqT%{5hlL&3+v4VPp)s*i@6|$ zwi>_!>S!)H>#)sgkAKWUF<_Sdx;WitDAr}O2?Q?v>?vuC+Y_qWz zBm`Hu5krX4A`a%hFnUlO1Qn7PHx@&>u`Z)g?z<0kQ)2W;jN!lJ^<|Re=oAHL76_}-9hTKCAO>f{ z&Tx_j?%Sd3EqE83EeD6MfCdiiW~zGijVH=FEKB5ZjE;Fs7{H+`j2G5iX6GRm&shi1 zDg{RALCD)ek5*yC^z7TVN(A+lLjUc8gsj-qTEU2_puC6mS7(cBi2lmsdn4)YJY>6E zS|EI~?7Sb$3QtY$-{1jN!6pQ?p@ScLFy^NzY;-{;lN)?@xoG&NHPWxw-YHM;_jCt6 zR~29hofep7Me;M-3EYkbHD%(fu|oOQCQA^C$NG(Qy`D&+-*Yc!9rw!6#Q+>&s<@AI zC14a({5|cB8^Ba*s!=Cbp7FOnjFeQhVrcLqV5%%WwoFXl`Uma)wTp{Z7_hvvi?3&A z)8FDCim;9pq6pB0@U`i;o!le{|tw_o@_}-*-TwpkDUc1zU1=vY5=L@8qSZsuPEG zY02lEx9I1u*jnis<;6*%0^AXCqHLN1YTV;nh4JG8Og-B}L~No>wBb~&Pd;^+j9-XF$z@&`l? 
z1LRKajQ-R(y#mk>L6M^*3O1vQwIOXn>@;eE%q z5VY26#LCj-h|XxHcEwt7f(h>nrLvXn+fEtsmp@-2X#d-^nB8Rx1D^`0I3xw)0Pba` z-4DB&)OZ%iG7*hfVWB~Lfn%L`&U>o%a9fUFYqVv30&&Qqnq-kI2@uef4&mNH5ywi& zV|~is(I_C{N3aM)E*6AR4%CW~1A&qGS|Qf6%`5qHLd^xXFF5i+uV)A%sDRD+=LFDoQ=|E`?N)4`eW8(m88su>7fMOF{1*~kcB9LBq3vW zANbZnD!2-$_>{Wyw!Y?4e4Mq@u9zXdrc}E^PYT{4G;Bac^8XLJ!Z9TnfI^;p1+8GJ zyB>7jQ8utXH+jyOmQykbzz+;a>yz)tJo)_PrhA8>yZTA2&BPdlg!J)Co~H-AaoQ8h zOS;8)Q~u&4ZhxHGH^%Ho3tn6iar2IhWe}S2WU#%c>MB@Znf9XNZVC+eLzh_DprPavXa-Z=atY?_muNBhi3uFmv zZ4T*A{)YOT$x6^)?o$`x1)_$4*INSpO5>RHZ0HBcM_&t&I7b2?n z{D&ZvtT2k<$XzyRjEJLX9K}H$OAK9782&<2oL?0R(2{QV0qxH;EgsUgAx##@_~JPT z!a-VMGMAph4&Y{<&?JQ)e{DCqCs<2KIspam=Oz`EAKhnB$uqPZUZyEGbzq8Z6#y<5pu5CkJ%e^CS@(1ki*<$FWgv|E+U@7x{Xo=(Tne)oEUGHE>;e`_}xM}53bf*+rgnKGyB2q8xHrPexg$hF-q9hG_3ic zonLCt=Kghkt$3JSMwFh5N&aYE`1QUt1HW=G(@;?z{;V5=XvF|q+sg>$SmE@?2KF%{ zIIGdi2`G;X2Q}hgOY&AI1d@r0L@&lICoQYVPy4@E{G_snrp9-OMi_AJL>G5+d7oMJ zsk+e=1^64%MdXinj0z`<`~iJP@h#bTQAsEr9yKiuyy-IVMll~GuK~AS4hkQ46goD} zoai3SrmL;`CtzPKlK(1vh*}f8p~YE)<&XhvM@wEJgi$Gq0Xz zYdHr-odVwxPA(^2P;!ZFe@1O9mfmdJKP|5dhd*7PHp$-LvRnH0P_K3?|ynEFJz4Z61Ard#FwWTQ5}U#w1H}9`srml=HhZsh?^E%UmX1O zO#`c*Gl$%|9rm`KW?FIQS6E58b@SepU?Y}s)^~h@m-C;<_gk+8FaK$(N-7}-)D4AT z%@_Bs!%VJv(B{raFt+dTQpJn9^}q?Fd2VYO(xNtJcl7EI2U))Wah?vZqg+3Pro|Oa`<^ zoP)mc)?OIU=lltES3`~4b%PQl03NN+FmEpvXhC5P_gtmlEUFAj4E>o~kP(9B#u-NO z@Uj~^@*PP_PA67ufgk}+Q^f(+f|sh1k)TF1qUi&TcB2vRG2%^VdZP(-G?j8JZZg!O z&hP7XIVWqX*6j>`DF)Zg^roTFftJ<8)#)=*d#9LomlE4<)B9XOb>B_I9q=*M{9aZK zbu=Hi2U|0G%Kw$YYNMyCKTT6|cmg*KkIwp z{LWS|G`>J?&q1CiNwh>0+K%4~ZTA;j7mc{=9^b0p`|R}ayx?6bm)A+j;&GelB|wMW zz}ZMceT?-ZCVvDN^_vFn%$-sSmm$;dDYgbFB~m&BqjshB9{&4C7*BL*o1@#QnSg49 zd3=+w7JA7X@XvQ3nh;0~<*9+B`LP-{SdTB{aiB!Q?Kebv<-Yeu|MX&*q!VTBdd7l zv(^Ar;M_KZmo0=H9lL(C+&V01WT2{wNTXk1bt}l-FqO4!w43H~FO+YxJe7DHrYN$w z=tjGpf=(-T3HoybW7~?QB@2ogXaiPL*cNG7T>K6t)Ic6dp^}2E^tCB%Xzf?^thEgG zIIK&VtuoidWCx?E6d@0rF`Q2i)KdsDV|iI=%t6_YEAH|Nw_@9har9Ipf8N@zVx5%T zz_epO7?p~tw5|$#q5h=ftvCG{#sOm08iwczmn~5>}Q<=x{jo!W)zor0P?q1E* 
zEpr9Hn)%=FT^F@Mxf+0YJ+Sn;kz_~Yd%Knaf<~6$ znj2BR>FNEVQo={Hllk zsriOO{NQ{1Y@>N=nAr{M)^3MG?3jvS*W;(PWTD-5*$9|uu zVMq4HAnH%R0s72oy1h!FIU#gc*yu@}acuA`5-K7^1E+8en|_BEo!rmaHN~-ujJZ3s zoo}VVwD0o?%Nb|2WAGmAL&+qaxS_Xb5GR!;!ceZ*YQ*ShnzH_#DC!|-%kK1z%Vo2> zJWeW;abgf6izQt0an#sJPJq15s8Fvr;)*KRyi zvq`bL_XdLPL+188@C>sZz?Si!RAzDfxlv1R;QuJ3ye*OFP5&hIUK(&>HA;F%qo?C* z(;aSe>!dl9WW`f_tbr_sE12w$pbh&q))r!2UpiZ^&Me5FEB^F=DBng%JZ_We9&xfkJ2>Kz;*4GLjh99+){hD#|+X>O)2*K{{$eY{` zRNwa&DV(;BaFgY4T(5&dY1cl!o>xk|>{k-HwaQ)`j){$*n2i|@e76?b;eZ~ji2u4{iH7JGSDp>qaZBZ3OG^&9a0pqaS&efvz zCkXmfWU8KMX+iQih#W$ZFLjtqfS+i!@^xq6=3x7sM<#}K_o8s?cK^uecIak%;ATCt zPN@~_z2LWlmRGbYuGaU?kNwrX)TYl_D~eH#f1BB5MBA8+rD}6OuZ!XGzq=2uS@qgp z!iYmpwDg30{8`eV1u_)onTG$A{Qnchvbkpik3;8_S)6ah`ED&E98b~U1ZpC_I%Yp6 z8997y`1LL3EjR~v{S5Z}*tlPiqWgN@m8ajSW4h+50y8#&SuKMqIy|blU&*T(*vWqY zF+4wt-$@;Sah#-6w)QxKlc74Amum){7be|xpxcVSHmGq&gBtg8o7^S2Hy=A^yX}%} za+`pU51Cr4w;{e?4!Q5wt{CK!8~p>v5nZac)w&7A9skP&kBt!f2Pxn(T4Y>UG~oE~ zBk8Udx8{>T+t~(;%A0GM#}Rc~9Z&Q-@`-})8;xc=Onj^4J$B8l2ne6=5utng%TRlM zOkkFuhWM9CrV;e5kCpE56%{P%!WbJ5efq}BlkP5+>p+|xjYz~!?{HXduU~gcLQ5w& z7v%KX!E17;_Hy2sL4Q!rT@;|zXC%JT(UtDoT zxGmEC(l5D;n@gjZ>Hj;rN8_a{U;i^9DYHufjOg(<*u5DRD^Tw3u@s%1tEAxyyRYPR zq|fPZJ&y)WC*t&lZz%7zi+WIH9G|A0UC&HaUx&ip|LUP4D?t~YHG{jz-kCuuq4l?C z^akC`st5e|KY_8ajNQp5`+z>KqluV2XLq3XKNt1xia8FC{q{bhy89a9QjN4E1m+&Q z>VG;VZ{610Ajo;u;*h~9X^?c@3(UK`5W|7PA`P1t^_vVS=QSONb<_--gNC0#YgLWb zqUeiM(Gm4F{7>b0$Z)(7f+1>$T<|CReaUkJg-?s^MC$I>ux~Z?Fx^i9noUkuDgX$i zG{8~z7rc-};4mbZ9O#W-7HrfbXm+e}`r+%IYPD@r0+BUxbP98vk;TEhke-=*0A;r41`1CfG~V3CeiB^Umi8k)|U)Lzf^Q2 z-mn&2=v$eFCo`EY?9{R~pJT#xppX1&`GBwE!^SUSBGZDTPS_*@yRdhkJx*071Sta3 zM6>Dm3b0M!7;%@myFQo3(^wS$Bj&{a+Q0dUB;GW_0etH05s~N_x^UF89MJGzi z@iB3rW(2YF+dnLAY8dqc4epiKV7jJ7#mH8WI2$f316X_4-Cjhpr0=@0BDxwYL9LK> zPQ2|&m8R`y=msIcJ;AEHlWlGTc0=mB$&X`f9Ce+gXQ20GNmGfP|#kkIaf8 zXVnby17Y15VcTmh|6W%ZOaqPJ9pR>pT}zse3N$R`DxYt`&Q8J5EQ*)|9tRQgKOoal z`-%1Hb+0%mop-;sw^MV^1cYf;BafRY6m5+lo12I*Zb)tPu-cKr5G)C#>LJ5O(i_j= 
z7K;4CDDLfVfj47(B(i2DO;H&&{L_vFtTzJbyUhtJxpo`S#CD&04uYyJM&N3PtZF4W_B+Syn?h%oQN%cMb-p+Xi9fPnluWVyW`kS( zb&NVpIU!kGHq-Sq&;oc(WE}hHfoZ8LZu?vZU|QXGa;WS6q%s(I;(6bR)M@=-(39P; zqpzS<_tEQF37<5M7%XJv?KPVq-gm`j%Ci|ci8`pd%#FfXT>wW|rORv2Oh)D3h!irx z@w^y_7Et@aN6I-Ke+Y%@Zd01K|?IIiNo zea*WbyzkFg{YhR>&^#Yp)HykLD7_i7Gva}Uf1`}0-bKvrg7lg%H+i7}aKw1EHEY1p zIji+XU6^`3cfj9jwS;~&x6jNo`PO+0w?$o-L%fXP;Fvp|**q;Gt=ULCa;2D?u~ZYz zn1N-H`IaUQMX~KXuwsH}3(p=0jAl2Tety<0*O==* z@6_%n5;*N&4LG{vh238?Rx`#gGyD(eZH9VOmp~4rxJBTwDJm9kkC<=vCPXd@-RFQf z4DGy5g6RI<+9dAnkq|Y4=%!61XPi$(2$bNQUkv9H5EPZomJp3{GT!vomr1b2DRAPd zT|J@J{Pb`oUvnX%{|tsnnIRtEbl|#unqm@lVwcio0B!3986A5_VoRpCxZ}yW7Xa-{ zc%;waZK(Dm_N0fki}Me(*8C?X(*(wmZ;GCx8W zh;fPmPwV?qYB&v{stF{LY&wkK{-;_1OxcP&!r<*qogdd713D>H1<bc3+(OlB~= zAmEGWj-&rjTl@g=lFN!4*rSV|-Z7;%4Ptg#Gu(eBi099zh^{3};mJy2#7vlK1M>a- z{V3}$n@ieUZ-dE%{kF%6!PWXdjW0T!l=|D~uv>t)B1;fPoV&;}Gw4{h`}x~V#_fL) zR6~JdbQa9uC2S%4l0ryO8x0&POI83b2v7)?y;x*`KVvINCnRWVIFsZ`?ZTRC+OMXX zchI@vs{x%Zz5$|G z1=7WaSlJW86p#@m(R@U;aadxboG#or5ke^D>v+n#X#+8KX=w%%%?aqOELaD(XK8Qd zws7euv$D`#zUAyNGo6qaAfo^Wq(W&#OIkG}NE!e}2Y8NP7h{}Gpq{tldvsR}u}U$6 zq@5Jlj1X4?Oj**omf1&H4Wm(B|0eWWU_ph?UpkPYSVF{|me|d99xItUTDMkO{2rJ^ z+-$(=TgvM-8IUZk@1{lNzLVFH}# zpqZTbpmB*>y>meVNRS+2BWJ+_HEq{pqGn|VDCXk--T^@dN&EiDq+oK3Ymjmnk(S5z z!CR%^t6Xz}fmjCJx;vI$ALT|}#ss6_{?Tyez3f=Ku~c)*W}0P%2oY>WXB$7giX3RU z_ha#!ZD#uODqsogRDRQ*F>saN2E@;S!VERiz`8bq~F#fyE+R zi5_>s#tg#DOnh&2SZRoMm10&0Xt&>ntclykRuv+I_iow<);m<{VPI3wwoFjI;F($`E+jfj zPcoX5NFuK8#royETl(P#6yo#nMJ*(8ij9o#fBIq`;EM~Mk7wk9e9`7 zMHJD_d)4u9y?z+l{6@R}BLePjJ#{KjS4|kXlqKBUlBazwO2|BaIs2DN=&{Svn0>Ug zrvMr~PdW9>Yaz>jx}uWGxKLo&WN9*-AgOTFMxiomuG54RMlf)9I_OjqB?6`I44B^3 zZ%Wa~zhIpXgt%hu0Di z8Q}{AN+3W9oHZoTB#{+@Yc)b!NTL0h3Y$1EdB3)#Bjd_YmER*$X?8c5;^m^zNN1;1 zR*6L59(loBJ+?-oS060JbxGXD#lPl$2x*)mg*=)YB28cx4g@Hv0+!3UvCO*{@k>mG zBrwF!CQBTd#|^@^Vl%TJAB9@Fvc~KHsUAQk|DO>A+=w8bS;0$YVx++id-Bf4ZV(ms zM{6nA#}qXo03X}?F)VbMqkD`94(RD~wVdf?IguG0&uTe2RctDRiLxUF(!d7i*d2l0)v4CEW764jAND*jfaagFlcE@7vCSp=M7CH1IifF+xA1dMhAZR 
z6mG;g!g@W?l<-Z>KenutEvDV2Hid~Vo{ezTf}}PCS>+NDiwa;wNrQsqGhEZ!h5GqA zTC9oqs6i;WokF>#o?#GSZJPti`=sHz1v+#}*MF;0NBmin<+oa2CxP30km2!57`S&BX?MDw3Kk~mhqk!$7jVu4DyT+M?Z~jc()EH zEBHTK*H20}L#f=?;0Hq1nSCX~rv+j0!MZ;J1PBtq!FWQyafN;d2V;3c4Ph`0t+tLe zZ)R0(%da$lnwd^q<*b)$B-(X9`!;V{*lkZ=PIj`}ZI7}A4*;);IMUyDAWSlmomV3L z^%Rx-|AhVu>xMa0o>>AM=DJt?kw{Y~qDLzI9vl^8L#6@kX+a5*Dja zL`tg0s&p^Oxm+-F?-c~azczty(^!&4j6l{zl9jwz_`M35vTZI!VFoY^$>s;#nU%aq z_vxp9bUP}rgUiDF>1zKT{Y#x$uP>xv1c){Y#1s6N>*g)+I&%C+Oqjbcs)k`Z%2Ko{ z8G8#(suO<1)+9uhEr+C=cv{|kY$Xtd{VJqlIEtsTxuP6m3jZM3$wre3;-25ia(9x(K#;g1c1uEu_{I01(W}UV+ zKD`ONv$f~gA}o&v#rdzc9!Z-3Ry!H4Ze5iq0g8IWiO^I?W|T>+ys=U|lJMurbA$vW zwdcr&EexeWHD(Di7<%BeC^3IGPwT*WjmBj>E|n*8|~3XjN`7>SKh-S`hV z$!i=z`Sx++Wf<_n=9bt(WX$pxf9oVfRT6Kd9DqaRjHAvTy}=tvkxhixuJDZYkPXFr zDn5Ur*m6YKG@$*sa=qS#OLE63h>^TMeu*J|)?4}Mv-k%Ed%}XE&=ke+XO$&#`XT*F ze<6lDbIyOPbi%Ue%{oQpJhbZ2%i#WnH4(dz9k!gMmDmG59Vi$Z5~=^daW|=S#>@eq zV^g9RP)9g&wf2ajc~qY}RW-8i$yrogi**cELS$LZy=NF$LHc9u;G+?sezOIYk_rc~ zZuPQw(TE0Ij+PjZWkk|YVL7XUGsNp2UXj}J4A(}Va2?=N`n|C>b8W9VU#2p%qVEtD zeD_uO!pmti%HZO^xP$}Pj0-=)rGmaf)=+}|3zsZnm^`kfur32};R5KE(6J+S+8cly zYgbu{{4rYumZFJ3Xq~V44zJ)!%tEh44<5Pp9^Q7J zJlDJgYBBH7IdX(!*xNET>#Tdd$o^LniORLO%+KTcc)$_o_|=4%J5?1!(RUf6at8@7 z8%AC#)T*|YLT<`jElRzUcbGhIf=Gs}ND4zaRAP@jD9}R~uc)>%H9TtB zO7FVO)aswP_3EA?*MN)z<~`SYIR_>~cjQ@jP-O$=*zy2@-wiEZ(V%0&N92FQW`cWKWGWraIf$#uS^{ChnG@6fC?i)!Er);W{2 z38N+#*t3gNE*Cd2AV4{my^^{xCJ}}hP$axuK19Ub+1CZ9l{lclKsY=aL+mC%phSwr z&hDv^lB9S58FBByU0vs%)8PkPrCA__r8GH~*LPJ11gzZKbwBIt09Y7sC#EDFG0=b4 z!SOZKYCxI4aKfHSZcCAWSq7J1LmAyQQbJ74`c{IkFciZVq@tCnpRu2V5E?+hKCr|e zaR7q}V=}f%{qFfZoZBA*>Jw%_l0QztI*7wS7%Z;#^uL}9`S*riGiu-@_Q00pzbZOIHz&6df_j3iGuV1gwf&-vQL1xy!5v2X zV5s2;@#rN5>8p#Lubx^}!M=RnnHLuU5tu*2!-)+jAQ^&KZM^2}3d4dbV+;jM-dP`g zNxZL?Irsr}u%v5LWN>bc7sJDVOO*DihkJblmeY$@&SY{SZaXW8v)}D5YE2-3Th^a9 z@3dBWl4X3c+~oW{w7zz7b-DC9t7L;^!D_k!u9XGywTSk@>cdHBbEo4I#T!S*NupjO z{?hFFLw9|{NoF%4uf08z-SQ=u=C_m8uyEzLpqs%kF7uHmIw6DZHzP?kj6|9TK2tPq 
z=eAb2FoSX&LHs8nZExLZ0zQVfuDCz$iTiU)v#Zx18bfmWS>8#j}7_@6A-*DXGdhm%te#-2mSG8$^~(l(5kk!=g&Lu zK}=fjaW&oy79v#h&vk9jDo=z}02*JF$V}dT)h4}7jjZ8y906zA-=MWc=_qg5FCXy< zT}FN+_MHsySdLGhxeNsPw^(HNMUEWxCi(Gswj?}`Nyf-3DELM}=`hJzMJ2{yoA zIgGEz`}3J;%smia7fpZD4YvyD@^}1Z3yXSm-7>x*8!sr*!&az<36abU(!?}0p+vEQ zIql+_w1C$%{afd1-nDs#zh-_&yR|Da9yL&$lC-6)%i$v|*^N$D>YvBPB9z$i<2&ac z8l}42kRR@h6=(-pJ1cyV-}d{O{{LwKGKU$WRs@dDDa-M=wT@@GgY&?`8cPLI{D>t; zp0M&q(r^Hnj!9fZ!%QirJ%=#Fc01gkbS88t0azBs5ILoDp-o7JMGGTs^H>$6@_sWn z<^doTV4GZ)-m|NEqS0|VE8;Z>nE`?Rs8%O%po-%w;V!OIi?U)Hk2zRywlKiR*>1|S z3&VBKyv3m(i7P#hrT+MxD-iTPM@zpDG6$y17RaX@EdpKccKR40d-QEvtBD}bm*AII zeaEA+s-F~^sef>cyhSY-tUC`{lSbl`zZ!`YLA$lF?IIo$^?G@xiz#=ha zE>tO}Cjs&R=CV3Li;8o=xMWkkvJY*a2#-bIFMrQpq@S z=?IUTpx0fGNX;HV`=yqCWhw}V`y6l;9oEk@ei2Af3sqP^I{$=E;y*#Ce}2kxoY1#t z4~`!y*vnB;iHEU(5Pg9IXLcH|45&@vQTR4T-rYw*iHVS@>&4B;v;H}$n@twZARM$= zxmlw32}Fu1R)IQ9Ay>(lYvK^X8%+ofp1&|h77p;{JfOt(q-Ha{nz}@U6tXGSQV~9n zmjQB^rO4(Gp3VvIoM&=XtBc`cemC2a?GyqgKH)Y^<)gB0_4;8M<#2yLaK5^u3rVQU z>|*OMKwOL~Z-@u{1s!6slf+8;ID3cT;ldX1sQ7;DV3%cPhC|+fcy3)W8cpLpZKZj0 zx332lND<$LYa#n-*4>n zcdg(Bq1+(Y55ayEEWR4mu8FL6TGX}uVHuL2t!}jDkeY*m;<4@94F;oKOq(>CxGP_$ zLT0hoAD!8ke{2KBhY~2?!t#m%?<;az(cxRYq@$HZaWVn%iIATDWwfBi!H{~<1n4{S z6Vg`1?5{8mr3=rENlm6We01^pEum}pH((VN2x6yt^B#LLx%knQAXv6=;7XYR#U{Yb5tU{~ zoU)3b>aU#9hQ#c)Kds+)lWlwxEbDBpI2)rVZuI3UtjLNYj5ccv$)B|qSW~waeNv{^ zOtEUcCW)0EZ|y9SN997JzpEe6UevPKP%Ug-h=(v^LMi<2=wtd8psKCB#V|A>4{>S| zifI&WR+?!d#+sVNG9K0K3FWiG>BSd#trDMi)ITmO+ZFk`gSU9Au!3Wtb$Jt54I>Z( z<{Tl`9I>8@rq~V*!dhkJg^DOCNI>!uK}d|4{?Z%HVoI?Kc)YOJdrB65_wO-m7;9Md z5G6ng6(D?U9y#8TB5(#_l?o5irs08ezi1kfq$CiO$3K!H&4K)jsXmcQ5j5IzRaCN< z0aJ`qu!?oGjEtMI^k%>Y(o#-{{X*n58SXB!+WP7~QvWAmM^U#(yPok@2@1h}FTM9dc^)d`=nMb2tCA2I+P_KE0hW~gzE>KraL{>T=UvD8!!|^h0*i7zlR4 zwW%)Q{W}=VpnY6uf*{H!GwXWrHBp&OHgI4M2G;z8LK4YU^<(b^Vis#LIuXE#TT|Kz ziDjaPv=Qm;`7@U}tcv3JogN0r4d(}7Lvh#`T(l{fNK_tfC?UMLGnNXM8PX_%iDM(i z&{KC_+H+h9bI*u!;-MIKNSbl`KXHFqKk{KpysR-6kf3s&3F}i4gbgq183jpgD)jEbxF(M01*%Gk<8t`+67xH801iV7h|bXc zw>y|) 
zk%eAI4je)8;UouN^eYYJuGu6HB>B?vTOLb_AUQDqhYhp`*GMrm^hWJ@;3+SI)&HaF zRfWempA{S|v}PAO^wLle<*OLoyTttKVV&NX1`@R_Vw8{5+=9Fr7SD_VDQ>*^x*Nt} z{ESh;OvVj3ZUYJn&=kuAgP`UuAf1aGK$}(m8H%g?N+3iFUb#X^H}J>e)rRY~eHR!I zcjoj7s2K5N8ejM+y;>g4k*;8BUE$B*{8 z|4kUI&X~ejPkx`b%F0V&3L5yv6S!i3`_|5$+eDj#4SY+YykKE~VOH#GsvO>-iOmZN z&FR)Hbb%U&SL@unB8AC+oBC~}CkpI6{US$-xkgb?k!dyW1yD5Ecobsx?)k2?#<| zl37kKZgifsrP@KHJM4w-J&Y8GN9hajvv4Y=6-oaeb??|FS+K2(cG<5j}Eow8)rWNz6&LlJo?v z_^K1*kD40~q34xwC)nc!r$nyG*~;ie?N8C6kjQGh$#4VK0%Xw(dG`P2@M^9k2$)ii z+fXn<6?w1rBL~Z4(hBsaC|M=oD+qYn16X)%#IEe(1VOd7^z0FIX+h+1XRJc5?nu;I z5UzIE7e%nM)qJpEx~(95U&qi_HZ=n=pjlT)x)D~g?FaP9bhgdm@??L9?YrFaQX?6JkbES~ueVoS?Moj&c=zmvnXkqgXx3!M@?9wJFpE-XNrJV}J!knu`So z0b(Cd8$hzXctdy(U-3A(^xl%?=fYV_rP5@rt{D!>o;t9VGnMSx5E3zUT8pe$#D-Te ze%J0)EnPWnhr8eyc}ot?r#_b-r2p|?lD;tICZ`*bpsK%)3>r4+Kb|&J^B9)zKSmPS z^?_wi0#y3GGQ3j(VK+q9{%{DJ_fXxle~_s+@PaYj=L?UdG}Q@4@chJvyKldVGGzBd zai7)u=B&qlb+EYYsjt%EstyzmET6^w;t_I+-3gn~GqZ#TbNFxWeov_3RtFy4hA+aD zJ1Iz1O?qUq>vuuiT{}#2#bDdr8ygbGr|CxL`oA$N2Sn$M?dK%N%;?UAY4qz}zC7e^ zD0I<7QUD?EP(T5J(mqP3?Q3ko8etNvFfmJqB}&uuZ^nWK&Mna=V$^Wz*HoDp>NtO1 zHjUguYXxO+$@XYAaCwRKBrR36;PSL0nE+!YgAb!qif_<6>8y%gR<0x&b2ZZ^JAc%( z8e^3SnLruHq%P7-N~QS9R*{>lHGBf-_fR|jp^n_Pg8D6SHsv0X7 zF)IdDlB*);Xr@`0QpJ<#nuIRr0-^^7$%W53Wo~rB^VbB00+dkoW@Nk%olg2ONa&{w zAZ8L)l=%Ea7-n)qmet$sD=IuT+Tz<9Y!C%|KXlF;649)y?!s!R!iAN){JL)~P1|YP z){UvJS-0;Oin9N!b1$#|T5hkz(y|~LpO_X$tk<*`-%|VcXOxot$LV~0LhmyN=(T;8 z&#A%7aq;#}=Xj~%ACX%5z?c<9Dk3*`R>k$nGx`cQ)xxdR(8L1eCNBG$V0jhYC~;Gu zNT2Mo&o7s@{ZNXV#RZt#Q4Nyyt)F|tPmXiDlT*Vt$xnCm0Zk9d_Dn5TmH|v`2q9Z7 zJv7Y=22wJhry;K+HqYQl3u?-R=%4XdM1UzxF$Yu!Q1U2IZOGX$4lWGFvD{)m3xTNhzp{CZ}tA~Lr^AN&Ni8W&3 zAEV5qn3BF?SL@JJ6wnDWsw3ncgd$x@T~ockPz(VA7b%l>v6}6%_n1*Ff*F*eAvp4wTJk zE2h(Zw^U6H3H4{IS`I8uLIhA1u+`r?SlS5zm^|kx^DL=30 z+m5<5i%dIr=xPI=XYOwn+EG~K(8`hRxL-)|f1xO3k>#~?VGYEk16yIX-z?GH`nC4N zD9Z6h;sPmG7TTW%B3|h-qUCOO5p3vu{!7b|{!qd+*2uI(B&F^|J!^b#;19p}TFapw zUKO;g^*<4brzC1v5!3xC9FhN=7*T){fB`fv7D(Jj_P(3)aZ@hPCal7Q9UfSt{*(=j 
zgqI#OhNo)0uqs$q520`zjkt=|#H3X4-Xn5_)KXbQ7QgE@Qbv?0l$P}+?70$96%Nvl z#OBQkp)^*-sN_lF=G8DJ30lyN@9IXAbp>9K2gcP7f@5X+d`gWM$bTmdiDE)3&CKsN zVb)+RMk$`WCQshKYwbuUmpjh6z@SRa01M(Qx6j6s$m`Kaf#9n~i&~H1*?NFA?#f1$ z&=i&!i!MhLT}IA&hwmxWEDohYOri*aAXu_1%NzsYvkxH|6VgQM)UN$YH+`g};X&tX z-t6VEfyR1T0-&r?8Yiz+XK>2som`MtZV0mVT8BzP*99APD%9*)h9=|r*VyC!{bkc@ zwe3D4iayH1n31*n8+FRJ4VmQmv`^}UrZ0H|yK0*#DOC~lL+jQ^-;eu#VRj{LA!zQ1 z&3@(3ZohE9rgu879hBgZJHST|Gj+K8(&IMJOMmM0DWY`e9nn)Qe z;AqnQ_}%B`BB<2hS0#(_uhygcSULM!5G{udz3ZV@5*E3Hogp`eGZ(qN!P%aHcCaSx zm)rS%H1>;@O#As8S?J9q(J7D7^ z#>Vlx^SL*-o8#i7+m}KJBTyfBN0zV0V=;&AF&u|=WuS9CFL*lB+>qy7%xWw`Vl?+J zxz-(O3)rbyd{q4IZx~cP4;=jTDl~SBVL@$|{iqd1W+2J5p1$rpK#jo=Qxy120-(Cm zOm^lutHu@$!l*?SKsm}h6|y7LD2~d}im);{mO;FNggA#}U&ItLNCdW%lo>B7#&(%e z&%Z#D|16pyk2;7e5f^`~VF9CG%H$7j20CWDBv&pbn9bGS0%smVK}^VY(O(x2!8;4Z zrIhC~9f?nAs;WVz5olf;__G+8){Vp?bo0FNQPl%Qq`>>_Yz5q!)MO<{--=cN8Ajrd zrzg$1yeLZy+ZuYZ%0hc+K2b3yk=c>04~EJe{u|G$btUOB{M}bE#8?8}26!!@MJS$t z$+b}}nkO+^^|*Pq=sD{jYN_u;Wji#Vd|hudD#6_VEqu#3ev6^8LB5^r5Wby40@gfj zhyEiwQK?M2G0JyuvIaSJO*TY%;b`6ud)%DWd5m?aR+4Cbbr|2Eg>HxWn2dh8dIX%f z(<{m3MQ6m(&dzN)VrC{RFY=VTU-j9kn#tUP_@fH7y3q?e>lULVPS7 z-dE)H;~ak{G0_zD=idYNqr6(TX9eK<&-Aw?6`Lz&;}u}?#P zo_G!jO$qbZ#V%q#=UojZX*u$nAk)3}<1ZM!J9OW_Iz+7EUFH%nTi(Qe6@#!kg7In` z^>(#$glicA73bRZ-WVNQj&X@h3`z{67dqd`;as1m~fFIj&{RZQzW=%q;@Kf=m{Ece}Bly|v^ zEj)z%b`EADFHmwG`{Oh+%--m0)7{vwGAcis28}hEgVL6@ts8PbY*k??X&KPJbC8QAhxB-?MWZitY|C@c9_jtrG z8ojx_H{s3_a=?tpl?M`OUlhH!oh9HuaT~tAaPIZhHfGI)v6k zXoMnS5&M1v3dZ?ED6X55x-rQV_Ao21x%rmFYXsepOzdCYR4mr_t;U7T9u}>%j2fqe%Y|>5VMj+OaA^FS3*-b=6$0CByS7wIs%+g zz8L{`N(!`tEq2bkCl@u0YJu+?)bEuN*QUN*+|Wk5Klp5yi?M9N?o_9Fl;-=T!eSiJ zT=5u!HfBvhF@dCaIR8>VaE8QlZu648qBJI024_5ks}aDQfs<1BlAzieEBQr8gYCfS zl0Rg_bo3Hyok#*mf11Y!sb5+MH#(VnU@vu&d0$rzL-s(B3{cr~;MzOq#bY&$S%N4s zF87^2v^ov%h)k(!w04hsxb z8G%zHf53MY{*a{hiC8xo-c%#lJbuQjfK|LlF*8T%ON|sQGW)Bh4m<=0v6S3QC`tou z+nWf-^ZO2IB`}QO3mJ>zSFf)%p+YRu7%s@YG^Pw;GMbrIuM#Fu- zi~<(mzsl-P==k9Jc<;o%U~_Zk!bi~b(Cy@Xnj-S!m&#O+u5YnX-$Jzly-Bm>FW(w& 
zilg+mxi?)H_9hD^t867o9I^XXpAUQ}VWK&QmY_!!(D@rnt)~Vah-lebBc`E|ZlVh&Luoz$e0mxUK(Sl}y9p&Ckh^ba{C>tW~hYGHq$ks$*kKoDwkHK7IfN3JJrD z@hGm{i_=pT8b3v3_5L8nr~JmE0Y>y3) z<1EcJV1PB(7o3Z*i@zw7!sfC<5V zU#A+0fKFWfPqJv2Skn@2rFOXV)0`YBOFYH~H37s9TfO8VaPQk!)vnJT#trvPHTM@3 z#}9zHUA5$lc%2m!zHe!Mb!&D=Xl>7+2EwIal+IA%Fzn|(~u3dXO3zEuu zIM1-j+pV`m3DMO4O53dj}NAqeo50A@xlivyZ)4U=YDu!TU zn~$_xo^C>d;2Amc+1&`#7a^wH5q-up`5rSfxR?(nCT(+H%iVTJyzqoJza!*Xort(Wu;YVRbq9^M9%7_c2v+ZZ`>p=1&%Z5v1 z!fTY{06DhP3i@_Yp03}v-`S+d>05qKb|x;=Cx!2Lk#l#q%{Sp~4KFXv4RgA3$A_8n z1hXjDV83}Vau^hd+MV0)=b*rgzK6jdqVrRU+%1#%w{xp}!`N_=f zxn;As&pucrSu#~qbW)LhbJ3`uk z=B`ZI5P$0Z^R%q{+F-(q7dUJ_VuMO-XPP2y|J-gc*f$7(^>~AEqUBYP))X>eE=nb3 zJ=PfMn@E}(ke-8BdFv+^_sWji{+QT{L$eUDAvq;5F3x=HL@pDPsYKCO{qvWzFoChE zDl_6blNdshPFPkPmGw^iUr2Y6_Pc{3qzZ6tX5E3^ zbQ>6A0AW=9jZ<2JR@=v#?p#Ip_8&7cW>{>C$H*0NB@fj z_>l;u(?cb=6~KN~05ZC-9jCSw#&8t4ZIGHSXV9d)EFdv1l(9Oih*(z1-Q54<469d6N3p!y#itcRmYdUzZ+Fz<+ApnYH98%A&xs1o{L) z<96RnwAVK<#6!4lMEJAQ2WvL!m%ND6JWh^HbrLwY$7zlX7RC^Rwd332#6@!b&2HN< zR@E5CGO_qvV*3p}TXEyR2;|N$-X+3)_UheT4Jq^xVQ$88_F}B+U!@)5vVzvHv*%}J z*1Y*{o7(8lt152@KC5Ij|9WS+_*^tk%@-taK1+8ou(8Gz#>(HWf@UFR7~GiXPA{L8F7pbX)j5 z(k<|Y3m|Gfa(~@9!bOIK)z$n$Io|MPAg}QO_VX^rGLq7)SK$He>7?KO_@n&_hVSbP zcckq=^fcZ=2_HMkexdHzL-)O2=xi>DQ5QuSZ?#m8co|sEZduqyt$l?KnG~^C@MNp& z>@c_d8q4lnAS7j6}e za!9AGdVmg@1!Pr?$kr>&P1~DFq^MgeNv;_-v2IKi7cpIqwl2uF`5aonld|>-9gv{`u#RgScb<7oj<2C@-fNu z@@S2d%I6HUWu6gj@byTVdo^e`iB-T@!TU;*qtX2XL)QUv!hKh`e*5FW#hoM`K2DgE zG1HHK{3FbM^Og*XLIe%B>@ha@XIsQ!5<4U;XjaR=3WOh2)i4R0kN>e8Hqn_5Tx3;$tQluhkmDinTS0lB5^de4zQ-5`veT{zw&7wVjCPM9! zcZe9lC)B&Pr$<#lUK-|oTDhQVt^4iTP?Kv98|QE=2*nk#WYNfga z#T6?bCyg)OBA6DH)bSp78tj)pfLtKwbYoo5{hntQW3!-p>)VZ7z-B#&L8^V{@oWsqVAnzS^4I4s^V!OH0pJSHr*aYpIyL2|{gEMp?{hz8vUB|4lLX4` zh|s>m)BvLrr<4!_-k{V8g{;LzoGpoeo@kVh%d z0#p$8>9&vdnvP>de7#;t4+@N>b5zm0!A4M1-MBcM1rcDfBo0YkgzTanv%da;#gkn! 
zD<@xhvo?b&^@*q6$X3BQiEyX%d((qH<2VwmL9zka%7KXfWOE znCGuA^1m>4eP{!q)_x95ITUGwyJ9U4y{#<~RMcX!mnZfEIq|J|}8lseQaVn6KAYm6{j}pi&BTBCA@#%y#I(-?m23*2 zpxabs`vCiP6E?2T4#5p-KvJ^XtT49V77#+qi+`0AE$JH@(7ejp!7@^hZNK2lg)`kw zpU)H8>cpbt<}HIo^Vs@zIcS4s^1cIMY*Fn5=_S}mPJqrybx?Tmg4TF zzXRW|8}@6QBr@*g`mU$Gq2zcTh{(6yNg3Ba+Vr7(?9OK^4Plt;JVlp`=wL(j187U} z5H{VpapA9)C9t%QnE!((0RSfa`??lJ(aSgcL^r+M8tFd_xD>mK<~O_0qf%7@%lIh0 zUJbfhs9+Hku){or%*ptS?hgq8mP@oBD&$)pCq5U^-xeI>V8ro(lji2uqGb@H zN@uPOv&LtxvdfmGHB-bmF5rrq;+*~6%BspxeRYtzX&ZGGT)rRm0cGZlEnP40yla$w zI_lxT1&H9~6I;y22$|t}AFN<{A2v|b`+(=<3pKYB{x8%!=ZxZVU`gnY{>|j{*r~S) zv?G50xbiON?G<5=52$YKTPcwdy1w)ye>|d(msMKp!qX&6`){5~1#DOSzc0*1!A*-1 zicUV5J~PrAzq{d=|16?n2yBPaTz~mDrl^%zFrYhYSZqah zsP*aNl`id(t`N>WL<%-ZuUTj8i@j1SRv~NbmXjz8e+aD1D-sqUndJHfNp`FRG$Ge+ zzQeWz`;2T`F>@1fF86}l*)l*QbOD)}b_WlCcNXiZa@M5)^r|!ZfPJnYCNT$}vPd6D zF_CY6;AgpaoZ~MKB6DQ%WClBiD8C6UkyaKI^ zD_aE&8Daqi>W}?fuR3ZgVtC$1>4W1=de^h*x`U9O-I_fAssZ1q{YJ(54}YK4kp{E7 z_O_}wnW}Ysf!mRjky^!Xlr4`#F6D77%At4n+d<$7o_pnqv(Vvy1x1Lk1?aS4u(lQCoIQGi~ssnM2)(aq4d-tgPS?r)bhZeuidc}5#x z)z5tyZV_WyzB|aQ_{+YvpkaSbgtpSD;dGU#dXLQD4+YU>Z2|sCet{y?YaXo%9b`uO z&ol^q$-rCeGDt&t5zWRQt;?gL^W_G?H5dGFSSc#$m~0*OUZKF5FpUUQqDbS0K!vhf z5D+|XAha4+eN|vd4l)4~*+q+kYUd10hQFSb+pB!D2HnNhB*k8tJlPQ1r_C z-t4i-e+S&+v7cqEmU*5*WIg9V%PBBiwf$nC0~T63LZ{HonBSQ}H94;CZNKf>o969E&wkAU|g8PyAa#%-$~$-0}%Ie3I(|a0&QrKhig1`bt`GQ}mPJ zvVQ$`=T19X??&sRAP=*^hZ~;fMyktkm<1A=gZ*IcJBa2{Ya(4m*l{T=0^$XdfSXc-47*HLxI(pXycY$#n3REw5$ye#piy1d>UIB1m37;v_ae zPOb~Z)H=In6MxZyxxYJAbo$~)r1>JOU?d9`ga2$!NI9aP zlME7X?u^ga3s^oh9HVqGK?oiYF1HmCFd!R>en41bB~duT2L1I;m3B~`HY#}!p#7Qa zb3kC@9jXxn^oEBOVj0bMBdWQS+n$m$O~V-p3N+T%`eJi0uji#4mTPfpO$Ia2^$AAfY{72%Iy5hkQjoWNDJG@7S3zgc4o1 z6^1uXiVb-sopGpO3&eV~_kt6w+49G3f-P)E{;{w{EMYTqIO<%S_#<-h>b|KcutF1( zh_Z^?zJ)S zAQHX)1CQ(1ONJYpBRPcj3Rsj8=qfp9bN6Ng2F>vT<_;;e9-l13X&o$8gQ<4`IE9o3!?Po0 z3#`K%h7|{rj?H+?W-%6odHU|W1R!6`r{Bh-gkeMCo+0>#^QB`_R=ytcFzYq2i^pk& zGMa#x(NYz}WR`L+Y&zKo=N zi^2A~Tb>-h=W50HUUnQz&ZOdNzJ<++1((&@xDh&LNl_wgE$pf3H^F~RKwMPW>6wji 
z*r_O<IbL7iw52aE$R#-Q?hg#NLvbPsERY$>pPvK1FyQZr{lex8<@YGsMnU6$ zB4Zsf5+E7@ilo_!D7dIO>A&yVe>qnY2#|8{&Unlt1q5wPd)rNg@FY=;=^?h11F7{y zV}ar=!g=H6!ZDD_Kuwgv4S!03B2S*nfC8!G0X&({EWCNEhNn*+pbGoh&!p%jz$h-Y~pBbpveW8uhp zIlm4q%lgN}nxxH5L@&A~ena<+GoxZH^TTljwSfTAR*4`qb=f1yW@i=E#MmxfEGyA# zDSG|cY{*n1g%T+2?whye$3Z=YKQ-NK{FUT?D*zs2iUs~&LnL~TfB|QoQXCeLvZ0}Z z>UqeE1CJ_b!g!QG<_(#(@xFDN5`08K5yUAPpf(k*E@2B+^ZwrnK$lyItY%WGb7CxLbVedg4$;d@_`v z)|PV!QjNY*weIkcX`SE)2$|okS;RN%3e6K*0`%t&YTV)YRm9Q7TUkb z*)OLow}(YC?FLkApeH-rPmS(_f4Uw2!b)@r?Ax<@5e_ATta$)11k6>x^Sm%;+`;nK zs>{kZK)pP96`{rYvS-Ilkl*9&tx!G=GZAZkhGi?SI(r5H40B5{%p)Zhf(TWK3db6W zTI)?a^Wu&;&x9aVnhCQ^g^I!-3LUe8%&-OdLv6%nB=|eq<2KCHZ^~r8ZIylQe1{^KPn?!XI_ zV^!ebFgPI%hiH4%$54?=MW;JF(h{PgGTYqJAw7pSVvIA{V2DF_+eqN~p?=;QIuz0P zn#2R6a+yfI9=w7L#&C*q1u&Z@sB~#T^cRWV!K>N@YByq;*92!%e(NUkwvG?LvN6YX zy)h%U+7g~v@IEd-Amt86h|pwbhc=_w7}Xp`xK&sTZ}uf4hftB1 z0FMG=hejZu@>Aj(Ag!E!Jc)nGGA~+-fhe5eQeoA-Gw$3J_wCArC0)E5LgTYQ3Io%G zzu=56ftIISWS;9Qp)su#44l_f#yiV)m{R5e%^9ZkS$%5=A1)c9((i}k<89*+Gl~%l z+d>&o1&Wg-bh1K9JPFC75xonSQ4z-*hGm@j%Us})iQ1&-TP1|=kqO+4BjS*SA1F6P zX|8!Q>y=fxl9xtdi)z!lPUS>wZq-a|CNy^p#oR%s%j}Pc#}U%RSeGM5>BZ+r&MiO~ zIIbi^^!N{vBgIStXJp`(S=kGw3hSsMmGdOKP#O&>`?n|)G9jOYBD1lqaNx}{Qn3iF zwZraKOCh3WBQl2mV-5YzfZ*DEXM87>ajXM#R6<)%KEkGimpY!@7&Ja>58x>P zo1-H7WP3=-VC!FBGN6uSV5}mU$ujeUupqQDx230!oK=}5M;m86Gbx5hDeE=gu_bw- z*%?{N9`HEe$i#F>TvdpJ5&_C0bCsYhY`(2<4M8J^h3(0dSHRZ@>j4_JY3(PdZfk>yANosZ$gHUwm{KkJBnhe60)^4J7w1Jmw6X+$B67 z`1AVWvvnJ0e}j8gL9aBG=QzL7dweh0|3vM;Rwd;>zW>oj?uct#X+$rSg#V%>eQSQ5 z|3#~4q@;JO)VLR3KF|Is^^Z2y=Y4v`v~&Mtt-g3v zc;b}=k%iR;6G;RuS5gw?8@0kmiYe!_o+U}+t(IzgOjASmJUY|iKscz~SlPH$%BFfg zz=JcXz1A2GTm=-Edfd*GR6<=rFVj&&(Ujw@M8I+;=YcGn>U;O^Ju4G|&kirT zNP}N9CHTTIK+;5j0YRFG0TxUZOk^=xJksS5l*F?07AG{lSABb{g1aF>+{dnHum%UL zY6!$s{iOJ=jLL+%6G5hxgJ26uCHa(zUewm;V9A7Ssojj^)Pd3JZfLC9!&bQT2c}~N zKMN}FladgDZwGQ%Nq#2ND3lXr^Ff>+Yh(7+dO!*Qy!Ym&hXwI%O{ zBP5YJ?)w*S#ywK!RJm@JJ!^9pk_*{@xFr+UPK};NvZjwz+bT;VdI?nk`{X|Y0q|i- 
zu$Ux&t&QU*VO;Y$vI5CssG+lo4v<0$I){MI5~aU|&`$QOg@NQ(QGW=K4!b(?RU#Ms zjLEnw(_wo;H3FSyXBA6F!gE-P1`TXqPWH51PzdQi3LxBEUs*84ry z|HwJOo5c;C7|+MDs6d;SX1LY~ zRx+A)Zy~8_RP}9gCi)PHlWT-=-iG2xdh=o}uaSJM4nyEZv&}rBsSbcLA-8GYxO3ll zc306dmEb(EEEpX=6h4Kni2Y20bpdo|Mr|hTG{@_zZ=9~@KWl0BETX|!JT7{>t*(EP z0!&5b=H|UOy91q{Znu&iVkEDhGI}3N^2+F5KcD3jIns^s#$cPh&nC3jDSPwG`pbd4)XoxA&i9(i{jVvCrNqkl~q zpEo~+CMF`cIQ)0=dZ1XCbX}^_=>8K|0FhP*4t$Pl0s;_Zv1bR7=_>U%K%|cb`D|Yx zi_vMdQ3DZ0HEGz19*vbbtOovXm?5h;v-$D^@YpnRo&F-1@Y_QOuK!+*GrW~b>)$`B zKTn)W@(1)D@M9DjyL~(V8BowZj~oAIaQW{cGw}ofmlr^mySdL5P2yw!m!9}<_x^3n zW)hs|;VRBFZw4yxD5*wz+mJXeG78GnJ50?co<$MH1ywuJ@Rk?e>x;X1kM7fx!qSJ& zTa*h|2^gmvN4H~${p6s;NUJC-!(Z3CN3KhX+g2S!WSCr)Su=8HSva55<&AS{x8x)E zDGhUu%}VvzCAf-N5JgW*Dt>#(;bueR4v)sVDKi%ow5)ziTs9butpMM9zjinZ7oBL9 z4|-ToPAnG0DhWxhre**d;HmYdLgZVoYJ_Eks@^xE)}o+@9;P4j)ocNs!-@g83~ckm zvFF%Y^%i0}6WygK=^_e^8a6AcL7_)wL=+^8LG@&-7wU;x^?Sj*1G{`Lgj^n?mbcom?imDdI-ij=5(6$WR>j(MO2`ryvunF(R#{ zW<1?^*)c(CE?d%R%g>gTuC>gfae+Cp)uzH!?tA}Njf`oLgE5xrwD_(D#tL?nRV+>2 zMb?*k4>Rl4T!Ea@-3m?7G2f)(3$tRckJa>v#k=gqSt)7Z%~w1EKBiulbvxJ7z*7$jSU)EC8I3AElOrj=annXb|e%9~n$Mns+HF zH^KUe>*DL5K6X@(GCtI&AyqRlHyo9z1f`AoS!_dr4db&L=kv(*E zyK~lbXZgnjn=ZVx;id6$Xx<-uO0T;Gq7sy(8nq>b4SCVI&N;OM1G@yCbPqC6rd z1q}+WstFBxHdOCL_`fZyOT||aG{$@lXv<6QILk}Lb!WKq|EBq@ElD>ef0`6|k3}|L zh@Deu)>yo@G(5X2%B?ngM*hKg_@o!7Fqt+lJoSDhkTM$o)0}fAgM&vjZRnvf{HUyE zWhO1=;}_biG#_urku;Fs2kw& zJ*BiXs&>qax~a;|KHJD>^DJhwa{kG|7G3)a%*tt=1)M91gid_YY9s5Krd;!~==>k_ zODBa6%R;WoQqT2&?H*fVY_;Ubw*F`TDuxzmTl85qt*&LZdycHbyzpJ~-#*VQnfdq2 zV5h8^3j_sJ3}{or=lVUsgkoOO{$2!Tg{8uK^Xa9OVY~Ba1H@rwV)o+qpC1U_4>7M; zHVa$cCuv8oeA^n>_jmZ5055Ou;~%clIGrzE->-=S6OpdBa$2_=etyh+g)x`+Zds-| ze6MG24B5H+2-(4Uxn&|8C1|oA-QTX8^`;;p=ge%rS6e3^oriLH7bMIOGxO1Z<#ztggK zA;d0vFt|$K#(BQgF`cfzH{4QJfnJ}(e%^eJT^^Y^J<&2O(-h-%yf}Y;wcd}oiPjlu z=5v4LUQbMKkamBQKeM_&Rg*Df?*K$y28zulOon4<0$88_1dT9zRH;JIX?9@I^*krN zYUrjS$Za}CdVHPyjMAVO+({lN-OJj4G8t}~$M?JL`QCMJO#C`IcX4;y)x2lQ2xF#$ zyCN}JT66z+`4j-Oqg4-}0a?-0`Tq+;~4dCc3{}rm^bIV{WYoYJ2w%*C(b;Ww1gsX|twd 
z$=YpJzc)P~cKvW$WJJr4Au0B23H`v@_`Fs(=WxKQypE4%)Y8#9Pzb~zoJsVQi4 z<+FYmmWg*!n`UF;XUET#F}?W(bN>N#H>JV#MKP5dt(Q%ogf~AW^3hH3@#$bX{GBv- ze!D#_IXMlOb9eG_V%eq%+#QfklSOdeoZ@l=lF+3MhMR&A{jSZv9jQ-~Egl!v^^*4* zum4SIvL|{q?ETz5`=ir_oKb1H92hW1(MH3S;-zPQj{iirKU?{6WjgaWJ)dn8{2E3` zoqGS87u)I3n>HHmlV5N64ugRoyeMDeeCDfKi!IfSqO~|C%C1rSvMev%`Ur5wEX1oU zgvX4Fl6}YGD#0s!@!J0M6HCwDp&RY|f|kexx`gF@Jx-c*(iE-AzMqfH5*_BI3@^K|nz%Ia>y_d_0F*=qp9#+op&gQoJNw?(! z5IlEJYEp?xwPoNdjGQ8%V_h?EzGuy#L`C?#x4LJlAn1OaX+L@$zos3KLpM2zUoDj2 zaUta_zrVbzF4_!lnN_B8-<>kYk-48;Q{eeRoDi zEl&4r`!DlGw9GD)-MLBOd=gp|R^y%gX6@**;dxx3p~#Hnoh*&((XjJn^^nHDYCEI6 z&sq{v1^G)>=3%-7ZW!*EU@Ue(aozLm zO^+bDeO|0#-=vu9_N1qWg|9ICR&CVF}75niq(+Y!1!`}7h3>+U@%9h+8Dw4-hY``V?*!25! zIO3@OSvB_JK{3$_bsytdVS^)cJ}n(q5lw)1X&(dO(K)fz(KWgU=8 ziyrZ1O5c2~AA!*eS7?(HMpi}htP7mYdAU~&#~QP*yvMiDSb~6pEj9l8zQob1kb}*` zsn}%fZTA)7oGrWbpL?3yT6%ZGR=*ZuM|he@)M z|Bj@ycMHI1l4Ofhd>xV)oONy`v>f~@9TC8g~- zgsVtl-T}%ov-NImjq`c7Jr$@lb0xDbz)5Kh`2K!_a+wZW!raej?_tX-N!OP%@c&t6 z{da>Yf%d;K^8dut!~Zvn_dk_=@c)}^`fprp@c;1#KKQ(jftD8*zi#!AS>d(w7*Rz~ z#lW7+aC{akXw)6@@(Ns(D_ykIHr1Qqtv9{w9PAu+YCs_J2&Li`LK}cdJ-*nWBzvVp z3%~CH4tgo}*I3h>(;V%`+TcM%K{WsIApMW_@IODO_&)Ui8bkd@`V{c?h<+64zXSjK zn%=lh1o;1bjmfw|4D|mT$ArbQPWX>{{Qou!g>Ct-70UlM69U)zue9p_Hv9i^H%=`6 z2#5csKtOtuE!gG6f1q9zPei!9F0O};q6VJFZ9+0;^7^gt82 zdqTtK^#UQX_>-zO1|ADnZ4i$N$+v9;S&tWza5)LRXc}WIw!1cU$?i%4dusYPb6z_8 zGBs6ZeDZQGvci*4KZCbn&7V%wP*6Wg}+^1H9@ zt9R@Ed#d)S-E~gw?zMZZ)#6xeoH~ffr);6Vp6r|C3WLr)vaOF)Ce>JkkdBWcjftDF z*z5Vb?+^X#1`R3e29FrdC8k0$Yl1Vm;3!j{w`(?}CR`duG`ZOPgp%-rGLIDc0dvra zkZdy)@y|4)5s1CTg_KA~Y}le1=nY|?qOV_bNW~719*1C;$F2jlzNh=Eg;3PQoQ{Ws z_{Xkq=)R}#npO8bAxQR@xkK@Hf_6}d?$XSZEq0+%wjWM|6 zzCkzT)GAX-QX%%2d*|_YQnu{S!4aowMaaZm-B+>#1P`M?$la4)%DmIaY)<=9E7QOo zeYVl2pDURs@xQ);lc-_xi>u**rCT9~f~b}ZpRgim?%~Y}rwUCnXNd#*I!Wq4mJsu> z#9|?#;z~~P?`bpl#)Ozw8tOvZ*h&u^UFC}Qn6EXWr1dNZiGCj^E@S?r(N?w!pjb-b zi?!0a=Uux<;PYOoDQxSUDVzMd5>x0eCd+={p{Y`)0Loo1|5dgN?|RN1FX-jJ z*Q8!O_mxi9hJIJ1R0}on!k4Nq8KIxcO3fnrT9#*aRU!DLr?5RKXa_c%IbComSo0NM 
z%oLt}&#nH+2~bwsR_AFpV_M|gMJCD{pJKIKdU->Ozt6aQdySLiC*5_-7dKc5@MH2b-9wy?T!5-ZY3l*QGZP_(|^@0^apNbl~~$Z zsYEKT9eefa@vwBTE|wihcZfK-{7aewCLT`>D=(~O-MI%NG`>EhUKs2a@iscDS+igU z!0}X-Fm|>9evrWYTFrp-cwI30WSw($K@FuV{s+M~XU~$`U7xQE!tn3ezQA&8EfG3a zQN2q+`FH5JVg#^%+G}2jl6Ch^Zp_u)>t^XXcjhZK4A{?5RWinJU z#ZZ+a_n#;6y(z=GFZNm|{t;C~Jg*(Ni;M%5elLM8Vy-fI;6^g-chF8JigFq=%C8&M z0?9jf04M$72w3KXH8;1wRqfd$aV)*`YsVh6cd66qBV}l z;~hwB^0}~@u=T-eJb11w%GL$k`7*~~zzL3~^x^M(wRC_};6?V8L521y2j(zZ z$h>|XhzIS)*sp5p$}yAY{BvcdyC)GA)}cPpeQiMMiD}0=t(PK_Y*S=nyA?lx3nlf_ z!(^c=5eKc#Ejdch=mm~^1W&8JLiw#U>tkKmPz7WdGkaxfJ;)8nhcMNAh?cpsd0!+i-sG>4a1iM@ z4>uY<%nfm7nLyZNh_A5`%@!I&YC|dhRp*UpM($$Ns|Fb$ge_(an z*A8JvrxTr~Lo4W>vlc0ZD5RwNZ^o#{TS_LVXnQ;oGP5Q4I4UXoD>_Lc?34#vA;+)@ z8PR36)LZn`9rHD zB;3R|KAah?#}psi2?P0=L4IPc3%BoXd4@63n1T%$0eKS!9mF&VqYQ16TD0$*F<#&2 z2u$zTw@|Wq7WN=ym zbjlCAVHExvxJQjK;J={O_rYsj14lL7^&^;&4f_n0w$?2=Bh-u}Y2u2=Ny(S+>rqIM zk_+CacB79@-zu9K;7Z3M zODR$NZwyaY^~zu0YKIG{%PcKuGK7stijgR5CFPL|OZb41pt|DTH6)T=lhA=+(+tN} zB~gJ1`Ui5#1h+7-sE0z8Gv{Yj!CrNYQ)PUzkBSepmTbC*N{!>r#3Ny-2sLLB7`HuF zdkJX*x+w_9U5og4Y65tpl#3@L6U@?fhE#)~9Nc9~a zb3umetfHt_pTSZY!@?=^W(>s>tM^qp+Zw9|N#20Rwru)fp`yi>os9g%csn?IK88-m z{eydSMPlH4FOi%yP6u#ya!Kw~k+i%ez2aY?S@}}VQVCOzkSEH)Ta z3dJG9?%12-UOAUaJD9@yEn`^}_h2pOaARJ$N<#I{=&lARuJW(n8Ya2!3& z-Pd?W3p-ozHEq2*I;qn%k&xG^0mYwPOn(2=?w#2rn;uae*$S{I3d&2F3cA^=aAW#U z*bY*!|LPP}lyijMC^uJ-kJDHQ-9k`OHj2ovwU+_rzd5=J{pnobp|GMmyjPTP_c*b? 
zRNwk$J8@PO`C!!b{(c_W!(AN(CUedUr?=Ft4#3;lU1~WHL&Se})Oo16FI3K=)7BF9 zwI2TaQ(qlg^EZeHLtvx^;Fb3_s?O#4!T0IrE{30~{w#@sI|S_94=yLjRvH=qHUksr z-vo5z`O!lzRI0+WXLmU=@`Eg;w&tgc+Hx(g^{yKdx%H^PWmLG5#as_f%a0E%kD@)+ z{B>a04r$HwH=N-#W8z>4<>@-w8&xg8CYNgM!uU8#2@PIC78@HR>&ujloM};2jCxv$ z-0+7JmhJF57C1oCAI)ME_3eR$iJKqE&Mus7wDIJ+&j22*F_5YyqdG6?YcF7Cd&PuL zZ*DvrOUW)Tp2bjy>yHKtJqkX1dkA_|b^dl4AG}y3$YNOE?%oDfX=F|fS`@z=4r&5f z1=kz1)_wAaM&?|xU!H&-CvZYuc!kd7^ynRq%a-H#-Uxurd*(r_CJ}k zQQO0js(QCFbVOJveu)DMb9BB+Z4Q2Odh)m-(tR@ES)DR`rd$zf;>=jt?ilA7UQa$?>Nd{3Vd5ng7q> zp|YHfd31!jjq+qC3J$_%ggLhMeM8)huBAU?m_=N9d+SIvAV2kw=M<7lg>AIkrqdQ` zXjXqcU}L7eguT70Gn7_VceZyNhux;KxBdZ7*Y|4OGQV{&+EkQhG0c_M9&FTpxZJd- zzuHw^%l+4H9nF;9ra<&6>*az7Q|pPg3_+GMd_lz!T2xRD#CY(?vL{}Y!oaZgAr!yq4B&YbwEy6LoBX_eNLh=y5 zbOIqp`v;Jkx3<^*3lz>`{zunRh%cV?os7s_S2Whzkz|y9E4p`=p3+DSzuyY(JMk>0 zz*Z>0%VlI#ozn5+eapeU>SwC@*C-~3Yf?E4)efGSOU>_b4Y#si#h?|gX<%DYp&Dn$ z{^CnLJZ%;POA9YGT(>AbC0==|__SOK+>w&r{^F^xP1{Pf2o;MiVNYEyWV!&*7Bhsh z3j;)4XGpm951L$r#pK%TSg4!nL(}C?A`p~W(JI2-ux^6C#IVXZw(2@8*EU;PJ~}I4 z5)|q)hW!}2u?bHkZ;RY?&-oNI?6^Mx=o~fCwpJ5AU1qsCOJ3+*24Yp+b|bd@=&)uW z;Js=C8$Ef3IC;(Gv~DsF*R}IEy>Ukhreh219M1GTIu-);-T;3%xnxDxjrAv#@V9KQu30ciLSGj&PsvCl4)EiHNbPqfwUOPnf!n|*v=6U*)4^# z(_=)`?m`$S*nHKS5dXn^>ELm5aJ70Vi`1hpj}Is0(}2L{dZw`2ruy0t-26O2vb1Wp z;A@QHu1pfj*QxNgBbl^YpFhJ*(h$|*6#R8%Dny$OA>k@?+Dqgv6B}i^j9Y%$JE4Ye z@3Pc|XL&lxBTUayboULnVbtHqc5dEYi1pI>per(u(hs&=l$rpVPA5L~$+6wh1jO?( zM@+KNnd);;aiO&Xv1>~p8r`gyGqe8LglLL0{G}t zWTVifq0Kzq9G^9w^FIe#^DOsH)xQy{&3U#_#35vSAv*F1xB#wuq=p=l&m?#56cM&Px;qu z@C|5jc{0X0OiQWyB|s`x3wYPcUdn+(SnAp~%Hz`EjWNn;%Ww9p=x*wTSYS-{?6{K z_T;@p{Vq!~LYyHo1#U%BYbYR~TcB-9Kx{g}nd)N&#))GuQF=rj?QwIW5l1i?Ch#@$ zX7tYp#`t`@lqjJ1V5@-op~~WT>`5V4Y((R%D$Wv>2Z$Yxn*jR}8-}on|hW?ui0PRA`8D$*>ul@S%yp4CxIQiwJqz|0y*C^HueHX4y z&XPvDl{^Fej&$bZS4`3L(d8C$twANV%V;6c8eU+Im{VeQIgaD7}Dx4kWxOC(8JJex3`zEn*NTm_tmN)n{~Q` zbo2|rk=w_>24K4eD-Jp2_#g!pr|itGw@BGp0&9MiKU@;%cn55u3WryMMh3Sd~SG7Hp#Aq6?>T^WPw&#vpbePo!n-$)j_r>5Yt%ON+k2(Q( 
zk#=?m#_o?7)mbq)Yx;=h!WrG0!(Dl!h)iHIcIDjXfs7LIhtslxumM$nBxq_tp#~aB&*Kg6H%!xUNRvt3N9f#-aw@yp#GVHiTyAzMCM7bJdchC z(NR?13t~%D(MhxWZ+@i?XWG$v|7>Bcv!+$CBL2G23&@AE9`iBu`A)Y5h1W`QXm1(E zy1xxba2BQq232e_&c}+HYu4F|oip=r(9@*rNI`m+fu_cf!qv;?x=;`&eZJt%HX|@w z-GGl_`5B^*^a=ByIgv`0`RvbEcH7J8TIA$7eFbi;jV@(S-Mtt!^QC=Fj_k{9$ARg* zsd?ty%py;A{l7LTqmE98V=8=(E2F)Al4?(Y82C5?63Wv)0x~OV!Q4++4)3 zdPsb(N1`oBowerIb|}TxB5Q#3gH@bBfcV3!kyY+1)kv`}gqz-MUCfV@lSeGBHd>qg zpFBLxRWL9|4btjP8M@ak_@;kfGMD8 zwL8*BA8(GM$K);Jo+feLm$f)oiihgxu_>Zkd-A*PLQo1yJ&Ttl7uYzOpIm6nmr?*g z=0J$C(Cvs?cY97&nvDv-pGl1iTbtu}n60lSt$^`hvO5jD;nx{39p=E(UPZm)`1=2j zN?-c>EDx+Z41G!Y>AF$YbQ*`tXZgBp>>_9GPL-HrL2rTXHv>%+vPP^BXqebnD&9X) z_0DmkB%Lw9{jlWrID#SNVmErV?Vhl?yU7KTIDCbE*2(}F82-3Hy}pAVL4O{}@%R%o z%aY&VCQ=v-fG)XtK45^_K?IxKGwLK8C7j|YDD3ibU4ulTlW|k9IHV>QZkRKMz*5q;c`Fn|t&89(Edzf4FU=gO~n;Knmp-w+JI(u|FT_(FwJgN3zZre1QmapQTuura7!#`gnvIo$^9GUjx6kl~tHkPz=dAB7tX+uXQm ze?VS8H!Q!INpoLf*mdI}ys)W??>SXC?&bcd!_{;o*NrN13te0Is+$HOL9}kYQgaMe z#+~T=gNsnvp0_uF3yev~^UG|ffqeFXvg$S7!+1x1|M}V8fkn{6G3=V#Uj@8T8m|*H zoBmQDE=TSgSjDf%s7|V))M7uPS+(VYm_D`Ir=*IkpA(W#mrAw6pe>#GCCxL(0@WE| zs=ieVCdyFrP~@dFJFOY8rXLCF-O>t1 zm&=q-VU!u7vTrA_1@epspm(d z(@50HT%eMC1w$lwkWc+~=95TnjH$x}Cz7Iw*mIp7@~)=@U%!z`+#NBKW1??E#UFM~ z8RoQIykWyayw12#7Rg%9zW$2YTvMG();=9o!>+rd{^ zIfhYBG7~s6a5dntW8w)81eEq>{oS>NA`{yK?Mt9GXAhrG8P}_CKWnVHpro?Z`3Gu6--pU#MgSCYh2Wq&?3xjw8ZG12$h#Jsl-ZeX%J z|FS02*Bn~oJ~5)6^fe|0`5*jjcRR4YAO)Z0Ae<%WHWiGz=s+^(wC`=#5!{=$kB;-1 z!c@lh7p6sre77hJMzg*9p>uIQzrB`otn&H8r`x%|fq7Gl+xkaFxl;~eS%m#+@4@Yn z;;vZ<|0_*S=VXgSE%@4u?4)nTND-UvO;;Wl8d8^wE z$K1<>L82hHc&GyYc9A)A8L?nWyjuLSdPJUbS) z#gFUH*SC$YtdK6kJ@GmWSbciHuTU6Iw_4Hl7a${pIn}PN^hw{I_Mk8QYl7PlCT0Rn zf0H5mK4RDMJFDKFG4KBh}ci2m{PQU*KI2T=3MAq-k-Y4OF99g=+BvMnigf4T3^0_%OyZ^p^k0}}g|;mZ9v}I5AT|;?Ly=8Wp%$9V zBV9&C&m)TPz6Y8TTdv{AiTqFvzNy9r^8?H5u0J^@PopYeR)F1g0&$;Em2J{;(~%AF zf@-(QZC5;aF=NAdE8SWGf2a|zG9utJ&GMZWV=>>!W|e_s#9AiiV<>dVpE>xyVQDc9 zWP$YS!?FFXjdxnE&16kv0jAYBR0BQXSvJH*2?7UNRl&MRo4V`WwBj6jvMm)UN-yBl 
zc0F{V3dZ4uV~@X<51irVbzJ*~_HK^Zs^iCR)EW?6Mt|BgS(EoT0jhZMg3pB;b~=2B zG6*(9%W*jjHI6FC@6BkS3kr?mL@`FE=7#1Pu@VOv|1iGeOXWeLm%=1dY-J7*WE*1K_BSz(N z5Hwu24E{@$oyl({*x+Od0SWPNc_R?;`@@A_S7#qtUZ5BopASOh6SDS{#K)3N5V_nk zE`~XmlAuhfXEr!@UsdKujz5IX+bQ`YS z>3HMQKi>*Soib*&fjT1eigj?flFwRyL7RkZQvZM|gNfLk&Sk*xDMP-cu^07B9w)$8 zgCyq7oR80kw^GwA{6%jRYIx?^ijmZUF4+z~Wp77Izxk{jhZ5s0UaU2uX!}Y7k!<<< zw`G<=0OU4>^J8AVU#3(;25-Wxmc9!XDQa zvG)~E2UoqQa2bWiLu_2<6pG=yx2L$9S%5gwn8FI|K=Wtyj^7G0?2;_Bjp=;2b zeXvKdp|$zT*oPu*TUk-7?~`gOfW@!;Xgf5wBUyBJtf%f{Q#woTBBD&4gKoo{X3Gxz1zTWhL{sH&+|hHnre2*7Jt z8;t~}E40qzd4I1o4{w4NCvaq_W;-@fbyR*p=CMNb1h)*OkR(q-Og!!A7$w&9`yO!5 z$A{HT}kjRcfachR%yaqAQTdAEbK9zLeD zDU90D7i)(DLkvAuCb5Oe2u}6ngyW+qX4dL0`tXtqjEX?M{opR-7l15^_d2e%eX5#+ zdbrjjd^;R|PxNGmE+RYLSs|bJZrrr~{*^XejCZYgoh$tt$3HrO7_Q6|i#tD3c+4S_ zd2X85r7`U@p#AU7pS*=OS&SH-w!xddhGl=jp*Wuun4s4KmG25?W;dwqZ?#JB+Z$=% z$E*c)N8P=;v6yQ~;tkhPwmzms&fIRR-;5_$iOU|Xe){3m{X9KvFkN*OuAD>ZeC!B# zS0C9{H-U)K@eBX@l_W;<7rdMvb^GFLQ}CD+bx+497F~z3Z|s=9X3}wFkQMrcaJI-M z-QU!IK@h|SqH5^zW~)~#Eo46ml$>@!ygjCT?jswubEkIBOdM*38(d(8^ycW=>m}%E z4foA~PAr|J_3?8%CWd;s8M5n*UA-nnt>usZy`iVhu ziB6ro@M3l`JRQ3;HlkKXWlyY}*=>dw_*6fVMVKtHApq<6ehD4rI@%xHfQg)}nB}AO z1PE>tt=L%%%VjbO-wiT_i1AIj-USgEJJMHTio4IQr?0^br$~cD%5qG;neILkn>`U>PWMmv zYjsqdeCVQsTvtHx@wpm{P`A107P%W-*B?6|W2NPf*C>t2doAL^1;l~``l839|lqaI3DCp>qjK*BxHMTAh;ER20AG88dO05 z$aK6U(#;iV7Ox*hYOVfA^_wDcN|9C?fsJi7G15n{sLt2UskHF+jbPn}8;?|0Kpf^x z+m_Kcl%qjA+yYqaMi~+y%*b93E!%z6Sxk$sx4J>B9X$>Nba7zK6ebM&sc6#_nOFWE zl8F-YfQ98*&a6-K&mQ2uNC;Cd z(t)rhnh!!aVW+r8zbKxrKMLaz3fa&Q2{#|!jjp?sQ&C3;6pHj_*Pt&&!lP(e>&W#7 zr;t(GokcaN0FCceV9ufa4O0!W#AC|k%VA-CByW34v~bc8B9=idQ^lPvlf@Jl8wXr} z47%hkLhjfzq|TF|WU%8Mb5)6ue9wU`x~$;tD;crCgg3)b$<0IjJ}-f9DIn=V2iqr8 zIE}T287|n(U?^T|U%dNCaxc9wfoEi0q)&BH(n)XX36 zCS?*02k)pWQw@!M;+)m#^rfUHf*%`u0TibuJ!9OOnfttlh-xD}Mqfm4iQs&{y}n8= z2y2V^+fS1GzTnCDa(^r>>y6dXh0sM0DJY}90eTQ>W0kOpme*jA`K3QRZ`eQ%R~MfV z!FJ?fVSTqg*Jvc?t?Yge?tOh(=rNwlh}N%>MSP`Q^be)(fI)9<$Q8d=|NDjLAUn?Q 
z%{j7I05-#=o-zjo42B?FfN!^*lxAmfECx&)SDHCsSB`l9k}kE&?X!w+3Y`PvnpRx| zT!6CWNHL!H_eeJGG%=aoF8M~uz>1bWewOceFyj6sd*B@tBG)l_ue%NBJ)jX1(_VKV z{la}7yz#u=g`I`s>hD>YGPB5hm7xrKKDR0vZwNLK3P>!*1SC19qED;4t6o#B` z!6Nm_8`W|orE*Zh-I*hg3}WH;Y+Mh9g_2qlKJI#4cjs_AzZVRPBa<*0cI!S}t1DfQ zTcxpwbo-bbSx!Tr53=W&tH=|~J;D25_xn(8N9aKq7g+`xIWX{N*o$9Tjy3s`G^}2gO3Rox^XoZ81;?2nJsh8HrQKh#5GrcXo6) zw8z!hQA}u$H4|LGE8WEk-}>90yM2&{cl%EPxNSmy1VuDVaf zjXm3VK_+*q3p2~CK3mw*^lG97i*Jz)XyumJt|KpC809t));~*y zo_hEZV)_YPKjx&}E06`ZeLLTX+3Nj(Z8SFsTz)03lKWe58ON{0&qv#UZa8*07y5`S zU;{DP*E$>qwOR)%d7Gg6-ry>j!-*CCGGGN7cEMy*MjEBUXt=5PcRQCT5ef7$TP(ok z>m{X z{q`lxEjoKBGwuXYZ4`jv%WT65CyE~(=VeLYr+tXnHU?&}HTGkLn~3pJf2bjiM8Guf z1ssan#eW(UXK^#(-XjbCT5hb*6o#JvHc7y%O=OC;z0;B7p>dItKry!BDNBCMt}~@4 znfMjz<6JEUj4vJE_jw0;T!nx7;{Matdbc%^ZF4N@nU4S(j=p?jztF|Im^HbiU#4#ugNQb% z=3H0Wcuj4$B%F_>>_Enu^E|&YAEwq?RzT_}9oS%tL#b%SI4QIEn{AP2Ax~U#6+ItI z#RL9Fy9Mt?6sCVG#m%Q(#;I;H*!os9{_E_wi+&Q0U%HP3TcMNB$=OxLj3Tam;(f|J zrH%zq)pIU&EKc$jV{**;gI{G4ZPwaGNoa>%02vFG|8S5L*V?wzyUkb+F?!^Lkj7m}G{3aD| z^f5i69hh(=rS(3$Co6ML&K$GFacEwiK5XnP+pA*_8#`~(jh_-(Fs>srL*NYe^6-?W z>}~{}Ujiv!?iZqWjDw?q1~M=C&yJM1Qe(?>-()u&Ioal{f>N2}k(c$$w*AF%kxNEh zoxdv_MP^&g-}q}y>C6niJSDXJ+T9ui{Af~6** zDmZQ)wkxJ!uR7LN5nx@(%zdvT*FCLD{a%k4ZJ`k}&r5kkAvim{cB^S$erN7D!QSMk zu%tzF6)rewp%(ERG-onDk@)ZRZT_-+N=?1@WPCaBmHx5#sC>s|#T<;CcH|_$UP9h) zRjS4Gh9*1523tho;K8&#d9_H~nNhozF(@m3<;&hny*G!}zohW~o!)V%@+}2d+ z;f24@O>ONwK-9_DFj;}kl=JD=sOc(K2jz;)-=#_geslXmT;zP0lj%UUX{}WPUZ?by zT!O)#k~4pJV##t}mW&i-^N!V+T$vD|PgWdd>h1uUH8#@ehdDcqNWu3x zg!_-82NzO?+O1@bhrx>Yf+9WJCMzwFUY7Okhu8gjt%BEZUg3&lQfA zxsg^{4a@MXDJ@OUuyLh)0c=S0bSx+4FEHA+&qh6@Qn9gS*x9^g>z>1#ww1}ZU^jlM z97}phuixYU`fSZiGDYQCqlm}w6l(p)qqw!Fz_Ey%c-Gis`8E5|Xtm(J;G4iG-tA8C zo#zhMUp*W&>^NiRlMpyeXyRh8!v1Aj+{Pn?F45CQev?eaFuK-RBec~x_E+;1O=;Iu zRBL-IG>%wIc9w%BJg-W_fB(978Eawb2=KFh6`kZgyImXCzI|eB(``Q%AYqS=Z0tS2 zt=L(J9G*&2B)sqsCW55)@AN_t3mjIt6=hHv9j>!DL`ww2NMpDZw!=fe1JF2Iy2 z-Duqi)$Qh29Lm91aE4=iruVwiF6yrno{_yO%F1#bbxXjriEc4A{4e*&DaCTyzu2-% z4|(j~OLIk2|LQwcax2YWw1UQQYEZ9za 
zb$%=3CTk0kpBank<62xQvC5l`Uxp;*o@ccZ9&=x`t1Q88Qkz)=B3C~e-`gn1$p?u9 zPViYmL`=vecQykDkQBQjdU%cmc&HV`)f~W_DwYc+xPJmw6XG=5`NmHjv?G}qumYKl z($-c6>RoQSVug~hB4%u47itR581{EXfUVm?gTP5cbNk;$=NX_VMI&IOBqK0QmB}f- zvh)ft;sz?%%WLXE!pw^W(J^FWj?YVjpocJui!vxiH}-AgOq%mgRUt?rWT`tQCi=iG z32|arXkub124hI?J4GF5=>%ld=!x-^vTO2-`II6O5-#Ke{0e^;CIz=SSUwLtSkg6n zrJnW`Npl<%a!FE?w^}WmZjcU$G|LL8F~_g|L6O-MGAZB(<^fG6=FR<5!Lu{*CHgQi z7ay+1<3A?)D2C!E$YCLysg)K}OL*Vu;RnE3;U3_CnQ@z?)1wc_S`pa3IDYDLt&zXi zrttB>=2**g%;x78vY^l(Z)=KphA1+#utaBKirudi7X-{xD#Fr~%+5e}i7Ec|>lYIn z8WA}OR6g}^nSLn+jfJX|8;S^Eu&=e9zaoXkfIh)xQ0ney1iLJTO4PWQUOuUKYnlitr z+}*RIiKM;~T-ruOxy(ov2C|ohc{Q8H;pBp<-oe0lO?(THpqjthu?}(Fgg36=>#C3Y z&94E}zo4O@r6sWK=u}N{P!wtzgN+Q8stIiV7Lf=Hp&gk|ou(1Z(W19KeZ7I0MMm*%^`1tOKkbY?LY338+gBm5f+;?kq8&>j@1V zN3L!>Y<@gGd^}@a-zT!xiTPt3eq8mimK!l)0T>bn|EBR?Ftk@^gkhWlCiYRFrIaPWQk2OYox zaB@v9_8iefZSO&Tx>fFx+?u3`e+?kF0- zgHR;ej~MLjDGHhX+$8o<5r_$UGflpskhwJ(Iyf_`K;j9r9`XRt5|(9`F_#hkYspva zpRAp~yYqa~MAE&*ZI@W>=O_hYrb>$?Fyk;GQmC3@^B_Ha-mi#%-gFhOSh3(B3e%8D z`A;c6v1ah^>~RXt=2(~6I*DmgIiA^StKJyySJ zbl|bzhD-kw8%gfD94K|cAkli;8{BMqvBBOo{OOvea{bcp|L9)<>OVW46}cAyx_C|CDxYI!+`Kxs9Y z#o&Wr=e7}3Lv=a+sQk8Z`8c0mL&I)wl*9{#m8CRzjrrlDLG5Jgci@jyd1Ox;0yiWD z-4*z=^3eDVGd}>-hnnS+3>b&z0ln7g>%gI~w0ye-l+DFpE?}S=3OV7urXI+nqDD$> z_8d5}S;ypgM4v-wipeG?a+hYw!2?7NIa3%f{6S7@b%3-RsVA+uQ%!ol^?bhdJ&--T z8|1V_Dh=5k4pdL;`4H&tt=jvd1^|4h;Kq%{Fpx5Ln#&V@!+Tn=WRzUrij*|bwN?ZR2bJydER2UzB1^-xXy?`pMm?5UcBpYUh zZU6Vk4S$}rmvkJJ(MKTQO-WLe;AAmJ>@!yy_#Z*!dnPP*n#|gIEJD$lL&JY$dg&4j zP4d7I*22$R9T1sk6q9#pB=NZ+bkefqwt&2?APmSeB%M!I#}iaiKY0Os_}`_PnH0V# zdnn5LL+ES;$=uL+iu4$e{+`AF;tQqH{!s>D_ zEQg{);_-)ILs1HmVEspk$I=!3SC|qB(c82-WO2kru%xz9*XrbZB3_@{(8V-<_m|W! 
zcB`T-Mnk!k%1(pG-+py{m~~P_qVnD_t4x$)UZ)+uAJ-$uFN({=FvUYs7!>A zR0qc&O|C;3P|y^H0kk4K2kOa@^xAN9{YUpixy*jij?(S|k9`F zw*?hJjGnuN!T2-ky70H1nDY+R_P?)VjaFZt516P`o;`@{HJ$^O(8~E_haMC-Cxokc5q#_H7IyhPxMZ zM!n51;2*caDpsyd#e8k~Gvjp?t)hp9-t8A}V)1NBo%f^t@3XZ8KrvIlq}RTL((I*c z@(Jp42_w}-q8t$;dhWriuhJjZXO5|?_GxIX-px%5>B88vZcGm?iWKsSxYUY$*@wwr zpu!C=^5iFOe&hoa z-wB0C$#mmi=8iZ<4t02@~k!6^=_?9He zsla(7WR!cD8*Jr{Z9=OaQnd25gEQIB?{k~3u?lvzFD=BOOrj@ z30OYFh+&<@jm4NMGP07xanYn8U>p?L(V8>q@Y=D%peb76U&}u`=SgVQkAEwdz%{4V zusS^mLM+rpcF$Ru3)0?7prv_?0HMUhvJd1LtMvQ(q<8wtECmG#@$Tvn2?suJR^W9j+{LhyG7+fg(9979{!1h>2{j9jG`M<= z>cq^3Alr~fHPt1@DH@>hBpA!Gju^@d9Z{a*!BJ~Y#vn-yfKod(SBc0Q2|_csE0~pViJa_zW(qmY z$gQ-=fXGZ#FE_1G20jbgxN{8G) z!P0l@DkOGEhx`UY#fmC#)U=LM1HTo)BJT~R)Xm?s)wJ+{7lm(qw!L^+GD;TExY>k2sqxSieZ7CNF`fDT~U5H?^&FO={AhHDBCN zQyUUSCpBHyFU6#I=W9mGtGrSv8pP7Vz4_e7hSo5JEK^$e#PJ)>_FE`$wi?hB!x-0G zBx)A+5TH*nqSHBrbzVmjQTCef917X9?$2Cu8GQJeSBIQLGLHjS*%yX79U$lQwD~b> zW?)unV6rL%#v_B>4)~9}92Z}>qIkA5%f9G#0$@v?Q+wTONv$3Nbh z2mU-N)cgEE>FG~DK%J?r#;?+z3f;PJy|&SSPg%6?t#{)0xb!4<$Jh~Eu6KdG9QjR* zupaY&YWt?{O1NO#{-TbJ9ox2TCmnZe>?9p09ox2T@7T7TbZpzs&ACtK{(w6l*V9^K z)u>UUX3bf%AL~p8Q`CW-9g$VtNc~@@+6AM3I1dzROw-~Mf8eFhgEtjRQ0;Z#IK*xC zcae+ER>s3b((}bq{+fb$lozY176qLjJ3n9GL}50r8V|L);s5-_Ou|^x%Qih#3RL@P ztN^j|d``Hn8c~lOf>XL=K^8q^LbTr_c5k7MgxCnovr3`>`22Qf3WK2jFNCQigkCOc ztmm(D;CX$Z$6Kw51*<9xb8yfw3bfi%RbOzw-tO(v7W28!gW<+L=lTNrvD&f7DXdoS z8xu%F176n!efh)1&?(~Ens5(qR}fm-U#vUG&hY~>gF%1z##HZke*vZbLoHK^L|UW= zly>~92)?o#Klm>YY0tYOdNT(jdOkwJXeL&l$+#Hnx3;f@)#F1Hnv9qjd%`9Ng1%LP!=3{1Q3YPxpb?1>sk*$K_BZHh~`EDRvqZNgN zUNtOo(9PM5o8le_$6Jw{dgh9I#+Q+o&cMnaOHYy7GVdGLYI%IaejhN8Mv!E|`6ajC~`*qSxb*Y1dYh zC+%UB9Ho`dqBTjc+dCo4i_u7@A2bMgDb2f2hF_vyXB5x$%YK73Qx@hKt&h2_g5L%z z7fMbiJP>eA=fTs@{{iYAiRTulWW!OfZfCfnJuUJ%Tr@Q|FvMEdO)uo{`$af4-s?_S zstNU>Y7`WXxz_X#dd)`7y(RUV$**O}3%4{8T{SdX+S+o>kA`Yln1zFW6RePqvpYp% zx4jN~Bo03Rd)sjp%D^jxSk?T_2ez5<(V{TRUjsx(wt!YAtjE`P|7gTV3tnsbOEMrp zzs?`@$B5AhzYSip=FM>V7_)TI8*1tRH>KeYp==XIwkEqj)7GWb 
zL>Zw;ji#sR>8BQFU$FpF|nWk^LZd3|Vq_x9>`MegPm+beekwD=0SVL@TjyP;{)@ z4sZ3K7sVTfir_FVRQN?e_tHw)1+ocv!J0O789uGMDZZ&Gxz-8Fv0N%(;7*8cl}51^ zYhIac8t#CEkdv+-`+Y6lX3NcK09IF0T(4gP<_^nq3d%qT&YvYqh)$DVY{Q8SGkwz`=#dB> z!w+K9vlSr6J-3DmGw_lrFJ{kmGP&$7?6EmHXM6F2PHEJg<{5vQ8IWj9@kTe|8XNl)vkCy z6*Khj45eGn=2YM>FMsHCe&z@`TWOmwM3eDx__bG-q@|L$0Jf8+yu(cdHS<34%iFt9 z5?AYLWJfG5rS2G?$oE@Jn?4e5#c;lh;=3z9lujLm3%UDI`$y7vS+;BrCsvA&AxCPW zqYp38y{q};o2FwO%gMnUN9NT1Xz@JRsyk(JT?vO-r;shx;H!n+WlTWaUUV>L?fs?*YH~4)oY+}=qF)k)$@QkPr;cFhMIUxt> ziGM8HT^xgnNdCcvoYa;*2me@-^n0+aN#$1RVHeh~Dc$tVT$0ucpFi3nZM^kAqdt@@ z@v1KJi9Bw)_c?2jTBrELlcB^@KKfCYQUWy>wjKHnr;_34HnL!7PY_F&*$M=i$svP!%YPNXfqF*A zYi5*mMVV+77Gy`9d9r_SzGCPTv-PF^4KK~9>U*F<>r&s3^HpEa-@hHjrDKuK;je(E!Xp*KN9NzWiz(-jA`S>2@wezhYnChI!43>S z1gjqh&f$dP|19nR^ju|krEGsO#gTBu&)9gF51QNWwbg07IKtJ5fo;uUokGyGVl%2@r<-7uV}BAY6|}`KbOjf&5I5c}c_7u5vN)4;Y#rFz1?T`p zaH0058xU9S$Yd2jeV(CbjSC~gqo5~|F^f|0h~43MTvxu6Le15TM8gI|&QuM+J%!+d zz>7zRNK`eyIAL^l6)Tcq23o3s!dY$KBrb_t2z~F|=%-yh$dAkkRfMc3DnhHJ8X~j( zUU<9yg9rsh>@$Js>L3rm?#ln@d58F#Xd|(3ly=PXLLb<&DVU80b|^u*UV>veUH~fC zsf(zpl25pUH8h^TH)k=i-U`AC!XnFRlt+`HFmSNbR+1(^&mjmC-sH(a`Acd&`b+N7 zbuXG+__z>R$_^Z>z8Y8SHeY0?TX0~^e<$@RSr8BRJT_R8^tG|f?ST8wq(?Z1Bv!gKfJY| z`jEm32t?#20jnmUK9Y4r6P;M8Bj` zK1vEeF<6qQG%NRIXCHC}uVY1(_UonIF3(0iMKodiZVS~cZ3F;G7(ov2x9AR^xKU36nf^Vt zo7ZseKW}dS6qMAWQsZ*$)rrD+ZSo@FCfoSi8Te4uEgtcI&w#glK41vPH4}zGDXz27 zM^5>TrZg6^BkGWIayYxi zKjZ2v6-{ocGn%+k$~|_6xilddLa~VtcpkMAgJ=x!qcaE7=2`*n{hqltT*@?5_N>($ zPzunTm#8?xQbXa8^opLSPPkDw?j4l2V#x3JQp#$=o8^tHK}K)&*aK05%~UW0b}n;8 z^d2!Teiw|D(54YC`j`iB zk7&;$HnsjI8a1Q|)Xdh`w6X;*6UyvcYX-~r`5o7cEbV{XVcFWhtYMLe6FgFW!rRLl zGttclQ{yu&`^a$*!rl>UK{Z{C`Srq?Gh z$>9n7%Fp@}o+IWah78}<6vXYPH4OS8d!*qW+G2F+#}8)S;Ed{9eziOGaX=*U-;o1x z=EnL1fzz)9cYxp~hC!%U{-01cB9ZJHA!`ww=y%l^OXGhwaicBJY8VCAG|MQc5B$*z z$FudUVmc0}?2hTs96W;PT+KlVzDizC(QyZ!7+elS2v6<|!)T8@g0@fahr@#N*ZHtz zAwJ&{AY>`B*Gmi}_+C~5kdzd!f6i%%VI{t+?c|9w0$R`LECE=#xcvB;7WbVvRCtg& zLH!AJThLxcPCxrsN|FhY{o2z?j@r+pO2kIYjW-fEgMZ*_Hh4R^k3myc3*F9k$N^S= 
z_S6xc=4Rs^K|Lq=WoTz(_7Cplr_QUCPM6=STC$?>@^SmRU$Ai)M`dE;S`fCbrbV)3 zg4^B{>O5fgL^^99&Qfmni=eBS!iLTQ^aq}chvSb7#}kot9zQ9)vuEKR<$b0DVL4+; zh*XFrQx-O)JZA*7BQ+ZUq&6Sha*-~MA4JIIYgbTD#YoD`RP2;otByDN+6DD^6TUIC zD{HTWu7Uhm0gdcYvWF89C&`BMtg`SM{Wts}N{8tFT`?!ats^=G>DvE9^nNjZ86Rs0 zSnx9-+Xsay_=NuK^vCk>_DkYEVn(ebmRt{K*RNdMtaPY4n!KY@aykN`ZAs8RUJW`{=ti}oPcnN`-zFw+y4$0W zh`SdGcduhyi2)>jU4;Vxz%4wV5n8OvKIMWVq{f}SWT5TNmQV(dDb=XdK-1L|X@vr$ zmTpyXu6Z=ZPz$C-0tRQ;oUFRr!C>~YktyO-MS>dAxX$b`>JrYvwbc3ZLBr^9zc7kDPCDW|I5RQ=yU7-vX}@_34)fg+obW$kFV!$fN+nhkDb z)*qRT&dDI=6%w1;6W^jwR^YqMqNfy;%`#R#=*%P6-25_i!nm`VzWBlHX@M)y-DYr^ zQ7}vvMfsWU)ft|HO*1_ejfrqrq|zZSml}*Brd&zO8_rp*w5;48RgXtQ)@Ei<+spKU zg@zWxER|ZqUJ>pOatnD{k9((hS@9)Qa$@4pn%JvF?QX`z||9y*Z<=*WqtZ z*8{_xM&j>f)upO5J#|+KbRx+FQHIks{c2eUWV@g*iNQ_mp`3@0hWiQT)6WuyWNOSOIZ*j8*>F0dd^?a0j9_c`z(LUwGU?0{zGVZCZ z@VFWkJwt%bzyE|CleZC`FgLBnE(J4}hMXKL^UY1MZ*Ttuh$YvU7)o+7rp-22t~doW zpq12ZsnQDoWYm5~vb3V&`yU#FfzZaIN!(ZNG)oBio?Qp^CTjPYACQ!Diq{Ingk=P5 z6G~rTX)ET*wm{L}Hje8CGsG%eT6qY1XiG-ZINZgI>wYcG!4AxC9O}b9;bGp%x|eK( zP0z9TjYPv=*&H-*u|@~)`Q>!ORXkves(G!29qe0VdexQIJBER<=!E(qsSn*fzXgz(CzWfFL>C zmL6WkSI@d8FE1D#mV+Fh#o zzn(uLj&zMfq5vOyzs66cr2G~+MAHzW39VbG$TXnp4koZ4hlh!^qyfsFsBcIruGSB@ zH7o8WL(74#-m>%Zm$+8QhT?(ZKHay`V@XKBi`ZqLGbk@}YD*74MiCX9rR>KKjCN@; zVO7f46MbkVW=aEr=%9DMjB@Cm)lGavF!I}aM$ygv<_FjZhq=e&bDK>9PU=)y@9 zyH5r%Qi{o;zL&e$<2H0@-2U03#wrZ1o>{#QrbAXPujZGUB%BQ0+0Bjrz)S_|7&(C$ z4U?zvtQij>c{JdmgQT4)t( zwuELrI^hs~Z{p`A6lyc-7zg7EuuLcP)YkKrR`U?#~0IM zmDWg$XGPa|?@s~xeR98*Y}x{JE*}>B$?3|0sCbrzh@8G%^-!{mt$f;s{D z<`O)ahh}IetYz}Q$6#BVX@wT z5OS4QQ9)-|E)Q(uq9l9zc?#O+hQ!L$S_uRiu@a5Vo|VT%t~N{BDT)gr`x#Zfmt5iE zcQe1ms#8f(rX>mrkc0MGRL_xg2FlJmK8b`}-+9m-Gvh3VYyt<#`nz9_v|}+aDuns- z_YdF)@%I4Fr8Iz}8G=3o7{*aq1kUOpQ(+l`l0#|e7|7Zc0q~Qq$1?it5NlzO?;E;i zI8;p3x-VAEpY(ODy*P4Q!M#KE1S;-EoRBh?oETVIDfGDG!5P}N3)S-p6;Ue?2t|Dz zSJ&pfG0NSS@73o4RuNn5-;otg`O~{FAz#FOU8#l;P&qSmSd-9W$8nMLG&E{}!I