Merge branch 'dev0.8.0' of github.com:fastnlp/fastNLP into dev0.8.0

tags/v1.0.0alpha
MorningForest, 3 years ago
commit 6f96d86e9f
75 changed files with 496 additions and 2204 deletions
  1. docs/Makefile  (+5, -2)
  2. docs/source/conf.py  (+7, -4)
  3. docs/source/fastNLP.core.callbacks.rst  (+2, -2)
  4. docs/source/fastNLP.core.callbacks.torch_callbacks.rst  (+1, -1)
  5. docs/source/fastNLP.core.collators.padders.rst  (+1, -1)
  6. docs/source/fastNLP.core.collators.rst  (+2, -2)
  7. docs/source/fastNLP.core.controllers.loops.rst  (+1, -1)
  8. docs/source/fastNLP.core.controllers.rst  (+2, -2)
  9. docs/source/fastNLP.core.controllers.utils.rst  (+1, -1)
 10. docs/source/fastNLP.core.dataloaders.jittor_dataloader.rst  (+1, -1)
 11. docs/source/fastNLP.core.dataloaders.paddle_dataloader.rst  (+1, -1)
 12. docs/source/fastNLP.core.dataloaders.prepare_dataloader.rst  (+7, -0)
 13. docs/source/fastNLP.core.dataloaders.rst  (+3, -2)
 14. docs/source/fastNLP.core.dataloaders.torch_dataloader.rst  (+1, -1)
 15. docs/source/fastNLP.core.dataset.rst  (+1, -1)
 16. docs/source/fastNLP.core.drivers.jittor_driver.rst  (+1, -1)
 17. docs/source/fastNLP.core.drivers.paddle_driver.rst  (+1, -1)
 18. docs/source/fastNLP.core.drivers.rst  (+2, -2)
 19. docs/source/fastNLP.core.drivers.torch_driver.rst  (+1, -1)
 20. docs/source/fastNLP.core.log.rst  (+1, -1)
 21. docs/source/fastNLP.core.metrics.backend.jittor_backend.rst  (+1, -1)
 22. docs/source/fastNLP.core.metrics.backend.paddle_backend.rst  (+1, -1)
 23. docs/source/fastNLP.core.metrics.backend.rst  (+2, -2)
 24. docs/source/fastNLP.core.metrics.backend.torch_backend.rst  (+1, -1)
 25. docs/source/fastNLP.core.metrics.rst  (+2, -2)
 26. docs/source/fastNLP.core.rst  (+2, -2)
 27. docs/source/fastNLP.core.samplers.rst  (+1, -1)
 28. docs/source/fastNLP.core.utils.rst  (+1, -1)
 29. docs/source/fastNLP.envs.rst  (+1, -1)
 30. docs/source/fastNLP.io.loader.rst  (+1, -1)
 31. docs/source/fastNLP.io.pipe.rst  (+1, -1)
 32. docs/source/fastNLP.io.rst  (+2, -2)
 33. docs/source/fastNLP.rst  (+1, -1)
 34. docs/source/modules.rst  (+1, -1)
 35. fastNLP/core/__init__.py  (+0, -1)
 36. fastNLP/core/callbacks/callback_manager.py  (+9, -3)
 37. fastNLP/core/callbacks/checkpoint_callback.py  (+7, -20)
 38. fastNLP/core/callbacks/load_best_model_callback.py  (+2, -1)
 39. fastNLP/core/callbacks/topk_saver.py  (+2, -3)
 40. fastNLP/core/callbacks/utils.py  (+2, -2)
 41. fastNLP/core/collators/padders/get_padder.py  (+1, -1)
 42. fastNLP/core/collators/padders/paddle_padder.py  (+4, -1)
 43. fastNLP/core/collators/padders/torch_padder.py  (+2, -2)
 44. fastNLP/core/controllers/evaluator.py  (+8, -2)
 45. fastNLP/core/controllers/trainer.py  (+277, -65)
 46. fastNLP/core/dataloaders/paddle_dataloader/fdl.py  (+2, -2)
 47. fastNLP/core/dataloaders/prepare_dataloader.py  (+1, -1)
 48. fastNLP/core/dataloaders/torch_dataloader/fdl.py  (+2, -2)
 49. fastNLP/core/dataset/dataset.py  (+11, -10)
 50. fastNLP/core/drivers/__init__.py  (+0, -2)
 51. fastNLP/core/drivers/torch_driver/initialize_torch_driver.py  (+2, -2)
 52. fastNLP/core/drivers/torch_driver/torch_driver.py  (+6, -0)
 53. fastNLP/core/drivers/torch_paddle_driver/__init__.py  (+0, -5)
 54. fastNLP/core/drivers/torch_paddle_driver/torch_paddle_driver.py  (+0, -193)
 55. fastNLP/core/drivers/torch_paddle_driver/utils.py  (+0, -4)
 56. fastNLP/core/utils/__init__.py  (+0, -2)
 57. fastNLP/core/utils/dummy_class.py  (+1, -1)
 58. fastNLP/core/utils/rich_progress.py  (+2, -3)
 59. fastNLP/core/utils/torch_paddle_utils.py  (+0, -49)
 60. fastNLP/core/utils/utils.py  (+84, -75)
 61. fastNLP/envs/utils.py  (+5, -3)
 62. fastNLP/modules/__init__.py  (+0, -9)
 63. fastNLP/modules/mix_modules/__init__.py  (+0, -10)
 64. fastNLP/modules/mix_modules/mix_module.py  (+0, -310)
 65. fastNLP/modules/mix_modules/utils.py  (+0, -233)
 66. tests/core/collators/test_collator.py  (+2, -2)
 67. tests/core/controllers/test_trainer_jittor.py  (+4, -0)
 68. tests/core/drivers/torch_paddle_driver/__init__.py  (+0, -0)
 69. tests/core/drivers/torch_paddle_driver/_test_torch_paddle_driver.py  (+0, -122)
 70. tests/core/drivers/torch_paddle_driver/_test_utils.py  (+0, -0)
 71. tests/core/utils/_test_torch_paddle_utils.py  (+0, -204)
 72. tests/modules/__init__.py  (+0, -0)
 73. tests/modules/mix_modules/__init__.py  (+0, -0)
 74. tests/modules/mix_modules/_test_mix_module.py  (+0, -378)
 75. tests/modules/mix_modules/_test_utils.py  (+0, -435)

docs/Makefile  (+5, -2)

@@ -6,7 +6,7 @@ SPHINXOPTS =
 SPHINXAPIDOC = sphinx-apidoc
 SPHINXBUILD = sphinx-build
 SPHINXPROJ = fastNLP
-SPHINXEXCLUDE = ../fastNLP/transformers/* ../fastNLP/modules/* ../fastNLP/core/drivers/torch_paddle_driver/* ../fastNLP/core/utils/torch_paddle_utils.py
+SPHINXEXCLUDE = ../fastNLP/transformers/*
 SOURCEDIR = source
 BUILDDIR = build
 PORT = 9000
@@ -16,7 +16,7 @@ help:
 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS)

 apidoc:
-	$(SPHINXAPIDOC) -efM -d 6 -o source ../$(SPHINXPROJ) $(SPHINXEXCLUDE)
+	$(SPHINXAPIDOC) -efM -o source ../$(SPHINXPROJ) $(SPHINXEXCLUDE)

 server:
 	cd build/html && python -m http.server $(PORT)
@@ -24,6 +24,9 @@ server:
 delete:
 	rm -f source/$(SPHINXPROJ).* source/modules.rst && rm -rf build

+web:
+	make html && make server
+
 dev:
 	make delete && make apidoc && make html && make server




docs/source/conf.py  (+7, -4)

@@ -42,7 +42,8 @@ extensions = [
     'sphinx.ext.viewcode',
     'sphinx.ext.autosummary',
     'sphinx.ext.mathjax',
-    'sphinx.ext.todo'
+    'sphinx.ext.todo',
+    'sphinx_autodoc_typehints'
 ]

 autodoc_default_options = {
@@ -53,8 +54,10 @@ autodoc_default_options = {

 add_module_names = False
 autosummary_ignore_module_all = False
-autodoc_typehints = "description"
+# autodoc_typehints = "description"
 autoclass_content = "class"
+typehints_fully_qualified = False
+typehints_defaults = "comma"

 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -168,8 +171,8 @@ texinfo_documents = [

 # -- Extension configuration -------------------------------------------------
 def maybe_skip_member(app, what, name, obj, skip, options):
-    # if obj.__doc__ is None:
-    #     return True
+    if obj.__doc__ is None:
+        return True
     if name == "__init__":
         return False
     if name.startswith("_"):
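
For context, a handler with this signature only takes effect once it is registered on the Sphinx app. A minimal sketch of the standard wiring, assuming conf.py connects it in the usual setup() hook (the hook itself is outside this hunk and is not shown in the commit):

    def setup(app):
        # returning True from the handler skips a member, False forces inclusion,
        # and None defers to autodoc's default behaviour
        app.connect('autodoc-skip-member', maybe_skip_member)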


docs/source/fastNLP.core.callbacks.rst  (+2, -2)

@@ -10,7 +10,7 @@ Subpackages
 -----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.callbacks.torch_callbacks

@@ -18,7 +18,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.callbacks.callback
    fastNLP.core.callbacks.callback_event


docs/source/fastNLP.core.callbacks.torch_callbacks.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.callbacks.torch_callbacks.torch_grad_clip_callback
    fastNLP.core.callbacks.torch_callbacks.torch_lr_sched_callback


docs/source/fastNLP.core.collators.padders.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.collators.padders.exceptions
    fastNLP.core.collators.padders.get_padder


docs/source/fastNLP.core.collators.rst  (+2, -2)

@@ -10,7 +10,7 @@ Subpackages
 -----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.collators.padders

@@ -18,7 +18,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.collators.collator
    fastNLP.core.collators.packer_unpacker


docs/source/fastNLP.core.controllers.loops.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.controllers.loops.evaluate_batch_loop
    fastNLP.core.controllers.loops.loop


docs/source/fastNLP.core.controllers.rst  (+2, -2)

@@ -10,7 +10,7 @@ Subpackages
 -----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.controllers.loops
    fastNLP.core.controllers.utils

@@ -19,7 +19,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.controllers.evaluator
    fastNLP.core.controllers.trainer


docs/source/fastNLP.core.controllers.utils.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.controllers.utils.state
    fastNLP.core.controllers.utils.utils


docs/source/fastNLP.core.dataloaders.jittor_dataloader.rst  (+1, -1)

@@ -10,6 +10,6 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.dataloaders.jittor_dataloader.fdl


docs/source/fastNLP.core.dataloaders.paddle_dataloader.rst  (+1, -1)

@@ -10,6 +10,6 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.dataloaders.paddle_dataloader.fdl


docs/source/fastNLP.core.dataloaders.prepare_dataloader.rst  (+7, -0)

@@ -0,0 +1,7 @@
+fastNLP.core.dataloaders.prepare\_dataloader module
+===================================================
+
+.. automodule:: fastNLP.core.dataloaders.prepare_dataloader
+   :members:
+   :undoc-members:
+   :show-inheritance:

docs/source/fastNLP.core.dataloaders.rst  (+3, -2)

@@ -10,7 +10,7 @@ Subpackages
 -----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.dataloaders.jittor_dataloader
    fastNLP.core.dataloaders.paddle_dataloader

@@ -20,7 +20,8 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.dataloaders.mix_dataloader
+   fastNLP.core.dataloaders.prepare_dataloader
    fastNLP.core.dataloaders.utils


docs/source/fastNLP.core.dataloaders.torch_dataloader.rst  (+1, -1)

@@ -10,6 +10,6 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.dataloaders.torch_dataloader.fdl


docs/source/fastNLP.core.dataset.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.dataset.dataset
    fastNLP.core.dataset.field


docs/source/fastNLP.core.drivers.jittor_driver.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.drivers.jittor_driver.initialize_jittor_driver
    fastNLP.core.drivers.jittor_driver.jittor_driver


docs/source/fastNLP.core.drivers.paddle_driver.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.drivers.paddle_driver.dist_utils
    fastNLP.core.drivers.paddle_driver.fleet


docs/source/fastNLP.core.drivers.rst  (+2, -2)

@@ -10,7 +10,7 @@ Subpackages
 -----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.drivers.jittor_driver
    fastNLP.core.drivers.paddle_driver

@@ -20,7 +20,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.drivers.choose_driver
    fastNLP.core.drivers.driver


docs/source/fastNLP.core.drivers.torch_driver.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.drivers.torch_driver.ddp
    fastNLP.core.drivers.torch_driver.dist_utils


docs/source/fastNLP.core.log.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.log.handler
    fastNLP.core.log.highlighter


docs/source/fastNLP.core.metrics.backend.jittor_backend.rst  (+1, -1)

@@ -10,6 +10,6 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.metrics.backend.jittor_backend.backend


docs/source/fastNLP.core.metrics.backend.paddle_backend.rst  (+1, -1)

@@ -10,6 +10,6 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.metrics.backend.paddle_backend.backend


docs/source/fastNLP.core.metrics.backend.rst  (+2, -2)

@@ -10,7 +10,7 @@ Subpackages
 -----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.metrics.backend.jittor_backend
    fastNLP.core.metrics.backend.paddle_backend

@@ -20,7 +20,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.metrics.backend.auto_backend
    fastNLP.core.metrics.backend.backend


docs/source/fastNLP.core.metrics.backend.torch_backend.rst  (+1, -1)

@@ -10,6 +10,6 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.metrics.backend.torch_backend.backend


docs/source/fastNLP.core.metrics.rst  (+2, -2)

@@ -10,7 +10,7 @@ Subpackages
 -----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.metrics.backend

@@ -18,7 +18,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.metrics.accuracy
    fastNLP.core.metrics.classify_f1_pre_rec_metric


docs/source/fastNLP.core.rst  (+2, -2)

@@ -10,7 +10,7 @@ Subpackages
 -----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.callbacks
    fastNLP.core.collators

@@ -27,6 +27,6 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.vocabulary

docs/source/fastNLP.core.samplers.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.samplers.conversion_utils
    fastNLP.core.samplers.mix_sampler


docs/source/fastNLP.core.utils.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core.utils.cache_results
    fastNLP.core.utils.dummy_class


docs/source/fastNLP.envs.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.envs.distributed
    fastNLP.envs.env


docs/source/fastNLP.io.loader.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.io.loader.classification
    fastNLP.io.loader.conll


docs/source/fastNLP.io.pipe.rst  (+1, -1)

@@ -10,7 +10,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.io.pipe.classification
    fastNLP.io.pipe.conll


docs/source/fastNLP.io.rst  (+2, -2)

@@ -10,7 +10,7 @@ Subpackages
 -----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.io.loader
    fastNLP.io.pipe

@@ -19,7 +19,7 @@ Submodules
 ----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.io.data_bundle
    fastNLP.io.embed_loader


docs/source/fastNLP.rst  (+1, -1)

@@ -10,7 +10,7 @@ Subpackages
 -----------

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP.core
    fastNLP.envs


docs/source/modules.rst  (+1, -1)

@@ -2,6 +2,6 @@ fastNLP
 =======

 .. toctree::
-   :maxdepth: 6
+   :maxdepth: 4

    fastNLP

fastNLP/core/__init__.py  (+0, -1)

@@ -63,7 +63,6 @@ __all__ = [
     "PaddleFleetDriver",
     "JittorSingleDriver",
     "JittorMPIDriver",
-    "TorchPaddleDriver",

     # log
     "logger",


fastNLP/core/callbacks/callback_manager.py  (+9, -3)

@@ -10,8 +10,8 @@ from .callback_event import Event
 from .callback import Callback
 from fastNLP.core.log import logger
 from .progress_callback import ProgressCallback, choose_progress_callback
-from fastNLP.envs import rank_zero_call
-from fastNLP.core.utils.utils import _get_fun_msg
+from ..utils.exceptions import EarlyStopException
+from ..utils.utils import _get_fun_msg


 def _transfer(func):
@@ -25,6 +25,8 @@ def _transfer(func):
         for callback_fn in manager.callback_fns[func.__name__]:
             try:
                 callback_fn(*arg, **kwargs)
+            except EarlyStopException as e:
+                raise e
             except BaseException as e:
                 logger.error(f"The following callback_fn raise exception:{_get_fun_msg(callback_fn)}.")
                 raise e
@@ -186,6 +188,8 @@ class CallbackManager:
         for each_callback_filters in self._callback_filters:
             if each_callback_filters[0] not in _record_duplicated_callback_names:
                 _record_duplicated_callback_names.add(each_callback_filters[0])
+                if 'filter_states' not in states[each_callback_filters[0]]:
+                    states[each_callback_filters[0]]["filter_states"] = {}
                 states[each_callback_filters[0]]["filter_states"][each_callback_filters[1]] = each_callback_filters[2].state_dict()

         # 3. save the callback_counter;
@@ -212,7 +216,9 @@ class CallbackManager:
             if each_callback_filters[0] in states:
                 if each_callback_filters[0] not in _already_loaded_callback_names:
                     _already_loaded_callback_names.add(each_callback_filters[0])
-                    each_callback_filters[2].load_state_dict(states[each_callback_filters[0]]["filter_states"][each_callback_filters[1]])
+                    if 'filter_states' in states[each_callback_filters[0]] and \
+                            each_callback_filters[1] in states[each_callback_filters[0]]['filter_states']:
+                        each_callback_filters[2].load_state_dict(states[each_callback_filters[0]]['filter_states'][each_callback_filters[1]])
                 else:
                     _duplicated_callback_names.add(each_callback_filters[0])
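
The new except clause above follows a common dispatch pattern: a control-flow exception such as EarlyStopException must pass through untouched, while genuine errors are logged with the offending callback before propagating. A self-contained sketch of that pattern (simplified names, not fastNLP's actual classes):

    class EarlyStopException(BaseException):
        pass  # raised by a callback to request that training stop; not an error

    def run_callbacks(callback_fns, *args, **kwargs):
        for fn in callback_fns:
            try:
                fn(*args, **kwargs)
            except EarlyStopException:
                raise  # expected control flow: re-raise without error logging
            except BaseException:
                print(f"callback {fn.__name__} raised an exception")  # stand-in for logger.error
                raise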




fastNLP/core/callbacks/checkpoint_callback.py  (+7, -20)

@@ -19,7 +19,7 @@ class CheckpointCallback(Callback):
                  only_state_dict: bool = True, model_save_fn: Optional[Callable] = None, save_object: str = 'model',
                  save_evaluate_results=True, **kwargs):
         """
-        A callback that saves model checkpoints; the directory layout and file-naming rules of what it saves are as follows::
+        A callback that saves checkpoints; the directory layout and file-naming rules of what it saves are as follows::

             - folder/
                 - YYYY-mm-dd-HH_MM_SS_fffff/  # created automatically from the launch time of the current script
@@ -29,8 +29,9 @@ class CheckpointCallback(Callback):
                     - {save_object}-epoch_{epoch_idx}-batch_{global_batch_idx}-exception_{exception_type}/  # saved on exception
                     - {save_object}-epoch_{epoch_idx}-batch_{global_batch_idx}-{monitor}_{monitor_value}/  # file name when the topk condition is met

-        If model_save_fn is None, a fastnlp_model.pkl.tar file is generated inside each of the folders above.
-        If model_save_fn is not None, fastNLP passes the absolute path of the folder to that function and does not save the model under that folder itself.
+        If model_save_fn is None, a fastnlp_model.pkl.tar file is generated inside each of the folders above. If model_save_fn is not
+        None, fastNLP passes the absolute path of the folder to that function and does not save the model under that folder itself. By
+        default this checkpoint only saves the state of the model; if you also need to save the state of the Trainer so that training
+        can be resumed from the checkpoint, use ``save_object='trainer'``.

         :param monitor: the metric value to monitor. If no exact match is found in the evaluation results, the
             longest-common-substring algorithm picks the best-matching name as the monitor. If None, the monitor configured on the
             Trainer is used. A function may also be passed, which takes the evaluation res...
@@ -46,22 +47,14 @@ class CheckpointCallback(Callback):
         :param only_state_dict: whether to save only the state_dict of the model. Has no effect when model_save_fn is not None.
         :param model_save_fn: a custom save function, called whenever a save is triggered; it should accept a folder as its parameter
             and return nothing. If a model_save_fn function is passed, fastNLP no longer performs any model-related saving itself. With several cards, we run this function on rank 0 only.
-        :param save_object: one of ['trainer', 'model']; whether the object saved is trainer+model or just the model.
+        :param save_object: one of ['trainer', 'model']; whether the object saved is ``trainer+model`` or just the ``model``. If the
+            ``trainer`` object is saved, the state of :class:~fastNLP.Trainer is saved as well, and training can be resumed from the
+            checkpoint via :meth:`Trainer.load`. If the ``Model`` object is saved, its weights can be loaded via :meth:`Trainer.load_model`.
         :param save_evaluate_results: whether to save the evaluate results. If True, a fastnlp_evaluate_results.json file recording
             the current results is additionally saved in the folder of each saved topk model. Only useful when topk is set; defaults to True.
         :param kwargs:
         """
         super().__init__()
-        if folder is None:
-            logger.warning(
-                "Parameter `folder` is None, and we will use the current work directory to find and load your model.")
-            folder = Path.cwd()
-        folder = Path(folder)
-        if not folder.exists():
-            raise NotADirectoryError(f"Path '{folder.absolute()}' is not existed!")
-        elif folder.is_file():
-            raise ValueError("Parameter `folder` should be a directory instead of a file.")

         if every_n_epochs is not None:
             if not isinstance(every_n_epochs, int) or every_n_epochs < 1:
                 raise ValueError("Parameter `every_n_epochs` should be an int and greater than or equal to 1.")
@@ -74,12 +67,6 @@ class CheckpointCallback(Callback):
         else:
             every_n_batches = sys.maxsize  # so that no number divides it evenly

-        if topk is not None:
-            if not isinstance(topk, int):
-                raise ValueError("Parameter `topk` should be an int.")
-        else:
-            topk = 0

         if on_exceptions is not None:
             if not isinstance(on_exceptions, Sequence):
                 on_exceptions = [on_exceptions]
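
A hypothetical usage sketch based on the parameters documented above (folder, every_n_epochs, topk, monitor and save_object all appear in this diff; the exact import path is an assumption):

    from fastNLP.core.callbacks import CheckpointCallback

    ckpt = CheckpointCallback(
        folder='checkpoints',   # a YYYY-mm-dd-HH_MM_SS_fffff/ run folder is created inside
        every_n_epochs=1,       # save at the end of every epoch
        topk=1,                 # additionally keep the best result according to `monitor`
        monitor='acc',
        save_object='trainer',  # also save Trainer state so training can be resumed
    )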


fastNLP/core/callbacks/load_best_model_callback.py  (+2, -1)

@@ -19,7 +19,8 @@ class LoadBestModelCallback(HasMonitorCallback):
                  model_load_fn:Optional[Callable] = None,
                  delete_after_train:bool = True):
         """
-        Saves the model with the best monitor value and reloads it when training ends. The best model can only be loaded if training ends normally.
+        Saves the model with the best monitor value and reloads it when training ends; by default the weight file is deleted after it
+        has been loaded. The best model can only be loaded if training ends normally.

         :param str monitor: the metric value to monitor. If no exact match is found in the evaluation results, the
             longest-common-substring algorithm picks the best-matching name as the monitor. If None, the monitor configured on the
             Trainer is used. A function may also be passed, which takes the evaluation res...


fastNLP/core/callbacks/topk_saver.py  (+2, -3)

@@ -33,9 +33,8 @@ class Saver:
         :param kwargs: more parameters passed through to the Trainer.save() or Trainer.save_model() interface.
         """
         if folder is None:
-            logger.rank_zero_warning(
-                "Parameter `folder` is None, and we will use the current work directory to find and load your model.")
-            folder = Path.cwd()
+            folder = Path.cwd().absolute()
+            logger.info(f"Parameter `folder` is None, and we will use {folder} to save and load your model.")
         folder = Path(folder)
         if not folder.exists():
             folder.mkdir(parents=True, exist_ok=True)
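
A tiny self-contained illustration of the fallback introduced above: when no folder is given, the absolute current working directory becomes the save/load root, and the directory is created on demand:

    from pathlib import Path

    folder = None
    if folder is None:
        folder = Path.cwd().absolute()
    folder = Path(folder)
    if not folder.exists():
        folder.mkdir(parents=True, exist_ok=True)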


fastNLP/core/callbacks/utils.py  (+2, -2)

@@ -8,8 +8,8 @@ from fastNLP.core.utils.utils import _get_fun_msg

 def _get_monitor_value(monitor: Union[callable, str], real_monitor: Optional[str], res: dict) ->Tuple[str, float]:
     """
-    Looks up monitor in res and returns it. If monitor is not found, _real_monitor is tried; if _real_monitor is None, matching
-    falls back to the value of monitor.
+    Looks up ``monitor`` in ``res`` and returns it. If ``monitor`` is not found, ``_real_monitor`` is tried; if ``_real_monitor`` is
+    ``None``, matching falls back to the value of ``monitor``.

     :param monitor:
     :param real_monitor:


fastNLP/core/collators/padders/get_padder.py  (+1, -1)

@@ -121,7 +121,7 @@ def get_padder(batch_field:Sequence[Any], pad_val, dtype, backend, field_name)->
             # ele_dtype is passed as None here to prevent a paddle tensor from being converted into a torch tensor
             return TorchTensorPadder(pad_val=pad_val, ele_dtype=None, dtype=dtype)
         elif backend == 'paddle':
-            return PaddleTensorPadder(pad_val=pad_val, ele_dtype=None, dtype=dtype)
+            return PaddleTensorPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype)
         elif backend == 'jittor':
             return JittorTensorPadder(pad_val=pad_val, ele_dtype=ele_dtype, dtype=dtype)
         else:


fastNLP/core/collators/padders/paddle_padder.py  (+4, -1)

@@ -141,7 +141,10 @@ class PaddleTensorPadder(Padder):

         shapes = [field.shape for field in batch_field]
         max_shape = [len(batch_field)] + [max(*_) for _ in zip(*shapes)]
-        array = np.full(max_shape, fill_value=pad_val)
+        if isinstance(batch_field[0], paddle.Tensor):
+            array = paddle.full(max_shape, fill_value=pad_val, dtype=dtype)
+        else:
+            array = np.full(max_shape, fill_value=pad_val, dtype=batch_field[0].dtype)
         for i, field in enumerate(batch_field):
             slices = (i, ) + tuple(slice(0, s) for s in shapes[i])
             array[slices] = field
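
A self-contained numpy illustration of the padding technique used in this method: allocate one array of the batch-wide maximum shape pre-filled with the pad value, then copy each field into its leading sub-slice:

    import numpy as np

    batch_field = [np.array([1, 2, 3]), np.array([4, 5])]
    pad_val = 0

    shapes = [field.shape for field in batch_field]
    max_shape = [len(batch_field)] + [max(sizes) for sizes in zip(*shapes)]
    array = np.full(max_shape, fill_value=pad_val, dtype=batch_field[0].dtype)
    for i, field in enumerate(batch_field):
        slices = (i,) + tuple(slice(0, s) for s in shapes[i])
        array[slices] = field

    print(array)
    # [[1 2 3]
    #  [4 5 0]]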


fastNLP/core/collators/padders/torch_padder.py  (+2, -2)

@@ -118,8 +118,8 @@ class TorchTensorPadder(Padder):
                 batch_field = [torch.tensor(field.tolist(), dtype=dtype) for field in batch_field]
             else:
                 device = batch_field[0].device
-            if dtype is None:
-                dtype = batch_field[0].dtype
+                if dtype is None:
+                    dtype = batch_field[0].dtype
         except AttributeError:
             raise RuntimeError(f"If the field is not a torch.Tensor (it is {type(batch_field[0])}), "
                                f"it must have tolist() method.")


fastNLP/core/controllers/evaluator.py  (+8, -2)

@@ -56,6 +56,8 @@ class Evaluator:
            * ddp_kwargs -- parameters for initializing ``DistributedDataParallel`` when ``TorchDDPDriver`` is used; for example, pass
             {'find_unused_parameters': True} to fix errors caused by parameters that do not take part in the forward pass;
            * torch_non_blocking -- the non_blocking argument of the to method of pytorch tensors;
+           * *data_device* -- if the user's model device (the parameter model_device in the Driver) is None, we move the data onto data_device;
+            note that if model_device is None, data_device has no effect;
            * *model_use_eval_mode* (``bool``) --
             whether to put the model into eval state during evaluate. In eval state, the model's
             dropout and batch normalization are turned off. Defaults to True. If False, fastNLP does not touch the model's evaluate state at all. Regardless...
@@ -234,8 +236,7 @@ class Evaluator:
         """
         Calls the reset() method of all metrics to clear accumulated state.

-        Returns:
-
+        :return:
         """
         self.metrics_wrapper.reset()

@@ -357,6 +358,11 @@ class _MetricsWrapper:
             metric.update(res)

     def reset(self):
+        """
+        Resets the state held in the Metric.
+
+        :return:
+        """
         for metric in self._metrics:
             if _is_allennlp_metric(metric):
                 metric.get_metric(reset=True)
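
A minimal sketch (simplified, not fastNLP's actual class) of the reset dispatch visible in the hunk above: every wrapped metric is cleared through whatever reset interface its backend exposes:

    class MetricsWrapper:
        def __init__(self, metrics):
            self._metrics = metrics

        def reset(self):
            for metric in self._metrics:
                if hasattr(metric, 'get_metric'):   # allennlp-style metrics reset through get_metric
                    metric.get_metric(reset=True)
                else:                               # fastNLP / torchmetrics style expose reset()
                    metric.reset()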


fastNLP/core/controllers/trainer.py  (+277, -65)

@@ -1,4 +1,10 @@
-from typing import Union, Optional, List, Callable, Dict, Sequence, BinaryIO, IO
+"""
+``Trainer`` is fastNLP's dedicated trainer for model training. It supports several different driver modes (``Driver``): not only the
+most commonly used DDP, but also domestically developed training frameworks such as jittor. The new fastNLP adds convenient callback
+function decorators and supports customizing your own training loop; with this trainer, users only need to implement the model part
+and can leave the training-level logic entirely to fastNLP.
+"""
+
+from typing import Union, Optional, List, Callable, Dict, BinaryIO
 from functools import partial
 from collections import defaultdict
 import copy
@@ -7,7 +13,6 @@ from dataclasses import is_dataclass
 import os
 from pathlib import Path
 import io
-import inspect

 __all__ = [
     'Trainer',
@@ -62,12 +67,20 @@ class Trainer(TrainerEventTrigger):
             **kwargs
     ):
         r"""
-        `Trainer` is fastNLP's dedicated trainer for model training; it supports several driver modes, not only the most commonly
-        used DDP but also domestically developed frameworks such as jittor; the new fastNLP adds convenient callback decorators and
-        supports customizing your own training loop; users only implement the model and leave the training-level logic to fastNLP;
-        :param model: the model to train; currently pytorch is supported;
-        :param driver: the concrete driver mode used to train the model, which should be one of ["torch",]; jittor, paddle and other
+        :param model: the model to train, e.g. ``torch.nn.Module``;
+
+        .. note::
+
+            When using pytorch, note that the parameter ``model`` is in most cases ``nn.Module``. You can still cover other
+            situations through some specific combinations, as shown below:
+
+            1. When you want to use ``DataParallel``, you should use ``TorchSingleDriver``, which means the ``device`` parameter you
+            pass when initializing the ``Trainer`` should not be a ``List``;
+
+            2. When you choose to call ``init_process_group`` yourself (in which case the ``model`` you pass must be a
+            ``DistributedDataParallel``), you should use ``TorchDDPDriver``, which means you need to launch training through
+            ``python -m torch.distributed.launch``; the parameter ``device`` should then be set to None (we ignore it in that case);
+            see the more detailed explanation of the parameter ``device`` below.
+
+        :param driver: the concrete driver mode used to train the model, which should be one of ["torch"]; jittor, paddle and other
             domestic frameworks will be added later; "torch" stands for either ``TorchSingleDriver`` or ``TorchDDPDriver``; which one
             is used depends on the setting of the parameter ``device``;
         :param train_dataloader: the training dataset; note that it must be a single dataset, not a List or a Dict;
@@ -80,79 +93,248 @@ class Trainer(TrainerEventTrigger):
             the accepted inputs for device are as follows:

             * *str*: e.g. 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.;
-            * *torch.device*: loads the model onto the ``torch.device``;
+            * *torch.device*: e.g. 'torch.device("cuda:0")';
             * *int*: training uses the ``gpu`` whose ``device_id`` equals this value; if the value is -1, all gpus are used by default and the driver instance is `TorchDDPDriver`;
             * *list(int)*: more than one device should be set this way; note that we always use ``TorchDDPDriver`` in this case, whether the list you pass has length 1 or any other value;
-            * *None*: if None, the model is not touched at all;
+            * *None*: device is None only when the user starts the ddp processes themselves through the parallel-training launch script provided by the training framework;
+
+        .. note::
+
+            If you want to use ``TorchDDPDriver``, you should initialize the ``Trainer`` with::
+
+                Trainer(driver="torch", device=[0, 1])
+
+            Note that if ``device=[0]``, we still use ``TorchDDPDriver``.
+
+            If you want to use ``TorchSingleDriver``, initialize the ``Trainer`` with::
+
+                Trainer(driver="torch", device=0)

-        .. node::
+        .. warning::

-            If you want to use ``TorchDDPDriver``,
+            Note that the parameter ``device`` may be ``None`` only when you launch ddp training through the parallel-training launch
+            script of pytorch or another training framework itself!
+
+            For example, when you use::
+
+                python -m torch.distributed.launch --nproc_per_node 2 train.py
+
+            to use ``TorchDDPDriver``, the parameter ``device`` is no longer effective (whether or not you call
+            ``init_process_group`` yourself); we obtain the concrete gpu device of the current process directly through
+            ``torch.device(f"cuda:{local_rank}")``. You therefore need to use ``os.environ["CUDA_VISIBLE_DEVICES"]`` to specify the
+            concrete gpu devices to use.
+
+            Another point to note: when you have not chosen to call ``init_process_group`` yourself, we still help you move the model
+            and the data onto the concrete gpu device used by the current process. But if you choose to call ``init_process_group``
+            yourself before the ``Trainer`` is initialized (i.e. before the driver's ``setup``), moving the model is entirely your
+            own responsibility. For the data, if you specify the parameter ``data_device`` when initializing the ``Trainer``, we move
+            the data onto ``data_device``; if it is None, moving the data onto the right device is up to you.
+
+            For more details on using ``TorchDDPDriver``, see :class:`fastNLP.core.drivers.torch_driver.TorchDDPDriver`.

         :param n_epochs: the total number of training epochs, 20 by default;
         :param evaluate_dataloaders: the validation datasets; either a single dataset or several datasets; when several datasets are passed, note that it must be a Dict; defaults
-            to None;
-        :param batch_step_fn: customizes the function executed for each train batch. The function should accept two parameters,
-            `trainer` and `batch`, and needs no return value; see the batch_step_fn function in
-            fastNLP.core.controllers.loops.train_batch_loop.TrainBatchLoop.
-        :param evaluate_batch_step_fn: customizes the function executed for each evaluate batch. The function should accept two
-            parameters, `evaluator` and `batch`, and needs no return value; see the batch_step_fn function in
-            fastNLP.core.controllers.loops.evaluate_batch_loop.EvaluateBatchLoop.
-        :param train_fn: controls which function of the model the `Trainer` calls in the forward pass during training, e.g.
-            `train_step` or `forward`; defaults to None, in which case we use `train_step` as the forward function by default and,
-            if that method is not found on the model, the model's default forward function is used.
-        :param evaluate_fn: controls the mode of the `Evaluator` built into the `Trainer`; should be None or a string; used the same
-            way as train_fn; note that we pass it directly to the Evaluator built into the Trainer (if not None); if None, we first
-            look for an evaluate_step function on the model and fall back to the forward function if there is none.
-        :param callbacks: callback classes triggered during training; this parameter should be a list whose elements all inherit from the `Callback` class;
-        :param metrics: should be a dict whose keys are monitors, e.g. {"acc1": AccMetric(), "acc2": AccMetric()};
-        :param evaluate_every: can be a negative number, a positive number or a function; a negative number means evaluating once
-            every that many epochs; a positive number means evaluating once every that many batches; a function means a
-            user-supplied function controlling the evaluate frequency of the Trainer, which should accept the current trainer object
-            as its parameter and return a bool, True meaning evaluate is needed; it is called after every batch to decide whether to evaluate.
-        :param input_mapping: should be a dict or a function describing what mapping is applied after a batch of training data is
-            fetched in the current step; if it is a dict and the batch is also a `Dict`, the keys of batch that also appear in
-            input_mapping are renamed to the corresponding keys' values in input_mapping; if the batch is a `dataclass`, we first
-            convert it into a Dict and then apply the conversion above; if the batch is of any other type, we raise an error
-            directly; if input_mapping is a function, the fetched batch is passed into that function unchanged;
-            note that this parameter is also passed into the `Evaluator`; you can therefore use it to move a training batch onto the
-            right machine (e.g. when the parameter `device` is None);
-            if train and evaluate need different input_mappings, set train_input_mapping and evaluate_input_mapping.
-        :param output_mapping: should be a dict or a function. Works like input_mapping but converts the output; if output_mapping
-            is a function, the model output is passed into it directly; if it is a `Dict`, the batch must be of type `Dict` or
-            `dataclass`; if the batch is a `Dict`, the keys of batch that also appear in output_mapping are renamed to the
-            corresponding keys' values in output_mapping; if the batch is a `dataclass`, we first convert it into a Dict and then apply the conversion above;
-            if train and evaluate need different output_mappings, set train_output_mapping and evaluate_output_mapping.
-        :param model_wo_auto_param_call: whether to turn off calling our auto_param_call during training to automatically match the
-            parameters of the batch and the forward function; if False and the batch is a dict, we extract the objects forward needs
-            from the batch and pass them into the forward function; if True, the batch is passed through to the model untouched.
-            Note that this applies to `train_step`, `evaluate_step` and `test_step`;
-        :param accumulation_steps: the number of gradient-accumulation steps, i.e. the optimizer steps once every that many batches; defaults to 1;
-        :param fp16: whether to enable mixed-precision training; defaults to False;
-        :param monitor: the default monitor metric name when evaluate_dataloaders is present. Callbacks that have a monitor
-            parameter but did not set it at init take this value. If no exact match is found in the evaluation results, the
-            longest-common-substring algorithm picks the best-matching name as the monitor. A function may also be passed, taking
-            the evaluation results (a dict) and returning a float as the monitor result.
-            Meaningless if evaluate_dataloaders and metrics are not provided.
-        :param larger_better: whether larger monitor values are better.
-        :param marker: marks a Trainer instance so that when the user calls the `Trainer.on` function, the callback function is tied to that concrete 'trainer' instance; defaults to None;
-        :param kwargs: some other parameters that may be needed, see the explanation below
+            to None;
+        :param batch_step_fn: customizes the function executed to run one batch of data forward during training. The function should
+            accept two parameters, ``trainer`` and ``batch``, and needs no return value; for where it is used and more details, see
+            :meth:`fastNLP.core.controllers.TrainBatchLoop.batch_step_fn`;
+        :param evaluate_batch_step_fn: customizes the function executed to run one batch of data forward during validation. The
+            function should accept two parameters, ``evaluator`` and ``batch``, and needs no return value; see
+            :meth:`fastNLP.core.controllers.EvaluateBatchLoop.batch_step_fn`;
+        :param train_fn: controls which function of the model the ``Trainer`` calls in the forward pass during training, e.g.
+            ``train_step`` or ``forward``; defaults to ``None``, in which case we use ``train_step`` as the forward function by
+            default and, if the model's defining class has no such method, the model's default forward function is used, e.g.
+            ``forward`` for pytorch.
+
+        .. note::
+            In fastNLP, the lookup logic for the forward function used during training is as follows:
+
+            1. If ``train_fn`` is None, the method ``Model.train_step`` is looked up on the model's class Model; if it is not found,
+            ``Model.forward`` is used by default;
+            2. If ``train_fn`` is a string, e.g. 'my_step_fn', we first look up the method ``Model.my_step_fn`` on the model's class
+            Model and raise an error directly if it is not found;
+
+        :param evaluate_fn: controls which function of the model the ``Evaluator`` built into the ``Trainer`` calls in the forward
+            pass during validation; should be ``None`` or a string; used the same way as train_fn; see
+            :class:`fastNLP.core.controllers.Evaluator` for details;
+        :param callbacks: callback classes triggered during training; this parameter should be a list whose elements all inherit
+            from the ``Callback`` class; see :class:`fastNLP.core.callbacks.Callback` for details;
+        :param metrics: passed to the ``Evaluator`` instance inside the ``Trainer`` for validation during training. It should be a
+            dict whose keys are monitors, e.g. {"acc1": AccMetric(), "acc2": AccMetric()};
+
+            The kinds of ``metric`` we currently support are the following:
+
+            1. fastNLP's own ``metric``: see :class:`fastNLP.core.metrics.Metric` for details;
+            2. torchmetrics;
+            3. allennlp.training.metrics;
+            4. paddle.metric;
+
+        :param evaluate_every: controls how often the ``Evaluator`` inside the ``Trainer`` validates; it can be a negative number, a
+            positive number or a function:
+
+            1. a negative number means validating once every that many ``epoch``s;
+            2. a positive number means validating once every that many ``batch``es;
+            3. a function means a user-supplied function controlling the evaluate frequency; it should accept the current trainer
+            object as its parameter and return a bool, where True means evaluate is needed; it is called after every ``batch`` ends
+            to decide whether to evaluate;
+
+        .. note::
+
+            If the parameter ``evaluate_every`` is a function, it should look like:
+
+            >>> def my_evaluate_every(trainer) -> bool:
+            ...     if (trainer.global_forward_batches+1) % 1000 == 0:
+            ...         return True
+            ...     else:
+            ...         return False
+
+            This function makes the ``Evaluator`` built into the ``Trainer`` validate once every 1000 batches;
+
+            Another thing to note is that this function is called at the end of every batch, and the ``Evaluator`` validates only
+            when it returns ``True``;
+
+        :param input_mapping: should be a dict or a function describing what mapping is applied after a batch of training data is
+            fetched in the current step:
+
+            1. If ``input_mapping`` is a dict:
+
+                1. If the batch is also a ``Dict``, the keys of batch that also appear in ``input_mapping`` are renamed to the
+                corresponding ``key``'s ``value`` in ``input_mapping``;
+                2. If the batch is a ``dataclass``, we first convert it into a ``Dict`` and then apply the conversion above;
+                3. If the batch is of any other type, we raise an error directly;
+            2. If ``input_mapping`` is a function, the fetched batch is passed into that function unchanged, with no other handling;
+
+            Note that this parameter is also passed into the ``Evaluator``; you can therefore use it to move a batch of training
+            data onto the right machine (e.g. when the parameter ``device`` is ``None``);
+            if the ``Trainer`` and the ``Evaluator`` need different ``input_mapping``s, set ``train_input_mapping`` and
+            ``evaluate_input_mapping`` separately.

+        :param output_mapping: should be a dict or a function. Works like ``input_mapping``, except it converts the output:
+
+            1. If ``output_mapping`` is a ``Dict``, the model's output must be of type ``Dict`` or ``dataclass``:
+
+                1. If the model's output is a ``Dict``, the keys of the output that also appear in ``output_mapping`` are renamed to
+                the corresponding key's value in ``output_mapping``;
+                2. If the model's output is a ``dataclass``, we first convert it into a Dict and then apply the conversion above;
+            2. If ``output_mapping`` is a function, the model's output is passed into that function directly;
+
+            If the ``Trainer`` and the ``Evaluator`` need different ``output_mapping``s, set ``train_output_mapping`` and
+            ``evaluate_output_mapping`` separately;
+
+        .. note::
+
+            ``input_mapping`` and ``output_mapping`` are closely related to a special fastNLP concept, **'parameter binding'**; they
+            also exist so that fastNLP's parameter matching can run correctly;
+
+            .. todo::
+                link the documentation on parameter matching later;
+
+        .. warning::
+
+            If the ``Trainer``'s parameter ``output_mapping`` is not ``None``, make sure what it returns is a dict that contains the
+            key **'loss'**;
+
+        :param model_wo_auto_param_call: whether to turn off calling our ``auto_param_call`` function during training to
+            automatically match the parameters of the batch and of the forward function;
+
+            1. If this value is ``False`` and the batch is a dict, we extract the objects the **forward function** needs from the
+            batch according to its parameters and pass them into the **forward function**;
+            2. If this value is ``True``, the batch is passed through to the model untouched;
+
+            .. todo::
+                link the documentation on parameter matching later;
+
+            see :func:`fastNLP.core.utils.auto_param_call` for the function ``auto_param_call``;
+
+        :param accumulation_steps: the number of gradient-accumulation steps, i.e. the optimizer only steps once every that many
+            batches; defaults to 1;
+        :param fp16: whether to enable mixed-precision training; defaults to False;
+        :param monitor: some special ``Callback``s, e.g. :class:`fastNLP.core.callbacks.CheckpointCallback`, need the parameter
+            ``monitor`` to pick the currently evaluated value out of the ``Evaluator``'s validation results and decide whether to
+            perform their special action. For example, for ``CheckpointCallback``, if we want the ``Evaluator`` to validate once per
+            epoch and to keep the best result seen since training began, we set things up like this:
+
+            .. code-block::
+
+                trainer = Trainer(
+                    ...,
+                    metrics={'acc': accMetric()},
+                    callbacks=[CheckpointCallback(
+                        ...,
+                        monitor='acc',
+                        topk=1
+                    )]
+                )
+
+            This means that for the ``CheckpointCallback``, *'acc'* is a monitored indicator used after validation to pick out of
+            the ``Evaluator``'s results the value of the metric it watches.
+
+            The role of the ``Trainer`` parameter ``monitor`` is to set this value for *callback* instances that need the
+            ``monitor`` parameter but did not set one. For a more detailed explanation of the ``monitor`` parameter, see
+            :class:`fastNLP.core.callbacks.CheckpointCallback`;
+
+            Note that this parameter is only effective when the ``Evaluator`` built into the ``Trainer`` is not None and there are
+            *callback* instances that need it but did not set it;
+
+        :param larger_better: for *callback*s that need the parameter ``monitor``, whether larger ``monitor`` values are better;
+            similar to ``monitor``, its role is to set this value for *callback* instances that need the ``larger_better`` parameter
+            but did not set it;
+
+            Note that this parameter is only effective when the ``Evaluator`` built into the ``Trainer`` is not None and there are
+            *callback* instances that need it but did not set it;
+
+        :param marker: marks a ``Trainer`` instance so that when the user calls the ``Trainer.on`` function, the function is tied to
+            that concrete ``Trainer`` instance; defaults to None;
+
+        .. note::
+
+            The main use case for marker is a script that contains several ``Trainer`` instances and several functions decorated
+            with ``Trainer.on``, where different functions belong to different ``Trainer`` instances;
+
+            In that case, setting the ``marker`` parameter of the decorator ``Trainer.on`` and the ``marker`` parameter of the
+            ``Trainer`` to the same value makes the function run only in that one ``Trainer`` instance; for example,
+
+            .. code-block::
+
+                @Trainer.on(Event.on_train_begin(), marker='trainer1')
+                def fn(trainer):
+                    ...
+
+                trainer = Trainer(
+                    ...,
+                    marker='trainer1'
+                )
+
+            Another point worth stating: if a function decorated with ``Trainer.on`` does not specify a ``marker``, it is handed to
+            the first ``Trainer`` instance whose code follows it, even if that ``Trainer``'s marker is not None; see
+            :meth:`~fastNLP.core.controllers.Trainer.on` for details
+
         :kwargs:
             * *torch_kwargs* -- parameters for the concrete driver instance when the specified ``driver`` is 'torch':
                 * ddp_kwargs -- parameters for initializing ``DistributedDataParallel`` when ``TorchDDPDriver`` is used; for
                  example, pass {'find_unused_parameters': True} to fix errors caused by parameters that do not take part in the forward pass;
                * set_grad_to_none -- whether grads are set to None after every optimizer update during training;
                * torch_non_blocking -- the non_blocking argument of the to method of pytorch tensors;
-            * *data_device* -- if the user's model device (the parameter model_device in the Driver) is None, we move the data onto data_device;
-                note that if model_device is None, data_device has no effect;
-            * *use_dist_sampler* -- whether to use a distributed sampler. With several cards, the distributed sampler automatically
-                decides which samples each card reads, so that the samples of all cards within one epoch add up to one full pass
-                over the dataset. Set by default according to whether the driver is distributed.
-            * *evaluate_use_dist_sampler* -- whether the dataloader's sampler is replaced with a distributed sampler in the Evaluator when running distributed; defaults to True;
+            * *data_device* -- a concrete driver instance has a ``model_device`` and a ``data_device``; the former is the device the
+                model lives on, the latter is the device the data should be moved onto when ``model_device`` is None;
+
+                .. note::
+
+                    Note that in the vast majority of cases you will not use this parameter!
+
+                    1. When the driver instance's ``model_device`` is not None, this parameter has no effect;
+                    2. For pytorch, the driver instance's ``model_device`` is None only when the user launches through
+                    ``python -m torch.distributed.launch`` and calls ``init_process_group`` themselves;
+
+            * *use_dist_sampler* -- whether to use a distributed ``sampler``. With several cards, the distributed ``sampler``
+                automatically decides which samples each card reads, so that the samples of all cards within one epoch add up to one
+                full pass over the dataset. Set by default according to whether the driver is distributed.
+            * *evaluate_use_dist_sampler* -- whether the dataloader's ``sampler`` is replaced with a distributed ``sampler`` in the
+                ``Evaluator`` when running distributed; defaults to ``True``;
            * *output_from_new_proc* -- should be a string describing how the output streams of the other processes in a
              multi-process driver are handled; its value should be one of ["all", "ignore", "only_error"]; any other value is taken
              as a folder name: we redirect the output streams of the other ranks to log files and store the log files in the folder
              named by this value; defaults to "only_error";
+
+                Note that this parameter is only effective when a distributed ``driver`` such as ``TorchDDPDriver`` is used;
            * *progress_bar* -- how progress is displayed; currently one of [None, 'raw', 'rich', 'auto'] or a RichCallback or
-              RawTextCallback object; defaults to auto, which uses RichCallback if the current terminal is detected as interactive
-              and RawTextCallback otherwise. To customize progress-bar parameters such as the print frequency, pass a RichCallback or RawTextCallback object.
-            * *train_input_mapping* -- same as input_mapping but only used in train. Mutually exclusive with input_mapping.
-            * *train_output_mapping* -- same as output_mapping but only used in train. Mutually exclusive with output_mapping.
-            * *evaluate_input_mapping* -- same as input_mapping but only used in evaluate. Mutually exclusive with input_mapping.
-            * *evaluate_output_mapping* -- same as output_mapping but only used in evaluate. Mutually exclusive with output_mapping.
+              RawTextCallback object; defaults to auto, which uses RichCallback if the current terminal is detected as interactive
+              and RawTextCallback otherwise. To customize progress-bar parameters such as the print frequency, pass a RichCallback or RawTextCallback object.
+            * *train_input_mapping* -- same as input_mapping but only used in the ``Trainer``. Mutually exclusive with input_mapping.
+            * *train_output_mapping* -- same as output_mapping but only used in the ``Trainer``. Mutually exclusive with output_mapping.
+            * *evaluate_input_mapping* -- same as input_mapping but only used in the ``Evaluator``. Mutually exclusive with input_mapping.
+            * *evaluate_output_mapping* -- same as output_mapping but only used in the ``Evaluator``. Mutually exclusive with output_mapping.
+
+        .. note::
+            The ``Trainer`` performs validation by directly initializing an ``Evaluator`` internally;
+            the ``Evaluator`` inside the ``Trainer`` defaults to None; if you need validation during training, make sure these
+            parameters are passed correctly:
+
+            required parameters: 1. ``metrics``; 2. ``evaluate_dataloaders``;
+
+            optional other parameters: 1. ``evaluate_batch_step_fn``; 2. ``evaluate_fn``; 3. ``evaluate_every``;
+            4. ``input_mapping``; 5. ``output_mapping``; 6. ``model_wo_auto_param_call``; 7. ``fp16``; 8. ``monitor``;
+            9. ``larger_better``;
+
+        .. warning::
+
+            If the ``Evaluator`` instance built into the ``Trainer`` is not ``None``, note that some ``Trainer`` parameters are
+            shared with the ``Evaluator``; they are:
+
+            1. the ``driver`` parameter of the ``Evaluator`` at initialization is the driver already instantiated by the
+            ``Trainer``; this makes some parameters useless for the ``Evaluator`` inside the ``Trainer``, e.g. ``device``,
+            ``torch_kwargs``, ``data_device`` and ``output_from_new_proc``;
+            2. ``input_mapping``, ``output_mapping``, ``model_wo_auto_param_call`` and ``fp16`` are shared between the ``Trainer``
+            and its default internal ``Evaluator``;
+
+            Of course, for ``input_mapping`` and ``output_mapping``, you can customize the ``Evaluator`` more finely through the
+            ``kwargs`` parameters ``evaluate_input_mapping`` and ``evaluate_output_mapping``.
+
+            On the other hand, note that parameters belonging exclusively to the ``Evaluator`` are only effective when the
+            ``Evaluator`` is not None.
         """
         self.model = model
@@ -174,7 +356,7 @@ class Trainer(TrainerEventTrigger):
         evaluate_input_mapping = kwargs.get('evaluate_input_mapping', None)
         evaluate_output_mapping = kwargs.get('evaluate_output_mapping', None)

-        train_input_mapping, train_output_mapping, evaluate_input_mapping, evaluate_output_mapping = \
+        train_input_mapping, train_output_mapping, evaluate_input_mapping, evaluate_output_mapping = \
             _get_input_output_mapping(input_mapping, output_mapping, train_input_mapping, train_output_mapping,
                                       evaluate_input_mapping, evaluate_output_mapping)

@@ -273,7 +455,7 @@ class Trainer(TrainerEventTrigger):
             if not (isinstance(progress_bar, str) or progress_bar is None):  # should be a ProgressCallback; take its name.
                 progress_bar = progress_bar.name
             self.evaluator = Evaluator(model=model, dataloaders=evaluate_dataloaders, metrics=metrics,
-                                       driver=self.driver, device=device, evaluate_batch_step_fn=evaluate_batch_step_fn,
+                                       driver=self.driver, evaluate_batch_step_fn=evaluate_batch_step_fn,
                                        evaluate_fn=evaluate_fn, input_mapping=evaluate_input_mapping,
                                        output_mapping=evaluate_output_mapping, fp16=fp16, verbose=0,
                                        use_dist_sampler=kwargs.get("evaluate_use_dist_sampler", None),
@@ -302,7 +484,7 @@ class Trainer(TrainerEventTrigger):
     def run(self, num_train_batch_per_epoch: int = -1, num_eval_batch_per_dl: int = -1,
             num_eval_sanity_batch: int = 2, resume_from: str = None, resume_training: bool = True,
             catch_KeyboardInterrupt=None):
-        """
+        r"""
         Note that for the very first run of checkpoint-resumed training, i.e. before any files for resuming have been saved,
         resume_from should be set to None and ModelCheckpoint should be used to save the files needed to resume training;
         :param num_train_batch_per_epoch: how many batches to run per epoch before stopping; -1 means it is decided by how many batches the dataloader has.
@@ -491,6 +673,36 @@ class Trainer(TrainerEventTrigger):
                 # do something
         # The function above runs at the start of every new batch of the Trainer, but only executes once every two batches.

+        .. note::
+
+            For example:
+
+            .. code-block::
+
+                @Trainer.on(Event.on_train_begin())
+                def fn1(trainer):
+                    ...
+
+                @Trainer.on(Event.on_train_epoch_begin())
+                def fn2(trainer):
+                    ...
+
+                trainer1 = Trainer(
+                    ...,
+                    marker='trainer1'
+                )
+
+                @Trainer.on(Event.on_fetch_data_begin())
+                def fn3(trainer):
+                    ...
+
+                trainer2 = Trainer(
+                    ...,
+                    marker='trainer2'
+                )
+
         Note that if you use this decorator to add callbacks to your training, make sure the code that adds the callback functions runs before the `Trainer` is instantiated;

         :param event: the specific callback moment; the user needs to specify which callback moment this callback function belongs to. The function run at each moment should contain...
@@ -646,7 +858,7 @@ class Trainer(TrainerEventTrigger):
         self.driver.save_model(folder, only_state_dict, **kwargs)
         self.driver.barrier()

-    def load_model(self, folder: Union[str, Path, BinaryIO, io.BytesIO], only_state_dict: bool = False,
+    def load_model(self, folder: Union[str, Path, BinaryIO, io.BytesIO], only_state_dict: bool = True,
                    model_load_fn: Optional[Callable] = None, **kwargs):
         """
         Load the model
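
The signature change above flips load_model's only_state_dict default to True, pairing it with state-dict saves. A hypothetical usage sketch (the constructed trainer and the paths are assumptions; only the argument names come from the signatures in this diff):

    trainer.save_model('outputs/best')                          # saves the model state
    trainer.load_model('outputs/best')                          # only_state_dict=True is now the default
    trainer.load_model('outputs/best', only_state_dict=False)   # for a fully pickled model object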


fastNLP/core/dataloaders/paddle_dataloader/fdl.py  (+2, -2)

@@ -162,9 +162,9 @@ class PaddleDataLoader(DataLoader):

     def get_batch_indices(self) -> List[int]:
         """
-        Gets the idx of the current batch
+        Gets the index of each sample in the current ``batch``.

-        :return:
+        :return: the indices of the current ``batch`` of data
         """
         return self.cur_batch_indices




fastNLP/core/dataloaders/prepare_dataloader.py  (+1, -1)

@@ -10,7 +10,7 @@ from ..samplers import RandomBatchSampler, RandomSampler
 from .torch_dataloader import prepare_torch_dataloader
 from .paddle_dataloader import prepare_paddle_dataloader
 from .jittor_dataloader import prepare_jittor_dataloader
-from ...envs import FASTNLP_BACKEND, SUPPORT_BACKENDS, _module_available
+from ...envs import FASTNLP_BACKEND, SUPPORT_BACKENDS
 from ..log import logger






fastNLP/core/dataloaders/torch_dataloader/fdl.py  (+2, -2)

@@ -170,9 +170,9 @@ class TorchDataLoader(DataLoader):

     def get_batch_indices(self) -> List[int]:
         """
-        Gets the idx of the current batch
+        Gets the index of each sample in the current ``batch``.

-        :return:
+        :return: the indices of the current ``batch`` of data
         """
         return self.cur_batch_indices
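
A hypothetical usage sketch of the method documented above (constructing the TorchDataLoader is assumed; only get_batch_indices itself comes from this diff):

    for batch in dataloader:                      # dataloader: a TorchDataLoader
        indices = dataloader.get_batch_indices()  # dataset indices of the samples in `batch`
        ...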




fastNLP/core/dataset/dataset.py  (+11, -10)

@@ -400,15 +400,16 @@ class DataSet:
                     new_field_name: str = None, num_proc: int = 0,
                     progress_desc: str = None, show_progress_bar: bool = True):
         r"""
-        Passes the field named `field_name` of every instance in the DataSet to func and collects its return value.
+        Passes the ``field`` named ``field_name`` of every ``instance`` in the :class:`~DataSet` to the function ``func`` and collects the function's return value.

-        :param field_name: which field is passed to func.
-        :param func: its input is the content of the field named `field_name` in the instance.
-        :param new_field_name: puts the content returned by func into the field `new_field_name`; if the name equals an existing
-            field, that field is overwritten. If None, no new field is created.
-        :param num_proc: the number of processes. Note that due to the nature of the python language, using that many processes causes that many times the memory growth.
-        :param progress_desc: the value of progress_desc, Main by default
-        :param show_progress_bar: whether to show the progress bar; shown by default
+        :param field_name: the name of the ``field`` passed to ``func``.
+        :param func: a function whose input is the content of the ``field`` named ``field_name`` in the ``instance``.
+        :param new_field_name: puts the content returned by ``func`` into the ``field`` named ``new_field_name``; if the name equals
+            an existing ``field``, it is overwritten. If ``None``, no ``field`` is overwritten or created.
+        :param num_proc: the number of processes to use. Note that due to the nature of ``python``, using that many processes causes that many times the memory growth.
+        :param progress_desc: the description string of the progress bar, ``Main`` by default.
+        :param show_progress_bar: whether to show the progress bar; shown by default.
+        :return: the return values obtained from the function ``func``.
         """
         assert len(self) != 0, "Null DataSet cannot use apply_field()."
         if not self.has_field(field_name=field_name):
@@ -451,8 +452,8 @@ class DataSet:
             apply_out = self._apply_process(num_proc, func, progress_desc=progress_desc,
                                             show_progress_bar=show_progress_bar, _apply_field=field_name)
         # Only the first element is checked for dict type; if it is a dict, all return values are assumed to be dicts; otherwise raise.
-        if not isinstance(apply_out[0], dict):
-            raise Exception("The result of func is not a dict")
+        if not isinstance(apply_out[0], Mapping):
+            raise Exception(f"The result of func is not a Mapping, but a {type(apply_out[0])}")

         for key, value in apply_out[0].items():
             results[key] = [value]

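For orientation, a minimal sketch of how ``apply_field`` is typically called; the ``DataSet`` contents below are illustrative and not part of this commit::

    from fastNLP import DataSet

    ds = DataSet({"raw_words": ["hello world", "this is fastNLP"]})
    # Apply a function to one field and store the return value in a new field.
    ds.apply_field(lambda words: len(words.split()), field_name="raw_words",
                   new_field_name="seq_len")
    print(list(ds["seq_len"]))  # [2, 3]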

+ 0
- 2
fastNLP/core/drivers/__init__.py View File

@@ -9,7 +9,6 @@ __all__ = [
"JittorDriver", "JittorDriver",
"JittorSingleDriver", "JittorSingleDriver",
"JittorMPIDriver", "JittorMPIDriver",
"TorchPaddleDriver",
'torch_seed_everything', 'torch_seed_everything',
'paddle_seed_everything', 'paddle_seed_everything',
'optimizer_state_to_device' 'optimizer_state_to_device'
@@ -18,7 +17,6 @@ __all__ = [
from .torch_driver import TorchDriver, TorchSingleDriver, TorchDDPDriver, torch_seed_everything, optimizer_state_to_device from .torch_driver import TorchDriver, TorchSingleDriver, TorchDDPDriver, torch_seed_everything, optimizer_state_to_device
from .jittor_driver import JittorDriver, JittorMPIDriver, JittorSingleDriver from .jittor_driver import JittorDriver, JittorMPIDriver, JittorSingleDriver
from .paddle_driver import PaddleDriver, PaddleFleetDriver, PaddleSingleDriver, paddle_seed_everything from .paddle_driver import PaddleDriver, PaddleFleetDriver, PaddleSingleDriver, paddle_seed_everything
from .torch_paddle_driver import TorchPaddleDriver
from .driver import Driver from .driver import Driver






+ 2
- 2
fastNLP/core/drivers/torch_driver/initialize_torch_driver.py View File

@@ -55,8 +55,8 @@ def initialize_torch_driver(driver: str, device: Optional[Union[str, "torch.devi
            elif each < 0:
                raise ValueError("When parameter `device` is 'Sequence' type, the value in it should be bigger than 0.")
            elif each >= _could_use_device_num:
-               raise ValueError("When parameter `device` is 'Sequence' type, the value in it should not be bigger than"
-                                " the available gpu number.")
+               raise ValueError(f"When parameter `device` is 'Sequence' type, the value in it should not be bigger than"
+                                f" the available gpu number:{_could_use_device_num}.")
        device = [torch.device(f"cuda:{w}") for w in device]
    elif device is not None and not isinstance(device, torch.device):
        raise ValueError("Parameter `device` is wrong type, please check our documentation for the right use.")


+ 6
- 0
fastNLP/core/drivers/torch_driver/torch_driver.py View File

@@ -167,6 +167,12 @@ class TorchDriver(Driver):
        """
        model = self.unwrap_model()
        res = torch.load(filepath, map_location='cpu')
+       if isinstance(res, dict) and only_state_dict is False:
+           logger.rank_zero_warning(f"It seems that {filepath} only contains a state dict, you may need to use "
+                                    f"`only_state_dict=True`")
+       elif not isinstance(res, dict) and only_state_dict is True:
+           logger.rank_zero_warning(f"It seems that {filepath} is not a state dict, you may need to use "
+                                    f"`only_state_dict=False`")
        if only_state_dict:
            model.load_state_dict(res)
        else:

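The warning added above distinguishes a checkpoint holding a bare ``state_dict`` from one holding a whole pickled module. The same heuristic as a standalone sketch (the file name is illustrative)::

    import torch

    res = torch.load("checkpoint.pt", map_location="cpu")
    if isinstance(res, dict):
        # a plain dict of tensors is almost certainly a state dict
        print("load it with `only_state_dict=True`")
    else:
        # anything else (e.g. a pickled nn.Module) is a whole model
        print("load it with `only_state_dict=False`")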

+ 0
- 5
fastNLP/core/drivers/torch_paddle_driver/__init__.py View File

@@ -1,5 +0,0 @@
__all__ = [
"TorchPaddleDriver",
]

from .torch_paddle_driver import TorchPaddleDriver

+ 0
- 193
fastNLP/core/drivers/torch_paddle_driver/torch_paddle_driver.py View File

@@ -1,193 +0,0 @@
from typing import Optional, Dict, Union, Callable, Tuple

from fastNLP.envs.imports import _NEED_IMPORT_PADDLE, _NEED_IMPORT_TORCH
from fastNLP.core.utils.utils import _get_fun_msg


if _NEED_IMPORT_PADDLE:
import paddle
from paddle.io import DataLoader as PaddleDataLoader
from paddle.optimizer import Optimizer as PaddleOptimizer

if _NEED_IMPORT_TORCH:
import torch
from torch.utils.data import DataLoader as TorchDataLoader
from torch.optim import Optimizer as TorchOptimizer

from fastNLP.core.drivers.driver import Driver
from fastNLP.envs.distributed import rank_zero_call
from fastNLP.core.utils.utils import auto_param_call, apply_to_collection
from fastNLP.core.log.logger import logger
from fastNLP.modules.mix_modules.mix_module import MixModule


__all__ = [
"TorchPaddleDriver",
]

class TorchPaddleDriver(Driver):
"""
    A driver for mixed torch and paddle models.
    Since two different frameworks are involved, multi-card support is inconvenient to implement; for now only single-card CPU and GPU are supported.
"""
def __init__(self, model, device: Optional[str] = None, **kwargs):
super(TorchPaddleDriver, self).__init__(model)

self.model_device = device
self.torch_non_blocking = kwargs.get("torch_non_blocking", None)
self.paddle_blocking = kwargs.get("paddle_blocking", None)

self._data_device = kwargs.get("_data_device", None)
if isinstance(self._data_device, int):
            # set data_device to a string of the form cuda:x
if self._data_device < 0:
                raise ValueError("Parameter `_data_device` cannot be smaller than 0.")
_could_use_device_num = paddle.device.cuda.device_count()
if self._data_device >= _could_use_device_num:
                raise ValueError("The gpu device that parameter `_data_device` specifies does not exist.")
self._data_device = f"cuda:{self._data_device}"
elif self._data_device is not None:
            raise ValueError("Parameter `_data_device` is wrong type, please check our documentation for the right use.")

def setup(self):
if self.model_device is not None:
paddle.device.set_device(self.model_device.replace("cuda", "gpu"))
self.model.to(self.model_device)

@staticmethod
def check_dataloader_legality(dataloader, dataloader_name, is_train: bool = False):
if is_train:
if not isinstance(dataloader, (TorchDataLoader, PaddleDataLoader)):
                raise ValueError(f"Parameter `{dataloader_name}` should be 'torch.utils.data.DataLoader' or `paddle.io.DataLoader` type, not {type(dataloader)}.")
else:
if not isinstance(dataloader, Dict):
raise ValueError(f"Parameter `{dataloader_name}` should be 'Dict' type, not {type(dataloader)}.")
else:
for each_dataloader in dataloader.values():
if not isinstance(each_dataloader, (TorchDataLoader, PaddleDataLoader)):
                        raise ValueError(f"Each dataloader of parameter `{dataloader_name}` should be "
                                         f"'torch.utils.data.DataLoader' or `paddle.io.DataLoader` "
                                         f"type, not {type(each_dataloader)}.")

@staticmethod
def _check_optimizer_legality(optimizers):
for each_optimizer in optimizers:
if not isinstance(each_optimizer, (TorchOptimizer, PaddleOptimizer)):
                raise ValueError(f"Each optimizer of parameter `optimizers` should be "
f"'torch.optim.Optimizer' or 'paddle.optimizers.Optimizer' type, "
f"not {type(each_optimizer)}.")

def step(self):
for optimizer in self.optimizers:
optimizer.step()

def backward(self, loss):
loss.backward()

def zero_grad(self):
for optimizer in self.optimizers:
if isinstance(optimizer, TorchOptimizer):
optimizer.zero_grad()
elif isinstance(optimizer, PaddleOptimizer):
optimizer.clear_grad()
else:
                raise ValueError("Unknown optimizer type.")

def model_call(self, batch, fn: Callable, signature_fn: Optional[Callable]) -> Dict:
if isinstance(batch, Dict) and not self.wo_auto_param_call:
return auto_param_call(fn, batch, signature_fn=signature_fn)
else:
return fn(batch)

def get_model_call_fn(self, fn: str) -> Tuple:
if hasattr(self.model, fn):
fn = getattr(self.model, fn)
if not callable(fn):
raise RuntimeError(f"The `{fn}` attribute is not `Callable`.")
logger.debug(f'Use {_get_fun_msg(fn, with_fp=False)}...')
return fn, None
elif fn in {"train_step", "evaluate_step"}:
logger.debug(f'Use {_get_fun_msg(self.model.forward, with_fp=False)}...')
return self.model, self.model.forward
else:
raise RuntimeError(f"There is no `{fn}` method in your {type(self.model)}.")

def predict_step(self, batch):
if isinstance(batch, Dict):
return auto_param_call(self._predict_step, batch)
else:
return self._predict_step(batch)

@rank_zero_call
def save_model(self, filepath: str, only_state_dict: bool = True, model_save_fn: Optional[Callable] = None):
r"""
        Saving the whole model is not supported yet.
"""
        if only_state_dict is False:
            logger.warn("TorchPaddleModule only supports saving state dicts for now.")
if model_save_fn is not None:
model_save_fn(filepath)
else:
model = self.unwrap_model()
self.move_model_to_device(model, "cpu")
self.model.save(filepath)
self.move_model_to_device(model, self.model_device)

def load_model(self, filepath: str):
"""
        The loading function for the model;

        :param filepath: the location of the saved file (must include the file name);
        :return:
"""
return self.model.load(filepath)

def save(self):
...

def load(self):
...

@staticmethod
def move_model_to_device(model: MixModule, device: str):
if device is not None:
model.to(device)

def unwrap_model(self):
return self.model

@staticmethod
def tensor_to_numeric(tensor):
if tensor is None:
return None

def _translate(_data):
return _data.tolist()

return apply_to_collection(
data=tensor,
dtype=(paddle.Tensor, torch.Tensor),
function=_translate
)

def set_model_mode(self, mode: str):
assert mode in {"train", "eval"}
getattr(self.model, mode)()

def get_model_device(self):
return self.model_device

@property
def data_device(self):
if self.model_device is not None:
return self.model_device
else:
return self._data_device

def set_model_mode(self, mode: str):
assert mode in {"train", "eval"}
getattr(self.model, mode)()

def set_sampler_epoch(self, dataloader: Union['TorchDataLoader', 'PaddleDataLoader'], cur_epoch_idx):
        # Ensures correctness when shuffle=True under ddp training, because the random seed used by the sampler's shuffle must be identical on every process;
return dataloader

+ 0
- 4
fastNLP/core/drivers/torch_paddle_driver/utils.py View File

@@ -1,4 +0,0 @@
from fastNLP.envs.imports import _NEED_IMPORT_PADDLE

if _NEED_IMPORT_PADDLE:
pass

+ 0
- 2
fastNLP/core/utils/__init__.py View File

@@ -11,7 +11,6 @@ __all__ = [
'is_in_fnlp_paddle_dist', 'is_in_fnlp_paddle_dist',
'is_in_paddle_launch_dist', 'is_in_paddle_launch_dist',
'f_rich_progress', 'f_rich_progress',
'torch_paddle_move_data_to_device',
'torch_move_data_to_device', 'torch_move_data_to_device',
'get_fn_arg_names', 'get_fn_arg_names',
'auto_param_call', 'auto_param_call',
@@ -32,7 +31,6 @@ from .jittor_utils import is_jittor_dataset, jittor_collate_wraps
from .paddle_utils import get_device_from_visible, paddle_to, paddle_move_data_to_device, get_paddle_device_id, get_paddle_gpu_str, is_in_paddle_dist, \ from .paddle_utils import get_device_from_visible, paddle_to, paddle_move_data_to_device, get_paddle_device_id, get_paddle_gpu_str, is_in_paddle_dist, \
is_in_fnlp_paddle_dist, is_in_paddle_launch_dist is_in_fnlp_paddle_dist, is_in_paddle_launch_dist
from .rich_progress import f_rich_progress from .rich_progress import f_rich_progress
from .torch_paddle_utils import torch_paddle_move_data_to_device
from .torch_utils import torch_move_data_to_device from .torch_utils import torch_move_data_to_device
from .utils import * from .utils import *




+ 1
- 1
fastNLP/core/utils/dummy_class.py View File

@@ -1,4 +1,4 @@
-import functools
+__all__ = []


class DummyClass:
    def __init__(self, *args, **kwargs):


+ 2
- 3
fastNLP/core/utils/rich_progress.py View File

@@ -1,7 +1,6 @@
"""
-This file provides fastNLP with a unified progress bar manager: only by sharing one Task object can the progress bar in the trainer and the progress bar in evaluation
-avoid conflicts

+This file provides ``fastNLP`` with a unified ``progress bar`` manager: only by sharing one ``Task`` object can the ``progress bar`` of
+:class:`~fastNLP.core.Trainer` and the ``progress bar`` of :class:`~fastNLP.core.Evaluator` avoid conflicts
"""
import sys
from typing import Any, Union, Optional


+ 0
- 49
fastNLP/core/utils/torch_paddle_utils.py View File

@@ -1,49 +0,0 @@
from typing import Any, Optional

from fastNLP.envs.imports import _NEED_IMPORT_PADDLE, _NEED_IMPORT_TORCH

if _NEED_IMPORT_PADDLE:
import paddle

if _NEED_IMPORT_TORCH:
import torch

__all__ = [
"torch_paddle_move_data_to_device",
]

from .utils import apply_to_collection
from .paddle_utils import paddle_to


def torch_paddle_move_data_to_device(batch: Any, device: Optional[str] = None, non_blocking: Optional[bool] = True,
data_device: Optional[str] = None) -> Any:
r"""
    Move a collection of data to the given device. Only paddle.Tensor and torch.Tensor objects are moved to the device; everything else stays unchanged.

    :param batch: the collection of data to move
    :param device: the target device
    :param non_blocking: whether the torch transfer may be asynchronous
    :param data_device: the device to fall back to when `device` is None
    :return: the same collection, but with all contained tensors residing on the new device;
"""

if device is None:
if data_device is not None:
device = data_device
else:
return batch

torch_device = device.replace("gpu", "cuda")
paddle_device = device.replace("cuda", "gpu")

def batch_to(data: Any) -> Any:
if isinstance(data, torch.Tensor):
data = data.to(torch_device, non_blocking=non_blocking)
elif isinstance(data, paddle.Tensor):
data = paddle_to(data, paddle_device)
return data

return apply_to_collection(batch, dtype=(paddle.Tensor, torch.Tensor), function=batch_to)

+ 84
- 75
fastNLP/core/utils/utils.py View File

@@ -10,10 +10,6 @@ from typing import Callable, List, Any, Dict, AnyStr, Union, Mapping, Sequence
from typing import Tuple, Optional
from time import sleep

-try:
-    from typing import Literal, Final
-except ImportError:
-    from typing_extensions import Literal, Final
import os
from contextlib import contextmanager
from functools import wraps
@@ -22,7 +18,6 @@ import numpy as np
from pathlib import Path

from fastNLP.core.log import logger
-from ...envs import SUPPORT_BACKENDS


__all__ = [
@@ -43,10 +38,10 @@ __all__ = [


def get_fn_arg_names(fn: Callable) -> List[str]:
    r"""
    Return the names of all parameters of a function.

-   :param fn: the function to inspect
-   :return: a list whose elements are the string names of the inspected function's parameters;
+   :param fn: the function to inspect
+   :return: a list whose elements are the string names of the parameters of ``fn``
    """
    return list(inspect.signature(fn).parameters)

@@ -54,24 +49,18 @@ def get_fn_arg_names(fn: Callable) -> List[str]:
def auto_param_call(fn: Callable, *args, signature_fn: Optional[Callable] = None,
                    mapping: Optional[Dict[AnyStr, AnyStr]] = None) -> Any:
    r"""
-   This function looks up values matching the formal parameter names of fn in *args (which therefore must all be of dict type) and calls fn with them; if the data passed in does not match fn's parameters, the mapping
-   argument can translate them. A pair (key, value) in mapping means: find the value under key in *args and pass that value to the parameter named value.
+   This function looks up values matching the formal parameter names of ``fn`` in ``*args`` (which therefore must all be of ``dict`` type) and calls ``fn`` with them; if the data passed in does not match the parameters of ``fn``,
+   the ``mapping`` argument can translate them. A pair ``(key, value)`` in ``mapping`` means: find the value under ``key`` in ``*args`` and pass that value to the parameter named
+   ``value``.

-   1. This function lets users drive calls automatically via string matching;
-   2. Note that mapping defaults to None; if you want to control how the inputs map to the called function's parameters, you should pass such a dict as mapping;
-      if mapping is not None, we always rewrite the keys of the input dicts with it first, so please double-check the correctness of mapping yourself;
-   3. If a parameter of the given function has a default value, the default is used whenever no later input supplies that parameter; otherwise the supplied value is used;
-   4. If the given function is a `partial` function, the situation is the same as in 3., i.e. identical to the default-argument case;
-
-   :param fn: the function that does the actual computation; its parameters may have default values;
-   :param args: a series of positional arguments, which should all be dicts; the actual arguments needed by `fn` are extracted from these inputs;
-   :param signature_fn: a function whose signature replaces that of `fn`; if this argument is not None, the signature is first taken from it, the parameter values are extracted through
-       that signature, and only then passed to `fn` for the actual computation;
-   :param mapping: a dict used to rewrite the keys of the dicts that precede it;
-
-   :return: the result of running `fn`;
+   1. This function lets users drive calls automatically via string matching;
+   2. Note that ``mapping`` defaults to ``None``; if you want to control how the inputs map to the called function's parameters, you should pass such a dict as ``mapping``;
+      if ``mapping`` is not ``None``, we always rewrite the keys of the input dicts with it first, so please double-check the correctness of ``mapping`` yourself;
+   3. If a parameter of the given function has a default value, the default is used whenever no later input supplies that parameter; otherwise the supplied value is used;
+   4. If the given function is a ``partial`` function, the situation is the same as in the third point, i.e. identical to the default-argument case;

    Examples::

        >>> # 1
        >>> loss_fn = CrossEntropyLoss()  # if the parameters it needs are def CrossEntropyLoss(y, pred);
        >>> batch = {"x": 20, "y": 1}
@@ -84,6 +73,14 @@ def auto_param_call(fn: Callable, *args, signature_fn: Optional[Callable] = None
        >>> print(auto_param_call(test_fn, {"x": 10}, {"y": 20, "a": 30}))  # res: 70
        >>> print(auto_param_call(partial(test_fn, a=100), {"x": 10}, {"y": 20}))  # res: 140
        >>> print(auto_param_call(partial(test_fn, a=100), {"x": 10}, {"y": 20, "a": 200}))  # res: 240

+   :param fn: the function that does the actual computation; its parameters may have default values;
+   :param args: a series of positional arguments, which should all be dicts; the actual arguments needed by ``fn`` are extracted from these inputs;
+   :param signature_fn: a function whose signature replaces that of ``fn``; if this argument is not ``None``, the signature is first taken from it, the parameter
+       values are extracted through that signature, and only then passed to ``fn`` for the actual computation;
+   :param mapping: a dict used to rewrite the keys of the dicts that precede it;
+
+   :return: the result of running ``fn``;
    """

    if signature_fn is not None:
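A runnable sketch of the matching behaviour described above; the toy ``test_fn`` here is our own and differs from the one used in the docstring examples::

    from functools import partial
    from fastNLP.core.utils import auto_param_call

    def test_fn(x, y, a=1):
        return x + y + a

    print(auto_param_call(test_fn, {"x": 10}, {"y": 20}))                    # 31, a falls back to its default
    print(auto_param_call(partial(test_fn, a=100), {"x": 10}, {"y": 20}))    # 130
    print(auto_param_call(test_fn, {"x": 10, "z": 20}, mapping={"z": "y"}))  # 31, key z is renamed to y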
@@ -226,13 +223,13 @@ def _check_valid_parameters_number(fn, expected_params:List[str], fn_name=None):


def check_user_specific_params(user_params: Dict, fn: Callable):
    """
-   This function assigns the user's input to the parameters of the given function;
-   it is mainly used in cases where users cannot call the function directly;
-   its main purpose is to help check whether the user's inputs for the function fn are wrong;
+   This function assigns the user's input to the parameters of the given function, mainly for cases where users cannot call the function directly;
+   its main purpose is to help check whether the user's inputs for the function ``fn`` are wrong;

-   :param user_params: the values of the user-specified parameters; should be a dict whose keys are the parameter names and whose values are the values the parameters should take;
-   :param fn: the function that will be called;
-   :return: a dict holding the parameter values that will really be passed in when ``fn`` is called later;
+   :param user_params: the values of the user-specified parameters; should be a dict whose ``key`` is each parameter's name and
+       whose ``value`` is that parameter's value;
+   :param fn: the function that is going to be called;
+   :return: a dict holding the parameter values that will really be passed in when ``fn`` is called later;
    """

    fn_arg_names = get_fn_arg_names(fn)
@@ -243,6 +240,9 @@ def check_user_specific_params(user_params: Dict, fn: Callable):




def dataclass_to_dict(data: "dataclasses.dataclass") -> Dict:
+   """
+   Convert the given `dataclass` instance into a dict.
+   """
    if not is_dataclass(data):
        raise TypeError(f"Parameter `data` can only be `dataclass` type instead of {type(data)}.")
    _dict = dict()
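What ``dataclass_to_dict`` produces, assuming a direct import from this module; the dataclass is made up for the example::

    from dataclasses import dataclass
    from fastNLP.core.utils.utils import dataclass_to_dict

    @dataclass
    class Output:
        loss: float
        acc: float

    print(dataclass_to_dict(Output(loss=0.3, acc=0.9)))  # {'loss': 0.3, 'acc': 0.9}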
@@ -253,21 +253,31 @@ def dataclass_to_dict(data: "dataclasses.dataclass") -> Dict:


def match_and_substitute_params(mapping: Optional[Union[Callable, Dict]] = None, data: Optional[Any] = None) -> Any:
    r"""
-   Implements replacing the keys of the input ``batch`` or the output ``outputs`` according to `mapping`;
-   this function serves `input_mapping` and `output_mapping`;
-   for `input_mapping`, it is called in `TrainBatchLoop` right after the data is fetched;
-   for `output_mapping`, it is called right after the results are obtained in `Trainer.train_step` and `Evaluator.train_step`;
+   Implements replacing the keys of the input ``batch`` or the output ``outputs`` according to ``mapping``;
+   this function serves ``input_mapping`` and ``output_mapping``;

-   The conversion logic, in order of priority:
+   For ``input_mapping``, it is called in :class:`~fastNLP.core.controllers.TrainBatchLoop` right after the data is fetched;
+   for ``output_mapping``, it is called right after the results are obtained in :meth:`~fastNLP.core.Trainer.train_step` of
+   :class:`~fastNLP.core.Trainer` and :meth:`~fastNLP.core.Evaluator.train_step` of :class:`~fastNLP.core.Evaluator`;

-   1. if `mapping` is a function, `mapping(data)` is returned directly;
-   2. if `mapping` is a `Dict`, then `data` may only be of these three types: [`Dict`, `dataclass`, `Sequence`];
-      if `data` is a `Dict`, the function replaces each key of `data` with mapping[key];
-      if `data` is a `dataclass`, the function first converts it to a `Dict` via `dataclasses.asdict` and then converts it;
-      if `data` is a `Sequence`, the function first turns it into a corresponding `Dict`: {"_0": list[0], "_1": list[1], ...}, then converts
-      that `Dict` with the mapping; keys not matched by the mapping keep the "_number" form.
+   The conversion logic, in order of priority:

-   :param mapping: the dict or function used for the conversion; when mapping is a function, its return value must be of dict type.
+   1. if ``mapping`` is a function, ``mapping(data)`` is returned directly;
+   2. if ``mapping`` is a ``Dict``, then ``data`` may only be of these three types: ``[Dict, dataclass, Sequence]``;
+      * if ``data`` is a ``Dict``, the function replaces each ``key`` of ``data`` with ``mapping[key]``;
+      * if ``data`` is a ``dataclass``, the function first converts it to a ``Dict`` via :func:`dataclasses.asdict` and then converts it;
+      * if ``data`` is a ``Sequence``, the function first turns it into a corresponding dict::

            {
                "_0": list[0],
                "_1": list[1],
                ...
            }

        Then that ``Dict`` is converted with ``mapping``; keys that match no ``key`` in ``mapping`` keep the ``'_number'`` form.

+   :param mapping: the dict or function used for the conversion; when ``mapping`` is a function, its return value must be of dict type.
    :param data: the object to be converted;
    :return: the converted result;
    """
@@ -320,21 +330,20 @@ def apply_to_collection(
    include_none: bool = True,
    **kwargs: Any,
) -> Any:
-   """Apply the function recursively to the elements of data, executing only where an element is of type dtype.
-
-   this function credit to: https://github.com/PyTorchLightning/pytorch-lightning
-   Args:
-       data: the collection to apply the function to
-       dtype: the given function will be applied to all elements of this dtype
-       function: the function to apply
-       *args: positional arguments (will be forwarded to calls of ``function``)
-       wrong_dtype: the given function won't be applied if this type is specified and the given collections
-           is of the ``wrong_dtype`` even if it is of type ``dtype``
-       include_none: Whether to include an element if the output of ``function`` is ``None``.
-       **kwargs: keyword arguments (will be forwarded to calls of ``function``)
-
-   Returns:
-       The resulting collection
+   """
+   Recursively apply the function ``function`` to the elements of ``data``, executing only where an element is of type ``dtype``.
+
+   This function is adapted from `pytorch-lightning <https://github.com/PyTorchLightning/pytorch-lightning>`_.
+
+   :param data: the collection or datum to process
+   :param dtype: the data type; ``function`` is only applied to the elements of ``data`` whose type is ``dtype``
+   :param function: the function that processes the data
+   :param args: other positional arguments required by ``function``
+   :param wrong_dtype: a type on which ``function`` must never act. If a datum is of ``wrong_dtype`` as well as of ``dtype``,
+       ``function`` is still not applied.
+   :param include_none: whether to keep data for which ``function`` returns ``None``; defaults to ``True``.
+   :param kwargs: other keyword arguments required by ``function``
+   :return: the collection after processing by ``function``
    """
    # Breaking condition
    if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)):
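A short usage sketch, assuming a direct import from this module; the nested collection and the doubling function are illustrative::

    from fastNLP.core.utils.utils import apply_to_collection

    nested = {"a": 1, "b": [2, 3, {"c": 4}], "skip": "text"}
    # double every int in the nested structure, leaving strings untouched
    print(apply_to_collection(nested, dtype=int, function=lambda x: x * 2))
    # {'a': 2, 'b': [4, 6, {'c': 8}], 'skip': 'text'}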
@@ -402,16 +411,18 @@ def apply_to_collection(
@contextmanager
def nullcontext():
    r"""
-   Implements a dummy context environment;
+   A context environment that does nothing.
    """
    yield




def sub_column(string: str, c: int, c_size: int, title: str) -> str:
    r"""
+   Truncate the given string so that it can be displayed on the command line.
+
    :param string: the string to be truncated
    :param c: the number of command-line columns
-   :param c_size: the number of instance or dataset fields
+   :param c_size: the number of ``field`` in the :class:`~fastNLP.core.Instance` or :class:`fastNLP.core.DataSet`
    :param title: the column name
    :return: the result of truncating an over-long column
    """
@@ -442,18 +453,17 @@ def _is_iterable(value):


def pretty_table_printer(dataset_or_ins) -> PrettyTable:
    r"""
-   :param dataset_or_ins: pass in a DataSet or an Instance
-
-   .. code-block::
+   The function used to display data in ``fastNLP``::

-       ins = Instance(field_1=[1, 1, 1], field_2=[2, 2, 2], field_3=["a", "b", "c"])
+       >>> ins = Instance(field_1=[1, 1, 1], field_2=[2, 2, 2], field_3=["a", "b", "c"])
        +-----------+-----------+-----------------+
        |  field_1  |  field_2  |     field_3     |
        +-----------+-----------+-----------------+
        | [1, 1, 1] | [2, 2, 2] | ['a', 'b', 'c'] |
        +-----------+-----------+-----------------+

-   :return: returns a pretty table, automatically truncated according to the terminal size
+   :param dataset_or_ins: the :class:`~fastNLP.core.DataSet` or :class:`~fastNLP.core.Instance` to display
+   :return: a data table automatically truncated according to the ``terminal`` size
    """
    x = PrettyTable()
    try:
@@ -486,7 +496,7 @@ def pretty_table_printer(dataset_or_ins) -> PrettyTable:




class Option(dict):
-   r"""a dict can treat keys as attributes"""
+   r"""A dict type that treats its keys as attributes."""

    def __getattr__(self, item):
        try:
@@ -516,11 +526,10 @@ _emitted_deprecation_warnings = set()




def deprecated(help_message: Optional[str] = None):
-   """Decorator to mark a function as deprecated.
+   """
+   Decorator that marks the current functionality as deprecated.

-   Args:
-       help_message (`Optional[str]`): An optional message to guide the user on how to
-           switch to non-deprecated usage of the library.
+   :param help_message: a guidance message telling users how to switch their code to the usage recommended by the current version.
    """

    def decorator(deprecated_function: Callable):
@@ -549,11 +558,10 @@ def deprecated(help_message: Optional[str] = None):
    return decorator


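Typical use of the ``deprecated`` decorator above, assuming a direct import from this module; the decorated function is made up::

    from fastNLP.core.utils.utils import deprecated

    @deprecated(help_message="Use DataSet.apply_field() instead.")
    def old_apply(ds):
        ...

    old_apply(None)  # emits a deprecation warning carrying the message above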


-def seq_len_to_mask(seq_len, max_len=None):
+def seq_len_to_mask(seq_len, max_len: Optional[int]):
    r"""
-   Convert a one-dimensional array of sequence lengths into a two-dimensional mask, with excluded positions set to 0.
-   Turns a 1-d seq_len into a 2-d mask.
+   Convert a one-dimensional array of ``sequence length`` values into a two-dimensional ``mask``, with excluded positions set to **0**.

    .. code-block::

@@ -570,10 +578,11 @@ def seq_len_to_mask(seq_len, max_len=None):
        >>>print(mask.size())
        torch.Size([14, 100])

-   :param np.ndarray,torch.LongTensor seq_len: the shape will be (B,)
-   :param int max_len: pad the lengths to this length. By default (None) the longest length in seq_len is used. Under nn.DataParallel, however,
-       different cards may see different seq_len, so a max_len must be passed in so that the mask is padded to that length.
-   :return: np.ndarray or torch.Tensor of shape (B, max_length), with elements of type bool or torch.uint8
+   :param seq_len: a length sequence of size ``(B,)``
+   :param int max_len: ``pad`` the lengths to ``max_len``. By default (``None``) the longest length in ``seq_len`` is used.
+       Under distributed settings such as :class:`torch.nn.DataParallel`, however, ``seq_len`` may differ between cards,
+       so a ``max_len`` must be passed in so that the ``mask`` is padded to that length.
+   :return: a ``mask`` of size ``(B, max_len)`` whose element type is ``bool`` or ``uint8``
    """
    if isinstance(seq_len, np.ndarray):
        assert len(np.shape(seq_len)) == 1, f"seq_len can only have one dimension, got {len(np.shape(seq_len))}."

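For readers unfamiliar with the construction, the mask logic in pure NumPy, independent of fastNLP::

    import numpy as np

    seq_len = np.array([1, 3, 2])
    max_len = seq_len.max()  # 3
    mask = np.arange(max_len)[None, :] < seq_len[:, None]
    print(mask.astype(np.uint8))
    # [[1 0 0]
    #  [1 1 1]
    #  [1 1 0]]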

+ 5
- 3
fastNLP/envs/utils.py View File

@@ -6,6 +6,7 @@ from packaging.version import Version
import subprocess
import pkg_resources

+__all__ = []


def _module_available(module_path: str) -> bool:
    """Check if a path is available in your environment.
@@ -48,10 +49,11 @@ def _compare_version(package: str, op: Callable, version: str, use_base_version:
        pkg_version = Version(pkg_version.base_version)
    return op(pkg_version, Version(version))

-def get_gpu_count():
+def get_gpu_count() -> int:
    """
-   A helper that uses the command line to get the number of gpus
-   :return: the number of gpus, or -1 if there is no graphics device
+   A helper that uses the command line to get the number of ``gpu`` devices

+   :return: the number of graphics cards, or -1 if there is no graphics device
    """
    try:
        lines = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.used', '--format=csv'])

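For context, one way to turn the ``nvidia-smi`` query above into a count (a sketch; the shipped implementation may differ)::

    import subprocess

    def count_gpus() -> int:
        try:
            lines = subprocess.check_output(
                ['nvidia-smi', '--query-gpu=memory.used', '--format=csv'])
            # one CSV header line, then one line per GPU
            return len(lines.decode().strip().split('\n')) - 1
        except Exception:
            return -1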

+ 0
- 9
fastNLP/modules/__init__.py View File

@@ -1,9 +0,0 @@
__all__ = [
"MixModule",
"torch2paddle",
"paddle2torch",
"torch2jittor",
"jittor2torch",
]

from .mix_modules import MixModule, torch2paddle, paddle2torch, torch2jittor, jittor2torch

+ 0
- 10
fastNLP/modules/mix_modules/__init__.py View File

@@ -1,10 +0,0 @@
__all__ = [
"MixModule",
"torch2paddle",
"paddle2torch",
"torch2jittor",
"jittor2torch",
]

from .mix_module import MixModule
from .utils import *

+ 0
- 310
fastNLP/modules/mix_modules/mix_module.py View File

@@ -1,310 +0,0 @@
import os
import io
import pickle
from typing import Dict
from collections import OrderedDict

import numpy as np

from fastNLP.envs.imports import _NEED_IMPORT_JITTOR, _NEED_IMPORT_PADDLE, _NEED_IMPORT_TORCH
from fastNLP.core.utils.paddle_utils import paddle_to

if _NEED_IMPORT_PADDLE:
import paddle
from paddle.nn import Layer as PaddleLayer

if _NEED_IMPORT_TORCH:
import torch
from torch.nn import Module as TorchModule, Parameter as TorchParameter

if _NEED_IMPORT_JITTOR:
import jittor


__all__ = [
"MixModule",
]

class MixModule:
"""
    TODO: support different mixing strategies; add state_dict support; decide how to handle a parameter that is a List of Tensors;
    and whether the various models should be classified at initialization time, as Module does.
    A mixed model that can use the Torch and Paddle frameworks at the same time.
"""
def __init__(self, *args, **kwargs):
pass

def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)

def named_parameters(self, prefix='', recurse: bool=True, backend=None):
"""
        Yield the names and parameters of the model.

        :param prefix: the prefix prepended to parameter names in the output
        :param recurse: whether to yield parameters recursively
        :param backend: with `backend`=`None`, the parameters of all models and tensors are returned;
            with `backend`=`torch`, the `torch` parameters are returned;
            with `backend`=`paddle`, the `paddle` parameters are returned.
"""
if backend is None:
generator = self.attributes(TorchModule, TorchParameter, PaddleLayer)
elif backend == "torch":
generator = self.attributes(TorchModule, TorchParameter)
elif backend == "paddle":
generator = self.attributes(PaddleLayer)
else:
raise ValueError("Unknown backend parameter.")

for name, value in generator:
name = prefix + ('.' if prefix else '') + name
if isinstance(value, TorchParameter):
                # not a Module/Layer type, yield the name and value directly
yield name, value
elif recurse:
                # recursively call named_parameters
for name_r, value_r in value.named_parameters(name, recurse):
yield name_r, value_r

def parameters(self, recurse: bool = True, backend: str = None):
"""
        Yield the parameters of the model.

        :param recurse: whether to yield parameters recursively
        :param backend: with `backend`=`None`, the parameters of all models and tensors are returned;
            with `backend`=`torch`, the `torch` parameters are returned;
            with `backend`=`paddle`, the `paddle` parameters are returned.
"""
for name, value in self.named_parameters(recurse=recurse, backend=backend):
            yield value

    def forward(self, *args, **kwargs):
raise NotImplementedError

def train_step(self, batch):
raise NotImplementedError

def test_step(self, batch):
raise NotImplementedError

def evaluate_step(self, batch):
raise NotImplementedError

def train(self):
for name, value in self.attributes(TorchModule, PaddleLayer):
value.train()

def eval(self):
for name, value in self.attributes(TorchModule, PaddleLayer):
value.eval()

def to(self, device):
"""
        :param device: the device name
"""
        # warn if jittor is involved
if device == "cpu":
paddle_device = device
elif device.startswith("cuda"):
paddle_device = device.replace("cuda", "gpu")
elif device.startswith("gpu"):
paddle_device = device
device = device.replace("gpu", "cuda")
else:
raise ValueError("Device value error")

for name, value in self.attributes(TorchModule):
            # torch's to() does not modify a Tensor in place
vars(self)[name] = value.to(device)
for name, value in self.attributes(TorchParameter):
            # a Parameter becomes a plain Tensor after to(), so re-wrap it
vars(self)[name] = TorchParameter(value.to(device), requires_grad=value.requires_grad)

for name, value in self.attributes(PaddleLayer):
vars(self)[name] = value.to(paddle_device)
for name, value in self.attributes(paddle.Tensor):
            # paddle's to() does affect the Tensor
vars(self)[name] = paddle_to(value, paddle_device)

return self

def state_dict(self, backend: str = None) -> Dict:
"""
        Return the state_dict of the model.

        .. note:: torch's destination argument will be removed in the future, so no destination parameter is provided here

        :param backend: with `backend`=`None`, the state dict of all models and tensors is returned;
            with `backend`=`torch`, the `torch` state dict is returned;
            with `backend`=`paddle`, the `paddle` state dict is returned.
"""
if backend is None:
generator = self.attributes(TorchModule, TorchParameter, PaddleLayer)
elif backend == "torch":
generator = self.attributes(TorchModule, TorchParameter)
elif backend == "paddle":
generator = self.attributes(PaddleLayer)
else:
raise ValueError(f"Unknown backend {backend}.")

destination = OrderedDict()

for name, value in generator:
if value is None:
continue
if isinstance(value, TorchParameter):
destination[name] = value
else:
                # the state_dict functions of the two frameworks differ in argument names and order
if isinstance(value, PaddleLayer):
kwargs = {
"structured_name_prefix": name + ".",
}
elif isinstance(value, TorchModule):
kwargs = {
"prefix": name + ".",
}
else:
raise ValueError(f"Unknown item type {type(value)}")
destination.update(value.state_dict(**kwargs))

return destination

def save_state_dict_to_file(self, path: str):
"""
        Save the model's state dict to path.
"""
        # TODO device restrictions
filename = os.path.basename(path)
if filename == "":
raise ValueError("Received empty filename.")
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
protocol = 4

saved = {}
paddle_dict = self.state_dict(backend="paddle")
torch_dict = self.state_dict(backend="torch")
        # save the paddle part
        # call the helper functions paddle itself uses when saving
paddle_saved_obj = paddle.framework.io._build_saved_state_dict(paddle_dict)
paddle_saved_obj = paddle.fluid.io._unpack_saved_dict(paddle_saved_obj, protocol)
        # store the returned dict
saved["paddle"] = paddle_saved_obj

        # save the torch part
buffer = io.BytesIO()
torch.save(torch_dict, buffer)
saved["torch"] = buffer.getvalue()

        # write everything to disk
with open(path, "wb") as f:
pickle.dump(saved, f, protocol)

def load_state_dict_from_file(self, path: str):
"""
        Load the saved state dict from `path`.
"""
state_dict = {}
with open(path, "rb") as f:
loaded = pickle.load(f)
        # load the paddle part
paddle_loaded_obj = loaded["paddle"]
paddle_load_result = paddle.fluid.io._pack_loaded_dict(paddle_loaded_obj)
if "StructuredToParameterName@@" in paddle_load_result:
for key in paddle_load_result["StructuredToParameterName@@"]:
if isinstance(paddle_load_result[key], np.ndarray):
paddle_load_result[key] = paddle.to_tensor(paddle_load_result[key])
state_dict.update(paddle_load_result)
        # load the torch part
torch_loaded_obj = loaded["torch"]
torch_bytes = io.BytesIO(torch_loaded_obj)
torch_load_result = torch.load(torch_bytes)
state_dict.update(torch_load_result)

self.load_state_dict(state_dict)

def load_state_dict(self, state_dict):
"""
        Load the data from a state dict.
"""
missing_keys = []
unexpected_keys = []
error_msgs = []
new_state = {}

local_state = self.state_dict()

        # group the entries of the dict by their prefix
        for key, value in state_dict.items():
            parts = key.split(".", 1)
            if len(parts) == 1:
                # no prefix; in practice only torch.nn.Parameter reaches this branch
                new_state[key] = value
            else:
                prefix, name = parts
                if prefix not in new_state:
                    new_state[prefix] = {}
                new_state[prefix][name] = value

for key, param in self.attributes(TorchModule, TorchParameter, PaddleLayer):
if key in new_state:
                # found a matching value in the given dict
input_param = new_state[key]
if not isinstance(input_param, dict):
                    # and it is not a dict, i.e. the prefix-less case above;
                    # assign it following torch.nn.Module._load_from_state_dict
if not torch.overrides.is_tensor_like(input_param):
error_msgs.append('While copying the parameter named "{}", '
'expected torch.Tensor or Tensor-like object from checkpoint but '
'received {}'
.format(key, type(input_param)))
continue

# This is used to avoid copying uninitialized parameters into
                    # non-lazy modules, since they don't have the hook to do the checks
# in such case, it will error when accessing the .shape attribute.
is_param_lazy = torch.nn.parameter.is_lazy(param)
# Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+
if not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1:
input_param = input_param[0]

if not is_param_lazy and input_param.shape != param.shape:
# local shape should match the one in checkpoint
error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, '
'the shape in current model is {}.'
.format(key, input_param.shape, param.shape))
continue
try:
with torch.no_grad():
param.copy_(input_param)
except Exception as ex:
error_msgs.append('While copying the parameter named "{}", '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}, '
'an exception occurred : {}.'
.format(key, param.size(), input_param.size(), ex.args))
else:
                # otherwise the value belongs to a submodule
if isinstance(param, TorchModule):
                    # a torch module;
                    # paddle provides no strict-like argument, so torch is not held to strict either
param.load_state_dict(input_param, strict=False)
elif isinstance(param, PaddleLayer):
                    # a paddle module
param.load_dict(input_param)
else:
missing_keys.append(key)

if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
self.__class__.__name__, "\n\t".join(error_msgs)))

def attributes(self, *types):
"""
        Find the members that are of the given types.
"""
for name, value in vars(self).items():
if isinstance(value, types):
yield name, value

+ 0
- 233
fastNLP/modules/mix_modules/utils.py View File

@@ -1,233 +0,0 @@
import warnings
import os
from typing import Any, Optional, Union

import numpy as np

from fastNLP.core.utils.utils import apply_to_collection
from fastNLP.core.utils.paddle_utils import paddle_to
from fastNLP.envs.imports import _NEED_IMPORT_JITTOR, _NEED_IMPORT_TORCH, _NEED_IMPORT_PADDLE

if _NEED_IMPORT_PADDLE:
import paddle

if _NEED_IMPORT_JITTOR:
import jittor

if _NEED_IMPORT_TORCH:
import torch

__all__ = [
"paddle2torch",
"torch2paddle",
"jittor2torch",
"torch2jittor",
]

def _paddle2torch(paddle_tensor: 'paddle.Tensor', target_device: Optional[Union[str, int]] = None, no_gradient: bool = None) -> 'torch.Tensor':
    """
    Convert a paddle tensor to a torch tensor, keeping the gradient so that backpropagation still works.

    :param paddle_tensor: the paddle tensor to convert
    :param target_device: the device to move the converted tensor to; when `None`, same as the input tensor.
    :param no_gradient: whether to drop the original tensor's gradient. When `None`, the new tensor behaves like the input;
        when `True`, no gradient is kept; when `False`, the gradient is kept.
    :return: the converted torch tensor
    """
no_gradient = paddle_tensor.stop_gradient if no_gradient is None else no_gradient
paddle_numpy = paddle_tensor.numpy()
if not np.issubdtype(paddle_numpy.dtype, np.inexact):
no_gradient = True

if target_device is None:
if paddle_tensor.place.is_gpu_place():
            # paddlepaddle has two kinds of Place, each with its own way to get the device id
if hasattr(paddle_tensor.place, "gpu_device_id"):
# paddle.fluid.core_avx.Place
                # tensors created in a gpu environment have a place of this type
target_device = f"cuda:{paddle_tensor.place.gpu_device_id()}"
else:
# paddle.CUDAPlace
target_device = f"cuda:{paddle_tensor.place.get_device_id()}"
else:
            # TODO: devices such as xpu may need to be supported
target_device = "cpu"

if not no_gradient:
        # keep the gradient and keep backpropagation working
        # torch.tensor preserves the dtype of the numpy array
torch_tensor = torch.tensor(paddle_numpy, requires_grad=True, device=target_device)
hook = torch_tensor.register_hook(
lambda grad: paddle.autograd.backward(paddle_tensor, paddle.to_tensor(grad.cpu().numpy()))
)
else:
        # do not keep the gradient
torch_tensor = torch.tensor(paddle_numpy, requires_grad=False, device=target_device)

return torch_tensor


def _torch2paddle(torch_tensor: 'torch.Tensor', target_device: str = None, no_gradient: bool = None) -> 'paddle.Tensor':
    """
    Convert a torch tensor to a paddle tensor, keeping the gradient so that backpropagation still works.

    :param torch_tensor: the torch tensor to convert
    :param target_device: the device to move the converted tensor to; when `None`, same as the input tensor.
    :param no_gradient: whether to drop the original tensor's gradient. When `None`, the new tensor behaves like the input;
        when `True`, no gradient is kept; when `False`, the gradient is kept.
    :return: the converted paddle tensor
    """
no_gradient = not torch_tensor.requires_grad if no_gradient is None else no_gradient
if target_device is None:
if torch_tensor.is_cuda:
target_device = f"gpu:{torch_tensor.device.index}"
else:
target_device = "cpu"

if not no_gradient:
        # keep the gradient and keep backpropagation working
        # paddle's stop_gradient behaves opposite to torch's requires_grad
paddle_tensor = paddle.to_tensor(torch_tensor.detach().numpy(), stop_gradient=False)
hook = paddle_tensor.register_hook(
lambda grad: torch.autograd.backward(torch_tensor, torch.tensor(grad.numpy()))
)
else:
paddle_tensor = paddle.to_tensor(torch_tensor.detach().numpy(), stop_gradient=True)

paddle_tensor = paddle_to(paddle_tensor, target_device)

return paddle_tensor


def _jittor2torch(jittor_var: 'jittor.Var', target_device: Optional[Union[str, int]] = None, no_gradient: bool = None) -> 'torch.Tensor':
    """
    Convert a jittor Var to a torch tensor. Note that, contrary to the other converters, the gradient cannot be preserved here.

    :param jittor_var: the jittor variable to convert
    :param target_device: the device to move the converted tensor to; when `None`, decided by jittor.flags.use_cuda.
    :param no_gradient: whether to drop the original tensor's gradient. When `None`, the new tensor behaves like the input;
        when `True`, no gradient is kept; when `False`, the gradient is kept.
    :return: the converted torch tensor
    """
    # TODO: warning: the gradient cannot be preserved
    # jittor grads can be passed along via a callback
    # differentiation is possible if outputs carries a _grad key
no_gradient = not jittor_var.requires_grad if no_gradient is None else no_gradient
    if no_gradient is False:
warnings.warn("The result tensor will not keep gradients due to differences between jittor and pytorch.")
jittor_numpy = jittor_var.numpy()
if not np.issubdtype(jittor_numpy.dtype, np.inexact):
no_gradient = True

if target_device is None:
        # jittor assigns devices automatically;
        # decide based on use_cuda
if jittor.flags.use_cuda:
target_device = "cuda:0"
else:
target_device = "cpu"

torch_tensor = torch.tensor(jittor_numpy, requires_grad=not no_gradient, device=target_device)

return torch_tensor


def _torch2jittor(torch_tensor: 'torch.Tensor', no_gradient: bool = None) -> 'jittor.Var':
    """
    Convert a torch tensor to a jittor Var, keeping the gradient so that backpropagation still works.

    :param torch_tensor: the torch tensor to convert
    :param no_gradient: whether to drop the original tensor's gradient. When `None`, the new tensor behaves like the input;
        when `True`, no gradient is kept; when `False`, the gradient is kept.
    :return: the converted jittor variable
    """
no_gradient = not torch_tensor.requires_grad if no_gradient is None else no_gradient

if not no_gradient:
        # keep the gradient and keep backpropagation working
jittor_var = jittor.Var(torch_tensor.detach().numpy())
jittor_var.requires_grad = True
hook = jittor_var.register_hook(
lambda grad: torch.autograd.backward(torch_tensor, torch.tensor(grad.numpy()))
)
else:
jittor_var = jittor.Var(torch_tensor.detach().numpy())
jittor_var.requires_grad = False

return jittor_var


def torch2paddle(torch_in: Any, target_device: str = None, no_gradient: bool = None) -> Any:
    """
    Recursively convert all torch tensors contained in the input to paddle tensors.

    :param torch_in: the input containing torch.Tensor objects to convert
    :param target_device: the device to move the converted tensors to;
        when `None`, same as the input tensors.
    :param no_gradient: whether to drop the original tensors' gradient. When `None`, the new tensors behave like the input;
        when `True`, no gradient is kept; when `False`, the gradient is kept.
    :return: the input with every torch.Tensor converted to a paddle.Tensor
    """

return apply_to_collection(
torch_in,
dtype=torch.Tensor,
function=_torch2paddle,
target_device=target_device,
no_gradient=no_gradient,
)


def paddle2torch(paddle_in: Any, target_device: str = None, no_gradient: bool = None) -> Any:
"""
递归地将输入中包含的paddle张量转换为torch张量

:param torch_in: 要转换的包含paddle.Tensor类型的变量
:param target_device: 是否将转换后的张量迁移到特定设备上,
输入为`None`时,和输入的张量相同,
:param no_gradient: 是否保留原张量的梯度。为`None`时,新的张量与输入张量保持一致;
为`True`时,全部不保留梯度;为`False`时,全部保留梯度。
:return: 将所有paddle.Tensor转换为torch.Tensor后的变量
"""

return apply_to_collection(
paddle_in,
dtype=paddle.Tensor,
function=_paddle2torch,
target_device=target_device,
no_gradient=no_gradient,
)


def jittor2torch(jittor_in: Any, target_device: str = None, no_gradient: bool = None) -> Any:
"""
递归地将输入中包含的jittor变量转换为torch张量

:param jittor_in: 要转换的jittor变量
:param target_device: 是否将转换后的张量迁移到特定设备上,输入为`None`时,默认为cuda:0。
:param no_gradient: 是否保留原张量的梯度。为`None`时,新的张量与输入张量保持一致;
为`True`时,全部不保留梯度;为`False`时,全部保留梯度。
:return: 转换后的torch张量
"""

return apply_to_collection(
jittor_in,
dtype=jittor.Var,
function=_jittor2torch,
target_device=target_device,
no_gradient=no_gradient,
)


def torch2jittor(torch_in: Any, no_gradient: bool = None) -> Any:
"""
递归地将输入中包含的torch张量转换为jittor变量

:param torch_tensor: 要转换的torch张量
:param no_gradient: 是否保留原张量的梯度。为`None`时,新的张量与输入张量保持一致;
为`True`时,全部不保留梯度;为`False`时,全部保留梯度。
:return: 转换后的jittor变量
"""
return apply_to_collection(
torch_in,
dtype=torch.Tensor,
function=_torch2jittor,
no_gradient=no_gradient,
)

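These helpers are removed by this commit; for historical context, a minimal usage sketch of the conversion API as it existed (tensor shapes are illustrative)::

    import torch
    from fastNLP.modules.mix_modules.utils import torch2paddle, paddle2torch

    t = torch.rand(2, 3, requires_grad=True)
    p = torch2paddle(t)              # a paddle.Tensor with stop_gradient=False
    t2 = paddle2torch(p)             # back to a torch.Tensor
    nested = torch2paddle({"x": t, "y": [t, t]})  # containers are handled recursively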
+ 2
- 2
tests/core/collators/test_collator.py View File

@@ -334,9 +334,9 @@ def test_torch_dl():
    dl = TorchDataLoader(ds, batch_size=2)
    batch = next(iter(dl))
    assert 'x' in batch and 'y' in batch and 'z' in batch and 'i' in batch and 'j' in batch
-   assert isinstance(batch['z'], torch.Tensor)
+   assert isinstance(batch['z'], torch.FloatTensor)
    assert isinstance(batch['j'], list)
-   assert isinstance(batch['i']['j'], torch.Tensor)
+   assert isinstance(batch['i']['j'], torch.LongTensor)

    dl.set_ignore('x')
    batch = next(iter(dl))


+ 4
- 0
tests/core/controllers/test_trainer_jittor.py View File

@@ -11,6 +11,9 @@ if _NEED_IMPORT_JITTOR:
    import jittor as jt
    from jittor import nn, Module
    from jittor.dataset import Dataset
+else:
+    from fastNLP.core.utils.dummy_class import DummyClass as Module
+    from fastNLP.core.utils.dummy_class import DummyClass as Dataset


class JittorNormalModel_Classification(Module):
@@ -68,6 +71,7 @@ class TrainJittorConfig:

@pytest.mark.parametrize("driver,device", [("jittor", None)])
@pytest.mark.parametrize("callbacks", [[RichCallback(100)]])
+@pytest.mark.jittor
def test_trainer_jittor(
    driver,
    device,


+ 0
- 0
tests/core/drivers/torch_paddle_driver/__init__.py View File


+ 0
- 122
tests/core/drivers/torch_paddle_driver/_test_torch_paddle_driver.py View File

@@ -1,122 +0,0 @@
import pytest

from fastNLP.modules.mix_modules.mix_module import MixModule
from fastNLP.core.drivers.torch_paddle_driver.torch_paddle_driver import TorchPaddleDriver
from fastNLP.modules.mix_modules.utils import paddle2torch, torch2paddle

import torch
import paddle
from paddle.io import Dataset, DataLoader
import numpy as np

############################################################################
#
# Test performance on the MNIST dataset
#
############################################################################

class MNISTDataset(Dataset):
def __init__(self, dataset):

self.dataset = [
(
np.array(img).astype('float32').reshape(-1),
label
) for img, label in dataset
]

def __getitem__(self, idx):
return self.dataset[idx]

def __len__(self):
return len(self.dataset)

class MixMNISTModel(MixModule):
def __init__(self):
super(MixMNISTModel, self).__init__()

self.fc1 = paddle.nn.Linear(784, 64)
self.fc2 = paddle.nn.Linear(64, 32)
self.fc3 = torch.nn.Linear(32, 10)
self.fc4 = torch.nn.Linear(10, 10)

def forward(self, x):

paddle_out = self.fc1(x)
paddle_out = self.fc2(paddle_out)
torch_in = paddle2torch(paddle_out)
torch_out = self.fc3(torch_in)
torch_out = self.fc4(torch_out)

return torch_out

def train_step(self, x):
return self.forward(x)

def test_step(self, x):
return self.forward(x)

@pytest.mark.torchpaddle
class TestMNIST:

@classmethod
def setup_class(self):

self.train_dataset = paddle.vision.datasets.MNIST(mode='train')
self.test_dataset = paddle.vision.datasets.MNIST(mode='test')
self.train_dataset = MNISTDataset(self.train_dataset)

self.lr = 0.0003
self.epochs = 20

self.dataloader = DataLoader(self.train_dataset, batch_size=100, shuffle=True)

def setup_method(self):
model = MixMNISTModel()
self.torch_loss_func = torch.nn.CrossEntropyLoss()

torch_opt = torch.optim.Adam(model.parameters(backend="torch"), self.lr)
paddle_opt = paddle.optimizer.Adam(parameters=model.parameters(backend="paddle"), learning_rate=self.lr)

self.driver = TorchPaddleDriver(model=model, device="cuda:0")
self.driver.set_optimizers([torch_opt, paddle_opt])

def test_case1(self):

epochs = 20

self.driver.setup()
self.driver.zero_grad()
        # start training
current_epoch_idx = 0
while current_epoch_idx < epochs:
epoch_loss, batch = 0, 0
self.driver.set_model_mode("train")
self.driver.set_sampler_epoch(self.dataloader, current_epoch_idx)
for batch, (img, label) in enumerate(self.dataloader):
img = paddle.to_tensor(img).cuda()
torch_out = self.driver.train_step(img)
label = torch.from_numpy(label.numpy()).reshape(-1)
loss = self.torch_loss_func(torch_out.cpu(), label)
epoch_loss += loss.item()

self.driver.backward(loss)
self.driver.step()
self.driver.zero_grad()

current_epoch_idx += 1

        # start testing
correct = 0
for img, label in self.test_dataset:

img = paddle.to_tensor(np.array(img).astype('float32').reshape(1, -1))
torch_out = self.driver.test_step(img)
res = torch_out.softmax(-1).argmax().item()
label = label.item()
if res == label:
correct += 1

acc = correct / len(self.test_dataset)
assert acc > 0.85

+ 0
- 0
tests/core/drivers/torch_paddle_driver/_test_utils.py View File


+ 0
- 204
tests/core/utils/_test_torch_paddle_utils.py View File

@@ -1,204 +0,0 @@
import paddle
import pytest
import torch

from fastNLP.core.utils.torch_paddle_utils import torch_paddle_move_data_to_device

############################################################################
#
# Test moving all torch and paddle tensors contained in the arguments to the given device
#
############################################################################

@pytest.mark.torchpaddle
class TestTorchPaddleMoveDataToDevice:

def check_gpu(self, tensor, idx):
"""
        Helper that checks whether a tensor is on the given graphics card
"""

if isinstance(tensor, paddle.Tensor):
assert tensor.place.is_gpu_place()
assert tensor.place.gpu_device_id() == idx
elif isinstance(tensor, torch.Tensor):
assert tensor.is_cuda
assert tensor.device.index == idx

def check_cpu(self, tensor):
if isinstance(tensor, paddle.Tensor):
assert tensor.place.is_cpu_place()
elif isinstance(tensor, torch.Tensor):
assert not tensor.is_cuda

def test_tensor_transfer(self):
"""
        Test moving a single tensor
"""

paddle_tensor = paddle.rand((3, 4, 5)).cpu()
res = torch_paddle_move_data_to_device(paddle_tensor, device=None, data_device=None)
self.check_cpu(res)

res = torch_paddle_move_data_to_device(paddle_tensor, device="gpu:0", data_device=None)
self.check_gpu(res, 0)

res = torch_paddle_move_data_to_device(paddle_tensor, device="gpu:1", data_device=None)
self.check_gpu(res, 1)

res = torch_paddle_move_data_to_device(paddle_tensor, device="cuda:0", data_device="cpu")
self.check_gpu(res, 0)

res = torch_paddle_move_data_to_device(paddle_tensor, device=None, data_device="gpu:0")
self.check_gpu(res, 0)

res = torch_paddle_move_data_to_device(paddle_tensor, device=None, data_device="cuda:1")
self.check_gpu(res, 1)

torch_tensor = torch.rand(3, 4, 5)
res = torch_paddle_move_data_to_device(torch_tensor, device=None, data_device=None)
self.check_cpu(res)

res = torch_paddle_move_data_to_device(torch_tensor, device="gpu:0", data_device=None)
self.check_gpu(res, 0)

res = torch_paddle_move_data_to_device(torch_tensor, device="gpu:1", data_device=None)
self.check_gpu(res, 1)

res = torch_paddle_move_data_to_device(torch_tensor, device="gpu:0", data_device="cpu")
self.check_gpu(res, 0)

res = torch_paddle_move_data_to_device(torch_tensor, device=None, data_device="gpu:0")
self.check_gpu(res, 0)

res = torch_paddle_move_data_to_device(torch_tensor, device=None, data_device="gpu:1")
self.check_gpu(res, 1)

def test_list_transfer(self):
"""
        Test moving a list of tensors
"""

paddle_list = [paddle.rand((6, 4, 2)) for i in range(5)] + [torch.rand((6, 4, 2)) for i in range(5)]
res = torch_paddle_move_data_to_device(paddle_list, device=None, data_device="gpu:1")
assert isinstance(res, list)
for r in res:
self.check_gpu(r, 1)

res = torch_paddle_move_data_to_device(paddle_list, device="cpu", data_device="gpu:1")
assert isinstance(res, list)
for r in res:
self.check_cpu(r)

res = torch_paddle_move_data_to_device(paddle_list, device="gpu:0", data_device=None)
assert isinstance(res, list)
for r in res:
self.check_gpu(r, 0)

res = torch_paddle_move_data_to_device(paddle_list, device="gpu:1", data_device="cpu")
assert isinstance(res, list)
for r in res:
self.check_gpu(r, 1)

def test_tensor_tuple_transfer(self):
"""
        Test moving a tuple of tensors
"""

paddle_list = [paddle.rand((6, 4, 2)) for i in range(10)] + [torch.rand((6, 4, 2)) for i in range(5)]
paddle_tuple = tuple(paddle_list)
res = torch_paddle_move_data_to_device(paddle_tuple, device=None, data_device="gpu:1")
assert isinstance(res, tuple)
for r in res:
self.check_gpu(r, 1)

res = torch_paddle_move_data_to_device(paddle_tuple, device="cpu", data_device="gpu:1")
assert isinstance(res, tuple)
for r in res:
self.check_cpu(r)

res = torch_paddle_move_data_to_device(paddle_tuple, device="gpu:0", data_device=None)
assert isinstance(res, tuple)
for r in res:
self.check_gpu(r, 0)

res = torch_paddle_move_data_to_device(paddle_tuple, device="gpu:1", data_device="cpu")
assert isinstance(res, tuple)
for r in res:
self.check_gpu(r, 1)

def test_dict_transfer(self):
"""
        Test moving a complex dict structure
"""

paddle_dict = {
"torch_tensor": torch.rand((3, 4)),
"torch_list": [torch.rand((6, 4, 2)) for i in range(10)],
"dict":{
"list": [paddle.rand((6, 4, 2)) for i in range(5)] + [torch.rand((6, 4, 2)) for i in range(5)],
"torch_tensor": torch.rand((3, 4)),
"paddle_tensor": paddle.rand((3, 4))
},
"paddle_tensor": paddle.rand((3, 4)),
"list": [paddle.rand((6, 4, 2)) for i in range(10)] ,
"int": 2,
"string": "test string"
}

res = torch_paddle_move_data_to_device(paddle_dict, device="gpu:0", data_device=None)
assert isinstance(res, dict)
self.check_gpu(res["torch_tensor"], 0)
self.check_gpu(res["paddle_tensor"], 0)
assert isinstance(res["torch_list"], list)
for t in res["torch_list"]:
self.check_gpu(t, 0)
assert isinstance(res["list"], list)
for t in res["list"]:
self.check_gpu(t, 0)
assert isinstance(res["int"], int)
assert isinstance(res["string"], str)
assert isinstance(res["dict"], dict)
assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]:
self.check_gpu(t, 0)
self.check_gpu(res["dict"]["torch_tensor"], 0)
self.check_gpu(res["dict"]["paddle_tensor"], 0)

res = torch_paddle_move_data_to_device(paddle_dict, device=None, data_device="gpu:1")
assert isinstance(res, dict)
self.check_gpu(res["torch_tensor"], 1)
self.check_gpu(res["paddle_tensor"], 1)
assert isinstance(res["torch_list"], list)
for t in res["torch_list"]:
self.check_gpu(t, 1)
assert isinstance(res["list"], list)
for t in res["list"]:
self.check_gpu(t, 1)
assert isinstance(res["int"], int)
assert isinstance(res["string"], str)
assert isinstance(res["dict"], dict)
assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]:
self.check_gpu(t, 1)
self.check_gpu(res["dict"]["torch_tensor"], 1)
self.check_gpu(res["dict"]["paddle_tensor"], 1)

res = torch_paddle_move_data_to_device(paddle_dict, device="cpu", data_device="gpu:0")
assert isinstance(res, dict)
self.check_cpu(res["torch_tensor"])
self.check_cpu(res["paddle_tensor"])
assert isinstance(res["torch_list"], list)
for t in res["torch_list"]:
self.check_cpu(t)
assert isinstance(res["list"], list)
for t in res["list"]:
self.check_cpu(t)
assert isinstance(res["int"], int)
assert isinstance(res["string"], str)
assert isinstance(res["dict"], dict)
assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]:
self.check_cpu(t)
self.check_cpu(res["dict"]["torch_tensor"])
self.check_cpu(res["dict"]["paddle_tensor"])

+ 0
- 0
tests/modules/__init__.py View File


+ 0
- 0
tests/modules/mix_modules/__init__.py View File


+ 0
- 378
tests/modules/mix_modules/_test_mix_module.py View File

@@ -1,378 +0,0 @@
import pytest
import os
from itertools import chain

import torch
import paddle
from paddle.io import Dataset, DataLoader
import numpy as np

from fastNLP.modules.mix_modules.mix_module import MixModule
from fastNLP.modules.mix_modules.utils import paddle2torch, torch2paddle
from fastNLP.envs.distributed import rank_zero_rm


############################################################################
#
# Test the basic functionality of the class
#
############################################################################

class MixModuleForTest(MixModule):
def __init__(self):
super(MixModuleForTest, self).__init__()

self.torch_fc1 = torch.nn.Linear(10, 10)
self.torch_softmax = torch.nn.Softmax(0)
self.torch_conv2d1 = torch.nn.Conv2d(10, 10, 3)
self.torch_tensor = torch.ones(3, 3)
self.torch_param = torch.nn.Parameter(torch.ones(4, 4))

self.paddle_fc1 = paddle.nn.Linear(10, 10)
self.paddle_softmax = paddle.nn.Softmax(0)
self.paddle_conv2d1 = paddle.nn.Conv2D(10, 10, 3)
self.paddle_tensor = paddle.ones((4, 4))

class TorchModuleForTest(torch.nn.Module):
def __init__(self):
super(TorchModuleForTest, self).__init__()

self.torch_fc1 = torch.nn.Linear(10, 10)
self.torch_softmax = torch.nn.Softmax(0)
self.torch_conv2d1 = torch.nn.Conv2d(10, 10, 3)
self.torch_tensor = torch.ones(3, 3)
self.torch_param = torch.nn.Parameter(torch.ones(4, 4))

class PaddleModuleForTest(paddle.nn.Layer):
def __init__(self):
super(PaddleModuleForTest, self).__init__()

self.paddle_fc1 = paddle.nn.Linear(10, 10)
self.paddle_softmax = paddle.nn.Softmax(0)
self.paddle_conv2d1 = paddle.nn.Conv2D(10, 10, 3)
self.paddle_tensor = paddle.ones((4, 4))


@pytest.mark.torchpaddle
class TestTorchPaddleMixModule:

def setup_method(self):

self.model = MixModuleForTest()
self.torch_model = TorchModuleForTest()
self.paddle_model = PaddleModuleForTest()

def test_to(self):
"""
        Test the mixed model's to function
"""
self.model.to("cuda")
self.torch_model.to("cuda")
self.paddle_model.to("gpu")
self.if_device_correct("cuda")

self.model.to("cuda:2")
self.torch_model.to("cuda:2")
self.paddle_model.to("gpu:2")
self.if_device_correct("cuda:2")

self.model.to("gpu:1")
self.torch_model.to("cuda:1")
self.paddle_model.to("gpu:1")
self.if_device_correct("cuda:1")

self.model.to("cpu")
self.torch_model.to("cpu")
self.paddle_model.to("cpu")
self.if_device_correct("cpu")

def test_train_eval(self):
"""
        Test the train and eval functions
"""
self.model.eval()
self.if_training_correct(False)

self.model.train()
self.if_training_correct(True)

def test_parameters(self):
"""
        Test the parameters() function; since initialization is random, only the lengths of the results are compared for now
"""
mix_params = []
params = []

for value in self.model.named_parameters():
mix_params.append(value)

for value in chain(self.torch_model.named_parameters(), self.paddle_model.named_parameters()):
params.append(value)

assert len(params) == len(mix_params)

def test_named_parameters(self):
"""
        Test the named_parameters function
"""
mix_param_names = []
param_names = []

for name, value in self.model.named_parameters():
mix_param_names.append(name)

for name, value in chain(self.torch_model.named_parameters(), self.paddle_model.named_parameters()):
param_names.append(name)

assert sorted(param_names) == sorted(mix_param_names)

def test_torch_named_parameters(self):
"""
Test extracting the torch parameters
"""
mix_param_names = []
param_names = []

for name, value in self.model.named_parameters(backend="torch"):
mix_param_names.append(name)

for name, value in self.torch_model.named_parameters():
param_names.append(name)

assert sorted(param_names) == sorted(mix_param_names)

def test_paddle_named_parameters(self):
"""
Test extracting the paddle parameters
"""
mix_param_names = []
param_names = []

for name, value in self.model.named_parameters(backend="paddle"):
mix_param_names.append(name)

for name, value in self.paddle_model.named_parameters():
param_names.append(name)

assert sorted(param_names) == sorted(mix_param_names)

def test_torch_state_dict(self):
"""
Test extracting the torch state dict
"""
torch_dict = self.torch_model.state_dict()
mix_dict = self.model.state_dict(backend="torch")

assert sorted(torch_dict.keys()) == sorted(mix_dict.keys())

def test_paddle_state_dict(self):
"""
Test extracting the paddle state dict
"""
paddle_dict = self.paddle_model.state_dict()
mix_dict = self.model.state_dict(backend="paddle")

# TODO: the test reports "passed" and then paddle prints an abnormal-exit message
assert sorted(paddle_dict.keys()) == sorted(mix_dict.keys())

def test_state_dict(self):
"""
Test extracting the full state dict
"""
all_dict = self.torch_model.state_dict()
all_dict.update(self.paddle_model.state_dict())
mix_dict = self.model.state_dict()

# TODO: the test reports "passed" and then paddle prints an abnormal-exit message
assert sorted(all_dict.keys()) == sorted(mix_dict.keys())

def test_load_state_dict(self):
"""
Test the load_state_dict() method
"""
state_dict = self.model.state_dict()

new_model = MixModuleForTest()
new_model.load_state_dict(state_dict)
new_state_dict = new_model.state_dict()

for name, value in state_dict.items():
state_dict[name] = value.tolist()
for name, value in new_state_dict.items():
new_state_dict[name] = value.tolist()

assert state_dict == new_state_dict

def test_save_and_load_state_dict(self):
"""
Test the save_state_dict_to_file() and load_state_dict_from_file() methods
"""
path = "model"
try:
self.model.save_state_dict_to_file(path)
new_model = MixModuleForTest()
new_model.load_state_dict_from_file(path)

state_dict = self.model.state_dict()
new_state_dict = new_model.state_dict()

for name, value in state_dict.items():
state_dict[name] = value.tolist()
for name, value in new_state_dict.items():
new_state_dict[name] = value.tolist()

assert state_dict == new_state_dict
finally:
rank_zero_rm(path)

def if_device_correct(self, device):

assert self.model.torch_fc1.weight.device == self.torch_model.torch_fc1.weight.device
assert self.model.torch_fc1.bias.device == self.torch_model.torch_fc1.bias.device
assert self.model.torch_conv2d1.weight.device == self.torch_model.torch_conv2d1.weight.device
assert self.model.torch_conv2d1.bias.device == self.torch_model.torch_conv2d1.bias.device
assert self.model.torch_tensor.device == self.torch_model.torch_tensor.device
assert self.model.torch_param.device == self.torch_model.torch_param.device

if device == "cpu":
assert self.model.paddle_fc1.weight.place.is_cpu_place()
assert self.model.paddle_fc1.bias.place.is_cpu_place()
assert self.model.paddle_conv2d1.weight.place.is_cpu_place()
assert self.model.paddle_conv2d1.bias.place.is_cpu_place()
assert self.model.paddle_tensor.place.is_cpu_place()
elif device.startswith("cuda"):
assert self.model.paddle_fc1.weight.place.is_gpu_place()
assert self.model.paddle_fc1.bias.place.is_gpu_place()
assert self.model.paddle_conv2d1.weight.place.is_gpu_place()
assert self.model.paddle_conv2d1.bias.place.is_gpu_place()
assert self.model.paddle_tensor.place.is_gpu_place()

assert self.model.paddle_fc1.weight.place.gpu_device_id() == self.paddle_model.paddle_fc1.weight.place.gpu_device_id()
assert self.model.paddle_fc1.bias.place.gpu_device_id() == self.paddle_model.paddle_fc1.bias.place.gpu_device_id()
assert self.model.paddle_conv2d1.weight.place.gpu_device_id() == self.paddle_model.paddle_conv2d1.weight.place.gpu_device_id()
assert self.model.paddle_conv2d1.bias.place.gpu_device_id() == self.paddle_model.paddle_conv2d1.bias.place.gpu_device_id()
assert self.model.paddle_tensor.place.gpu_device_id() == self.paddle_model.paddle_tensor.place.gpu_device_id()
else:
raise NotImplementedError

def if_training_correct(self, training):

assert self.model.torch_fc1.training == training
assert self.model.torch_softmax.training == training
assert self.model.torch_conv2d1.training == training

assert self.model.paddle_fc1.training == training
assert self.model.paddle_softmax.training == training
assert self.model.paddle_conv2d1.training == training


############################################################################
#
# Tests on the MNIST dataset
#
############################################################################

class MNISTDataset(Dataset):
def __init__(self, dataset):

self.dataset = [
(
np.array(img).astype('float32').reshape(-1),
label
) for img, label in dataset
]

def __getitem__(self, idx):
return self.dataset[idx]

def __len__(self):
return len(self.dataset)

class MixMNISTModel(MixModule):
def __init__(self):
super(MixMNISTModel, self).__init__()

self.fc1 = paddle.nn.Linear(784, 64)
self.fc2 = paddle.nn.Linear(64, 32)
self.fc3 = torch.nn.Linear(32, 10)
self.fc4 = torch.nn.Linear(10, 10)

def forward(self, x):

paddle_out = self.fc1(x)
paddle_out = self.fc2(paddle_out)
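# hand off between frameworks: convert the paddle activations into a torch tensor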
torch_in = paddle2torch(paddle_out)
torch_out = self.fc3(torch_in)
torch_out = self.fc4(torch_out)

return torch_out

@pytest.mark.torchpaddle
class TestMNIST:

@classmethod
def setup_class(cls):

cls.train_dataset = paddle.vision.datasets.MNIST(mode='train')
cls.test_dataset = paddle.vision.datasets.MNIST(mode='test')
cls.train_dataset = MNISTDataset(cls.train_dataset)

cls.lr = 0.0003
cls.epochs = 20

cls.dataloader = DataLoader(cls.train_dataset, batch_size=100, shuffle=True)

def setup_method(self):
self.model = MixMNISTModel().to("cuda")
self.torch_loss_func = torch.nn.CrossEntropyLoss()

self.torch_opt = torch.optim.Adam(self.model.parameters(backend="torch"), self.lr)
self.paddle_opt = paddle.optimizer.Adam(parameters=self.model.parameters(backend="paddle"), learning_rate=self.lr)

def test_case1(self):

# start training
for epoch in range(self.epochs):
epoch_loss, batch = 0, 0
for batch, (img, label) in enumerate(self.dataloader):

img = paddle.to_tensor(img).cuda()
torch_out = self.model(img)
label = torch.from_numpy(label.numpy()).reshape(-1)
loss = self.torch_loss_func(torch_out.cpu(), label)
epoch_loss += loss.item()

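# backward runs on the torch half of the model; gradients flow back into the
# paddle half through the framework bridge, so both optimizers must step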
loss.backward()
self.torch_opt.step()
self.paddle_opt.step()
self.torch_opt.zero_grad()
self.paddle_opt.clear_grad()

# for-else: the assert runs after the loop completes without a break
else:
assert epoch_loss / (batch + 1) < 0.3

# start testing
correct = 0
for img, label in self.test_dataset:

img = paddle.to_tensor(np.array(img).astype('float32').reshape(1, -1))
torch_out = self.model(img)
res = torch_out.softmax(-1).argmax().item()
label = label.item()
if res == label:
correct += 1

acc = correct / len(self.test_dataset)
assert acc > 0.85

############################################################################
#
# Tests on the Chinese ERNIE dataset
#
############################################################################

+ 0
- 435
tests/modules/mix_modules/_test_utils.py

@@ -1,435 +0,0 @@
import unittest
import os

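# silence logging output for the test run (set before importing the frameworks)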
os.environ["log_silent"] = "1"
import torch
import paddle
import jittor

from fastNLP.modules.mix_modules.utils import (
paddle2torch,
torch2paddle,
jittor2torch,
torch2jittor,
)

############################################################################
#
# Tests for the paddle-to-torch conversion
#
############################################################################

class Paddle2TorchTestCase(unittest.TestCase):

def check_torch_tensor(self, tensor, device, requires_grad):
"""
Utility that checks a tensor's device and gradient status
"""

assert isinstance(tensor, torch.Tensor)
assert tensor.device == torch.device(device)
assert tensor.requires_grad == requires_grad

def test_gradient(self):
"""
Test that backpropagation is correct after conversion
"""

x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0, 5.0], stop_gradient=False)
y = paddle2torch(x)
z = 3 * (y ** 2)
z.sum().backward()
assert y.grad.tolist() == [6, 12, 18, 24, 30]

def test_tensor_transfer(self):
"""
Test that the device and gradient conversion of a single tensor is correct
"""

paddle_tensor = paddle.rand((3, 4, 5)).cpu()
res = paddle2torch(paddle_tensor)
self.check_torch_tensor(res, "cpu", not paddle_tensor.stop_gradient)

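# no_gradient=None keeps the source tensor's setting: requires_grad == not stop_gradient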
res = paddle2torch(paddle_tensor, target_device="cuda:2", no_gradient=None)
self.check_torch_tensor(res, "cuda:2", not paddle_tensor.stop_gradient)

res = paddle2torch(paddle_tensor, target_device="cuda:1", no_gradient=True)
self.check_torch_tensor(res, "cuda:1", False)

res = paddle2torch(paddle_tensor, target_device="cuda:1", no_gradient=False)
self.check_torch_tensor(res, "cuda:1", True)

def test_list_transfer(self):
"""
Test conversion of a list of tensors
"""

paddle_list = [paddle.rand((6, 4, 2)).cuda(1) for i in range(10)]
res = paddle2torch(paddle_list)
assert isinstance(res, list)
for t in res:
self.check_torch_tensor(t, "cuda:1", False)

res = paddle2torch(paddle_list, target_device="cpu", no_gradient=False)
assert isinstance(res, list)
for t in res:
self.check_torch_tensor(t, "cpu", True)

def test_tensor_tuple_transfer(self):
"""
Test conversion of a tuple of tensors
"""

paddle_list = [paddle.rand((6, 4, 2)).cuda(1) for i in range(10)]
paddle_tuple = tuple(paddle_list)
res = paddle2torch(paddle_tuple)
assert isinstance(res, tuple)
for t in res:
self.check_torch_tensor(t, "cuda:1", False)

def test_dict_transfer(self):
"""
Test conversion of a dict containing nested structures
"""

paddle_dict = {
"tensor": paddle.rand((3, 4)).cuda(0),
"list": [paddle.rand((6, 4, 2)).cuda(0) for i in range(10)],
"dict":{
"list": [paddle.rand((6, 4, 2)).cuda(0) for i in range(10)],
"tensor": paddle.rand((3, 4)).cuda(0)
},
"int": 2,
"string": "test string"
}
res = paddle2torch(paddle_dict)
assert isinstance(res, dict)
self.check_torch_tensor(res["tensor"], "cuda:0", False)
assert isinstance(res["list"], list)
for t in res["list"]:
self.check_torch_tensor(t, "cuda:0", False)
assert isinstance(res["int"], int)
assert isinstance(res["string"], str)
assert isinstance(res["dict"], dict)
assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]:
self.check_torch_tensor(t, "cuda:0", False)
self.check_torch_tensor(res["dict"]["tensor"], "cuda:0", False)


############################################################################
#
# Tests for the torch-to-paddle conversion
#
############################################################################

class Torch2PaddleTestCase(unittest.TestCase):

def check_paddle_tensor(self, tensor, device, stop_gradient):
"""
Utility that checks the resulting paddle tensor's device and gradient status
"""

assert isinstance(tensor, paddle.Tensor)
if device == "cpu":
assert tensor.place.is_cpu_place()
elif device.startswith("gpu"):
paddle_device = paddle.device._convert_to_place(device)
assert tensor.place.is_gpu_place()
if hasattr(tensor.place, "gpu_device_id"):
# paddle has two kinds of Place:
# paddle.fluid.core.Place is the type used when a Tensor is created,
# and it exposes gpu_device_id() to query the device
assert tensor.place.gpu_device_id() == paddle_device.get_device_id()
else:
# _convert_to_place returns a paddle.CUDAPlace,
# whose device is queried via get_device_id()
assert tensor.place.get_device_id() == paddle_device.get_device_id()
else:
raise NotImplementedError
assert tensor.stop_gradient == stop_gradient

def test_gradient(self):
"""
Test gradient backpropagation after conversion
"""

x = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], requires_grad=True)
y = torch2paddle(x)
z = 3 * (y ** 2)
z.sum().backward()
assert y.grad.tolist() == [6, 12, 18, 24, 30]

def test_tensor_transfer(self):
"""
Test conversion of a single tensor
"""

torch_tensor = torch.rand((3, 4, 5))
res = torch2paddle(torch_tensor)
self.check_paddle_tensor(res, "cpu", True)

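# no_gradient=None follows the source tensor; torch.rand defaults to
# requires_grad=False, so the converted tensor keeps stop_gradient=True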
res = torch2paddle(torch_tensor, target_device="gpu:2", no_gradient=None)
self.check_paddle_tensor(res, "gpu:2", True)

res = torch2paddle(torch_tensor, target_device="gpu:2", no_gradient=True)
self.check_paddle_tensor(res, "gpu:2", True)

res = torch2paddle(torch_tensor, target_device="gpu:2", no_gradient=False)
self.check_paddle_tensor(res, "gpu:2", False)

def test_tensor_list_transfer(self):
"""
Test conversion of a list of tensors
"""

torch_list = [torch.rand(6, 4, 2) for i in range(10)]
res = torch2paddle(torch_list)
assert isinstance(res, list)
for t in res:
self.check_paddle_tensor(t, "cpu", True)

res = torch2paddle(torch_list, target_device="gpu:1", no_gradient=False)
assert isinstance(res, list)
for t in res:
self.check_paddle_tensor(t, "gpu:1", False)

def test_tensor_tuple_transfer(self):
"""
Test conversion of a tuple of tensors
"""
torch_list = [torch.rand(6, 4, 2) for i in range(10)]
torch_tuple = tuple(torch_list)
res = torch2paddle(torch_tuple, target_device="cpu")
assert isinstance(res, tuple)
for t in res:
self.check_paddle_tensor(t, "cpu", True)

def test_dict_transfer(self):
"""
Test conversion of a complex dict structure
"""

torch_dict = {
"tensor": torch.rand((3, 4)),
"list": [torch.rand(6, 4, 2) for i in range(10)],
"dict":{
"list": [torch.rand(6, 4, 2) for i in range(10)],
"tensor": torch.rand((3, 4))
},
"int": 2,
"string": "test string"
}
res = torch2paddle(torch_dict)
assert isinstance(res, dict)
self.check_paddle_tensor(res["tensor"], "cpu", True)
assert isinstance(res["list"], list)
for t in res["list"]:
self.check_paddle_tensor(t, "cpu", True)
assert isinstance(res["int"], int)
assert isinstance(res["string"], str)
assert isinstance(res["dict"], dict)
assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]:
self.check_paddle_tensor(t, "cpu", True)
self.check_paddle_tensor(res["dict"]["tensor"], "cpu", True)


############################################################################
#
# Tests for the jittor-to-torch conversion
#
############################################################################

class Jittor2TorchTestCase(unittest.TestCase):

def check_torch_tensor(self, tensor, device, requires_grad):
"""
Utility that checks the resulting torch tensor
"""

assert isinstance(tensor, torch.Tensor)
if device == "cpu":
assert not tensor.is_cuda
else:
assert tensor.device == torch.device(device)
assert tensor.requires_grad == requires_grad

def test_var_transfer(self):
"""
Test conversion of a single jittor Var
"""

jittor_var = jittor.rand((3, 4, 5))
res = jittor2torch(jittor_var)
self.check_torch_tensor(res, "cpu", True)

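# no_gradient=None appears to keep the gradient setting implied by the source Var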
res = jittor2torch(jittor_var, target_device="cuda:2", no_gradient=None)
self.check_torch_tensor(res, "cuda:2", True)

res = jittor2torch(jittor_var, target_device="cuda:2", no_gradient=True)
self.check_torch_tensor(res, "cuda:2", False)

res = jittor2torch(jittor_var, target_device="cuda:2", no_gradient=False)
self.check_torch_tensor(res, "cuda:2", True)

def test_var_list_transfer(self):
"""
Test conversion of a list of jittor Vars
"""

jittor_list = [jittor.rand((6, 4, 2)) for i in range(10)]
res = jittor2torch(jittor_list)
assert isinstance(res, list)
for t in res:
self.check_torch_tensor(t, "cpu", True)

res = jittor2torch(jittor_list, target_device="cuda:1", no_gradient=False)
assert isinstance(res, list)
for t in res:
self.check_torch_tensor(t, "cuda:1", True)

def test_var_tuple_transfer(self):
"""
Test conversion of a tuple of jittor Vars
"""

jittor_list = [jittor.rand((6, 4, 2)) for i in range(10)]
jittor_tuple = tuple(jittor_list)
res = jittor2torch(jittor_tuple, target_device="cpu")
assert isinstance(res, tuple)
for t in res:
self.check_torch_tensor(t, "cpu", True)

def test_dict_transfer(self):
"""
Test conversion of a dict structure
"""

jittor_dict = {
"tensor": jittor.rand((3, 4)),
"list": [jittor.rand(6, 4, 2) for i in range(10)],
"dict":{
"list": [jittor.rand(6, 4, 2) for i in range(10)],
"tensor": jittor.rand((3, 4))
},
"int": 2,
"string": "test string"
}
res = jittor2torch(jittor_dict)
assert isinstance(res, dict)
self.check_torch_tensor(res["tensor"], "cpu", True)
assert isinstance(res["list"], list)
for t in res["list"]:
self.check_torch_tensor(t, "cpu", True)
assert isinstance(res["int"], int)
assert isinstance(res["string"], str)
assert isinstance(res["dict"], dict)
assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]:
self.check_torch_tensor(t, "cpu", True)
self.check_torch_tensor(res["dict"]["tensor"], "cpu", True)


############################################################################
#
# Tests for the torch-to-jittor conversion
#
############################################################################

class Torch2JittorTestCase(unittest.TestCase):

def check_jittor_var(self, var, requires_grad):
"""
Utility that checks the resulting jittor Var's gradient status
"""

assert isinstance(var, jittor.Var)
assert var.requires_grad == requires_grad

def test_gradient(self):
"""
Test the gradients from backpropagation
"""

x = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], requires_grad=True)
y = torch2jittor(x)
z = 3 * (y ** 2)
grad = jittor.grad(z, y)
assert grad.tolist() == [6.0, 12.0, 18.0, 24.0, 30.0]

def test_tensor_transfer(self):
"""
Test conversion of a single tensor to a jittor Var
"""

torch_tensor = torch.rand((3, 4, 5))
res = torch2jittor(torch_tensor)
self.check_jittor_var(res, False)

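# no_gradient=None mirrors the source tensor's requires_grad (False for torch.rand)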
res = torch2jittor(torch_tensor, no_gradient=None)
self.check_jittor_var(res, False)

res = torch2jittor(torch_tensor, no_gradient=True)
self.check_jittor_var(res, False)

res = torch2jittor(torch_tensor, no_gradient=False)
self.check_jittor_var(res, True)

def test_tensor_list_transfer(self):
"""
Test conversion of a list of tensors
"""

torch_list = [torch.rand((6, 4, 2)) for i in range(10)]
res = torch2jittor(torch_list)
assert isinstance(res, list)
for t in res:
self.check_jittor_var(t, False)

res = torch2jittor(torch_list, no_gradient=False)
assert isinstance(res, list)
for t in res:
self.check_jittor_var(t, True)

def test_tensor_tuple_transfer(self):
"""
Test conversion of a tuple of tensors
"""

torch_list = [torch.rand((6, 4, 2)) for i in range(10)]
torch_tuple = tuple(torch_list)
res = torch2jittor(torch_tuple)
assert isinstance(res, tuple)
for t in res:
self.check_jittor_var(t, False)

def test_dict_transfer(self):
"""
Test conversion of a dict structure
"""

torch_dict = {
"tensor": torch.rand((3, 4)),
"list": [torch.rand(6, 4, 2) for i in range(10)],
"dict":{
"list": [torch.rand(6, 4, 2) for i in range(10)],
"tensor": torch.rand((3, 4))
},
"int": 2,
"string": "test string"
}
res = torch2jittor(torch_dict)
assert isinstance(res, dict)
self.check_jittor_var(res["tensor"], False)
assert isinstance(res["list"], list)
for t in res["list"]:
self.check_jittor_var(t, False)
assert isinstance(res["int"], int)
assert isinstance(res["string"], str)
assert isinstance(res["dict"], dict)
assert isinstance(res["dict"]["list"], list)
for t in res["dict"]["list"]:
self.check_jittor_var(t, False)
self.check_jittor_var(res["dict"]["tensor"], False)
