
Remove the contents of mix_module and torch_paddle_driver

tags/v1.0.0alpha
x54-729 2 years ago
parent commit 34698f0f9e
19 changed files with 0 additions and 1957 deletions
  1. +0 -1 fastNLP/core/__init__.py
  2. +0 -2 fastNLP/core/drivers/__init__.py
  3. +0 -5 fastNLP/core/drivers/torch_paddle_driver/__init__.py
  4. +0 -193 fastNLP/core/drivers/torch_paddle_driver/torch_paddle_driver.py
  5. +0 -4 fastNLP/core/drivers/torch_paddle_driver/utils.py
  6. +0 -2 fastNLP/core/utils/__init__.py
  7. +0 -49 fastNLP/core/utils/torch_paddle_utils.py
  8. +0 -9 fastNLP/modules/__init__.py
  9. +0 -10 fastNLP/modules/mix_modules/__init__.py
  10. +0 -310 fastNLP/modules/mix_modules/mix_module.py
  11. +0 -233 fastNLP/modules/mix_modules/utils.py
  12. +0 -0 tests/core/drivers/torch_paddle_driver/__init__.py
  13. +0 -122 tests/core/drivers/torch_paddle_driver/_test_torch_paddle_driver.py
  14. +0 -0 tests/core/drivers/torch_paddle_driver/_test_utils.py
  15. +0 -204 tests/core/utils/_test_torch_paddle_utils.py
  16. +0 -0 tests/modules/__init__.py
  17. +0 -0 tests/modules/mix_modules/__init__.py
  18. +0 -378 tests/modules/mix_modules/_test_mix_module.py
  19. +0 -435 tests/modules/mix_modules/_test_utils.py

+ 0 - 1 fastNLP/core/__init__.py

@@ -63,7 +63,6 @@ __all__ = [
     "PaddleFleetDriver",
     "JittorSingleDriver",
     "JittorMPIDriver",
-    "TorchPaddleDriver",
 
     # log
     "logger",


+ 0 - 2 fastNLP/core/drivers/__init__.py

@@ -9,7 +9,6 @@ __all__ = [
     "JittorDriver",
     "JittorSingleDriver",
     "JittorMPIDriver",
-    "TorchPaddleDriver",
     'torch_seed_everything',
     'paddle_seed_everything',
     'optimizer_state_to_device'
@@ -18,7 +17,6 @@ __all__ = [
 from .torch_driver import TorchDriver, TorchSingleDriver, TorchDDPDriver, torch_seed_everything, optimizer_state_to_device
 from .jittor_driver import JittorDriver, JittorMPIDriver, JittorSingleDriver
 from .paddle_driver import PaddleDriver, PaddleFleetDriver, PaddleSingleDriver, paddle_seed_everything
-from .torch_paddle_driver import TorchPaddleDriver
 from .driver import Driver






+ 0 - 5 fastNLP/core/drivers/torch_paddle_driver/__init__.py

@@ -1,5 +0,0 @@
__all__ = [
    "TorchPaddleDriver",
]

from .torch_paddle_driver import TorchPaddleDriver

+ 0 - 193 fastNLP/core/drivers/torch_paddle_driver/torch_paddle_driver.py

@@ -1,193 +0,0 @@
from typing import Optional, Dict, Union, Callable, Tuple

from fastNLP.envs.imports import _NEED_IMPORT_PADDLE, _NEED_IMPORT_TORCH
from fastNLP.core.utils.utils import _get_fun_msg


if _NEED_IMPORT_PADDLE:
    import paddle
    from paddle.io import DataLoader as PaddleDataLoader
    from paddle.optimizer import Optimizer as PaddleOptimizer

if _NEED_IMPORT_TORCH:
    import torch
    from torch.utils.data import DataLoader as TorchDataLoader
    from torch.optim import Optimizer as TorchOptimizer

from fastNLP.core.drivers.driver import Driver
from fastNLP.envs.distributed import rank_zero_call
from fastNLP.core.utils.utils import auto_param_call, apply_to_collection
from fastNLP.core.log.logger import logger
from fastNLP.modules.mix_modules.mix_module import MixModule


__all__ = [
    "TorchPaddleDriver",
]

class TorchPaddleDriver(Driver):
    """
    Driver for mixed torch and paddle models.
    Since the two frameworks differ, multi-card support is inconvenient to implement; for now only single-card CPU and GPU are supported.
    """
    def __init__(self, model, device: Optional[str] = None, **kwargs):
        super(TorchPaddleDriver, self).__init__(model)

        self.model_device = device
        self.torch_non_blocking = kwargs.get("torch_non_blocking", None)
        self.paddle_blocking = kwargs.get("paddle_blocking", None)

        self._data_device = kwargs.get("_data_device", None)
        if isinstance(self._data_device, int):
            # convert data_device to a string of the form cuda:x
            if self._data_device < 0:
                raise ValueError("Parameter `_data_device` can not be smaller than 0.")
            _could_use_device_num = paddle.device.cuda.device_count()
            if self._data_device >= _could_use_device_num:
                raise ValueError("The gpu device specified by parameter `device` does not exist.")
            self._data_device = f"cuda:{self._data_device}"
        elif self._data_device is not None:
            raise ValueError("Parameter `device` is of the wrong type, please check our documentation for the right use.")

    def setup(self):
        if self.model_device is not None:
            paddle.device.set_device(self.model_device.replace("cuda", "gpu"))
            self.model.to(self.model_device)

    @staticmethod
    def check_dataloader_legality(dataloader, dataloader_name, is_train: bool = False):
        if is_train:
            if not isinstance(dataloader, (TorchDataLoader, PaddleDataLoader)):
                raise ValueError(f"Parameter `{dataloader_name}` should be 'torch.util.data.DataLoader' or `paddle.io.dataloader` type, not {type(dataloader)}.")
        else:
            if not isinstance(dataloader, Dict):
                raise ValueError(f"Parameter `{dataloader_name}` should be 'Dict' type, not {type(dataloader)}.")
            else:
                for each_dataloader in dataloader.values():
                    if not isinstance(each_dataloader, (TorchDataLoader, PaddleDataLoader)):
                        raise ValueError(f"Each dataloader of parameter `{dataloader_name}` should be "
                                         f"'torch.util.data.DataLoader' or `paddle.io.dataloader` "
                                         f"type, not {type(each_dataloader)}.")

    @staticmethod
    def _check_optimizer_legality(optimizers):
        for each_optimizer in optimizers:
            if not isinstance(each_optimizer, (TorchOptimizer, PaddleOptimizer)):
                raise ValueError(f"Each optimizer of parameter `optimizers` should be "
                                 f"'torch.optim.Optimizer' or 'paddle.optimizers.Optimizer' type, "
                                 f"not {type(each_optimizer)}.")

    def step(self):
        for optimizer in self.optimizers:
            optimizer.step()

    def backward(self, loss):
        loss.backward()

    def zero_grad(self):
        for optimizer in self.optimizers:
            if isinstance(optimizer, TorchOptimizer):
                optimizer.zero_grad()
            elif isinstance(optimizer, PaddleOptimizer):
                optimizer.clear_grad()
            else:
                raise ValueError("Unknown optimizer type.")

    def model_call(self, batch, fn: Callable, signature_fn: Optional[Callable]) -> Dict:
        if isinstance(batch, Dict) and not self.wo_auto_param_call:
            return auto_param_call(fn, batch, signature_fn=signature_fn)
        else:
            return fn(batch)

    def get_model_call_fn(self, fn: str) -> Tuple:
        if hasattr(self.model, fn):
            fn = getattr(self.model, fn)
            if not callable(fn):
                raise RuntimeError(f"The `{fn}` attribute is not `Callable`.")
            logger.debug(f'Use {_get_fun_msg(fn, with_fp=False)}...')
            return fn, None
        elif fn in {"train_step", "evaluate_step"}:
            logger.debug(f'Use {_get_fun_msg(self.model.forward, with_fp=False)}...')
            return self.model, self.model.forward
        else:
            raise RuntimeError(f"There is no `{fn}` method in your {type(self.model)}.")

    def predict_step(self, batch):
        if isinstance(batch, Dict):
            return auto_param_call(self._predict_step, batch)
        else:
            return self._predict_step(batch)

    @rank_zero_call
    def save_model(self, filepath: str, only_state_dict: bool = True, model_save_fn: Optional[Callable] = None):
        r"""
        Saving the whole model is not supported for now.
        """
        if not only_state_dict:
            logger.warn("TorchPaddleModule only support saving state dicts now.")
        if model_save_fn is not None:
            model_save_fn(filepath)
        else:
            model = self.unwrap_model()
            self.move_model_to_device(model, "cpu")
            self.model.save(filepath)
            self.move_model_to_device(model, self.model_device)

    def load_model(self, filepath: str):
        """
        Function for loading the model;

        :param filepath: location of the saved file (must include the file name);
        :return:
        """
        return self.model.load(filepath)

    def save(self):
        ...

    def load(self):
        ...

    @staticmethod
    def move_model_to_device(model: MixModule, device: str):
        if device is not None:
            model.to(device)

    def unwrap_model(self):
        return self.model

    @staticmethod
    def tensor_to_numeric(tensor):
        if tensor is None:
            return None

        def _translate(_data):
            return _data.tolist()

        return apply_to_collection(
            data=tensor,
            dtype=(paddle.Tensor, torch.Tensor),
            function=_translate
        )

    def set_model_mode(self, mode: str):
        assert mode in {"train", "eval"}
        getattr(self.model, mode)()

    def get_model_device(self):
        return self.model_device

    @property
    def data_device(self):
        if self.model_device is not None:
            return self.model_device
        else:
            return self._data_device

    def set_sampler_epoch(self, dataloader: Union['TorchDataLoader', 'PaddleDataLoader'], cur_epoch_idx):
        # ensures correctness when shuffle=True in ddp training, since the random seed of the sampler's shuffle must be identical on every process;
        return dataloader
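
For context, a minimal sketch of how this driver was exercised (mirroring the MNIST test further down; `model` is assumed to be a MixModule subclass, and `torch_opt`/`paddle_opt` optimizers built from its per-backend parameters):

# hypothetical usage of the removed TorchPaddleDriver, modeled on the tests below
driver = TorchPaddleDriver(model=model, device="cuda:0")
driver.set_optimizers([torch_opt, paddle_opt])  # one optimizer per framework
driver.setup()                                  # moves the mixed model to cuda:0
torch_out = driver.train_step(batch)            # dispatches to model.train_step
loss = torch.nn.CrossEntropyLoss()(torch_out.cpu(), label)
driver.backward(loss)
driver.step()       # steps both the torch and the paddle optimizer
driver.zero_grad()  # zero_grad() for torch, clear_grad() for paddle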

+ 0 - 4 fastNLP/core/drivers/torch_paddle_driver/utils.py

@@ -1,4 +0,0 @@
from fastNLP.envs.imports import _NEED_IMPORT_PADDLE

if _NEED_IMPORT_PADDLE:
    pass

+ 0 - 2 fastNLP/core/utils/__init__.py

@@ -11,7 +11,6 @@ __all__ = [
     'is_in_fnlp_paddle_dist',
     'is_in_paddle_launch_dist',
     'f_rich_progress',
-    'torch_paddle_move_data_to_device',
     'torch_move_data_to_device',
     'get_fn_arg_names',
     'auto_param_call',
@@ -32,7 +31,6 @@ from .jittor_utils import is_jittor_dataset, jittor_collate_wraps
 from .paddle_utils import get_device_from_visible, paddle_to, paddle_move_data_to_device, get_paddle_device_id, get_paddle_gpu_str, is_in_paddle_dist, \
     is_in_fnlp_paddle_dist, is_in_paddle_launch_dist
 from .rich_progress import f_rich_progress
-from .torch_paddle_utils import torch_paddle_move_data_to_device
 from .torch_utils import torch_move_data_to_device
 from .utils import *




+ 0 - 49 fastNLP/core/utils/torch_paddle_utils.py

@@ -1,49 +0,0 @@
from typing import Any, Optional

from fastNLP.envs.imports import _NEED_IMPORT_PADDLE, _NEED_IMPORT_TORCH

if _NEED_IMPORT_PADDLE:
    import paddle

if _NEED_IMPORT_TORCH:
    import torch

__all__ = [
    "torch_paddle_move_data_to_device",
]

from .utils import apply_to_collection
from .paddle_utils import paddle_to


def torch_paddle_move_data_to_device(batch: Any, device: Optional[str] = None, non_blocking: Optional[bool] = True,
                                     data_device: Optional[str] = None) -> Any:
    r"""
    Move a collection of data to the given device. Only paddle.Tensor and torch.Tensor objects are moved; everything else is left unchanged.

    :param batch: the collection of data to move;
    :param device: the target device;
    :param non_blocking: whether the torch transfer is asynchronous;
    :param data_device: the device to fall back to when `device` is None;
    :return: the same collection, but with all contained tensors residing on the new device;
    """

    if device is None:
        if data_device is not None:
            device = data_device
        else:
            return batch

    torch_device = device.replace("gpu", "cuda")
    paddle_device = device.replace("cuda", "gpu")

    def batch_to(data: Any) -> Any:
        if isinstance(data, torch.Tensor):
            data = data.to(torch_device, non_blocking=non_blocking)
        elif isinstance(data, paddle.Tensor):
            data = paddle_to(data, paddle_device)
        return data

    return apply_to_collection(batch, dtype=(paddle.Tensor, torch.Tensor), function=batch_to)
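
A short sketch of the removed helper in use (assuming a CUDA device is available; only the tensors move, while the container structure and non-tensor values are preserved):

batch = {"x": torch.rand(2, 3), "y": paddle.rand((2, 3)), "n": 1}
moved = torch_paddle_move_data_to_device(batch, device="cuda:0")
# moved["x"] is a torch tensor on cuda:0, moved["y"] a paddle tensor on gpu:0,
# and moved["n"] is still the plain int 1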

+ 0 - 9 fastNLP/modules/__init__.py

@@ -1,9 +0,0 @@
__all__ = [
    "MixModule",
    "torch2paddle",
    "paddle2torch",
    "torch2jittor",
    "jittor2torch",
]

from .mix_modules import MixModule, torch2paddle, paddle2torch, torch2jittor, jittor2torch

+ 0 - 10 fastNLP/modules/mix_modules/__init__.py

@@ -1,10 +0,0 @@
__all__ = [
    "MixModule",
    "torch2paddle",
    "paddle2torch",
    "torch2jittor",
    "jittor2torch",
]

from .mix_module import MixModule
from .utils import *

+ 0 - 310 fastNLP/modules/mix_modules/mix_module.py

@@ -1,310 +0,0 @@
import os
import io
import pickle
from typing import Dict
from collections import OrderedDict

import numpy as np

from fastNLP.envs.imports import _NEED_IMPORT_JITTOR, _NEED_IMPORT_PADDLE, _NEED_IMPORT_TORCH
from fastNLP.core.utils.paddle_utils import paddle_to

if _NEED_IMPORT_PADDLE:
    import paddle
    from paddle.nn import Layer as PaddleLayer

if _NEED_IMPORT_TORCH:
    import torch
    from torch.nn import Module as TorchModule, Parameter as TorchParameter

if _NEED_IMPORT_JITTOR:
    import jittor


__all__ = [
    "MixModule",
]

class MixModule:
    """
    A mixed model that can use the Torch and Paddle frameworks at the same time.

    TODO: support different mixing strategies; add state_dict support; decide how to handle a List of Tensors among the parameters;
    consider whether the various models should be categorized at initialization time, as Module does.
    """
    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)

    def named_parameters(self, prefix='', recurse: bool=True, backend=None):
        """
        Return the names and parameters of the model.

        :param prefix: prefix prepended to parameter names in the output
        :param recurse: whether to yield parameters recursively
        :param backend: when `backend` is `None`, return the parameters of all models and tensors;
                        when `backend` is `torch`, return the `torch` parameters;
                        when `backend` is `paddle`, return the `paddle` parameters.
        """
        if backend is None:
            generator = self.attributes(TorchModule, TorchParameter, PaddleLayer)
        elif backend == "torch":
            generator = self.attributes(TorchModule, TorchParameter)
        elif backend == "paddle":
            generator = self.attributes(PaddleLayer)
        else:
            raise ValueError("Unknown backend parameter.")

        for name, value in generator:
            name = prefix + ('.' if prefix else '') + name
            if isinstance(value, TorchParameter):
                # not a Module/Layer type, yield the name and value directly
                yield name, value
            elif recurse:
                # call named_parameters recursively
                for name_r, value_r in value.named_parameters(name, recurse):
                    yield name_r, value_r

    def parameters(self, recurse: bool = True, backend: str = None):
        """
        Return the parameters of the model.

        :param recurse: whether to yield parameters recursively
        :param backend: when `backend` is `None`, return the parameters of all models and tensors;
                        when `backend` is `torch`, return the `torch` parameters;
                        when `backend` is `paddle`, return the `paddle` parameters.
        """
        for name, value in self.named_parameters(recurse=recurse, backend=backend):
            yield value

    def forward(self, *args, **kwargs):
        raise NotImplementedError

    def train_step(self, batch):
        raise NotImplementedError

    def test_step(self, batch):
        raise NotImplementedError

    def evaluate_step(self, batch):
        raise NotImplementedError

    def train(self):
        for name, value in self.attributes(TorchModule, PaddleLayer):
            value.train()

    def eval(self):
        for name, value in self.attributes(TorchModule, PaddleLayer):
            value.eval()

    def to(self, device):
        """
        :param device: device name
        """
        # warn if jittor is involved
        if device == "cpu":
            paddle_device = device
        elif device.startswith("cuda"):
            paddle_device = device.replace("cuda", "gpu")
        elif device.startswith("gpu"):
            paddle_device = device
            device = device.replace("gpu", "cuda")
        else:
            raise ValueError("Device value error")

        for name, value in self.attributes(TorchModule):
            # torch's to function does not affect Tensors in place
            vars(self)[name] = value.to(device)
        for name, value in self.attributes(TorchParameter):
            # a Parameter becomes a plain Tensor after going through to
            vars(self)[name] = TorchParameter(value.to(device), requires_grad=value.requires_grad)

        for name, value in self.attributes(PaddleLayer):
            vars(self)[name] = value.to(paddle_device)
        for name, value in self.attributes(paddle.Tensor):
            # paddle's to function does affect Tensors
            vars(self)[name] = paddle_to(value, paddle_device)

        return self

    def state_dict(self, backend: str = None) -> Dict:
        """
        Return the model's state_dict.

        .. note:: torch's destination parameter will be removed in the future, so no destination parameter is provided here

        :param backend: when `backend` is `None`, return the state dict of all models and tensors;
                        when `backend` is `torch`, return the `torch` state dict;
                        when `backend` is `paddle`, return the `paddle` state dict.
        """
        if backend is None:
            generator = self.attributes(TorchModule, TorchParameter, PaddleLayer)
        elif backend == "torch":
            generator = self.attributes(TorchModule, TorchParameter)
        elif backend == "paddle":
            generator = self.attributes(PaddleLayer)
        else:
            raise ValueError(f"Unknown backend {backend}.")

        destination = OrderedDict()

        for name, value in generator:
            if value is None:
                continue
            if isinstance(value, TorchParameter):
                destination[name] = value
            else:
                # the state_dict functions of the two frameworks differ in parameter names and order
                if isinstance(value, PaddleLayer):
                    kwargs = {
                        "structured_name_prefix": name + ".",
                    }
                elif isinstance(value, TorchModule):
                    kwargs = {
                        "prefix": name + ".",
                    }
                else:
                    raise ValueError(f"Unknown item type {type(value)}")
                destination.update(value.state_dict(**kwargs))

        return destination

    def save_state_dict_to_file(self, path: str):
        """
        Save the model's state dict to path.
        """
        # TODO device restrictions
        filename = os.path.basename(path)
        if filename == "":
            raise ValueError("Received empty filename.")
        dirname = os.path.dirname(path)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        protocol = 4

        saved = {}
        paddle_dict = self.state_dict(backend="paddle")
        torch_dict = self.state_dict(backend="torch")
        # save the paddle part
        # call the processing functions paddle uses when saving
        paddle_saved_obj = paddle.framework.io._build_saved_state_dict(paddle_dict)
        paddle_saved_obj = paddle.fluid.io._unpack_saved_dict(paddle_saved_obj, protocol)
        # store the returned dict
        saved["paddle"] = paddle_saved_obj

        # save the torch part
        buffer = io.BytesIO()
        torch.save(torch_dict, buffer)
        saved["torch"] = buffer.getvalue()

        # save
        with open(path, "wb") as f:
            pickle.dump(saved, f, protocol)

    def load_state_dict_from_file(self, path: str):
        """
        Load the saved state dict from `path`.
        """
        state_dict = {}
        with open(path, "rb") as f:
            loaded = pickle.load(f)
        # load the paddle data
        paddle_loaded_obj = loaded["paddle"]
        paddle_load_result = paddle.fluid.io._pack_loaded_dict(paddle_loaded_obj)
        if "StructuredToParameterName@@" in paddle_load_result:
            for key in paddle_load_result["StructuredToParameterName@@"]:
                if isinstance(paddle_load_result[key], np.ndarray):
                    paddle_load_result[key] = paddle.to_tensor(paddle_load_result[key])
        state_dict.update(paddle_load_result)
        # load the torch data
        torch_loaded_obj = loaded["torch"]
        torch_bytes = io.BytesIO(torch_loaded_obj)
        torch_load_result = torch.load(torch_bytes)
        state_dict.update(torch_load_result)

        self.load_state_dict(state_dict)

    def load_state_dict(self, state_dict):
        """
        Load data from the state dict.
        """
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        new_state = {}

        local_state = self.state_dict()

        # group the dict contents by prefix
        for key, value in state_dict.items():
            splited = key.split(".", 1)
            if len(splited) == 1:
                # no prefix; in practice only torch.nn.Parameter falls into this case
                new_state[key] = value
            else:
                prefix, name = splited
                if prefix not in new_state:
                    new_state[prefix] = {}
                new_state[prefix][name] = value

        for key, param in self.attributes(TorchModule, TorchParameter, PaddleLayer):
            if key in new_state:
                # found a corresponding value in the given dict
                input_param = new_state[key]
                if not isinstance(input_param, dict):
                    # and it is not a dict, i.e. the prefix-less case above
                    # assign following torch.nn.Module._load_from_state_dict
                    if not torch.overrides.is_tensor_like(input_param):
                        error_msgs.append('While copying the parameter named "{}", '
                                          'expected torch.Tensor or Tensor-like object from checkpoint but '
                                          'received {}'
                                          .format(key, type(input_param)))
                        continue

                    # This is used to avoid copying uninitialized parameters into
                    # non-lazy modules, since they don't have the hook to do the checks
                    # in such case, it will error when accessing the .shape attribute.
                    is_param_lazy = torch.nn.parameter.is_lazy(param)
                    # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+
                    if not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1:
                        input_param = input_param[0]

                    if not is_param_lazy and input_param.shape != param.shape:
                        # local shape should match the one in checkpoint
                        error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, '
                                          'the shape in current model is {}.'
                                          .format(key, input_param.shape, param.shape))
                        continue
                    try:
                        with torch.no_grad():
                            param.copy_(input_param)
                    except Exception as ex:
                        error_msgs.append('While copying the parameter named "{}", '
                                          'whose dimensions in the model are {} and '
                                          'whose dimensions in the checkpoint are {}, '
                                          'an exception occurred : {}.'
                                          .format(key, param.size(), input_param.size(), ex.args))
                else:
                    # otherwise the value belongs to a submodule
                    if isinstance(param, TorchModule):
                        # torch module
                        # paddle offers no strict-like parameter, so torch is not held to it either
                        param.load_state_dict(input_param, strict=False)
                    elif isinstance(param, PaddleLayer):
                        # paddle module
                        param.load_dict(input_param)
            else:
                missing_keys.append(key)

        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                self.__class__.__name__, "\n\t".join(error_msgs)))

    def attributes(self, *types):
        """
        Find members of the given types.
        """
        for name, value in vars(self).items():
            if isinstance(value, types):
                yield name, value
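
A minimal sketch of subclassing the removed MixModule (modeled on MixMNISTModel in the tests further down; `paddle2torch` from fastNLP.modules.mix_modules.utils bridges the paddle half to the torch half):

class TinyMixModel(MixModule):  # hypothetical example
    def __init__(self):
        super().__init__()
        self.paddle_fc = paddle.nn.Linear(8, 4)  # found via attributes(PaddleLayer)
        self.torch_fc = torch.nn.Linear(4, 2)    # found via attributes(TorchModule)

    def forward(self, x):
        h = self.paddle_fc(x)                    # paddle half of the forward pass
        return self.torch_fc(paddle2torch(h))    # convert, then run the torch half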

+ 0 - 233 fastNLP/modules/mix_modules/utils.py

@@ -1,233 +0,0 @@
import warnings
import os
from typing import Any, Optional, Union

import numpy as np

from fastNLP.core.utils.utils import apply_to_collection
from fastNLP.core.utils.paddle_utils import paddle_to
from fastNLP.envs.imports import _NEED_IMPORT_JITTOR, _NEED_IMPORT_TORCH, _NEED_IMPORT_PADDLE

if _NEED_IMPORT_PADDLE:
    import paddle

if _NEED_IMPORT_JITTOR:
    import jittor

if _NEED_IMPORT_TORCH:
    import torch

__all__ = [
    "paddle2torch",
    "torch2paddle",
    "jittor2torch",
    "torch2jittor",
]

def _paddle2torch(paddle_tensor: 'paddle.Tensor', target_device: Optional[Union[str, int]] = None, no_gradient: bool = None) -> 'torch.Tensor':
    """
    Convert a paddle tensor to a torch tensor, preserving the gradient for backpropagation.

    :param paddle_tensor: the paddle tensor to convert
    :param target_device: device to move the converted tensor to; when `None`, same as the input tensor.
    :param no_gradient: whether to drop the original tensor's gradient. When `None`, the new tensor matches the input tensor;
                        when `True`, no gradients are kept; when `False`, all gradients are kept.
    :return: the converted torch tensor
    """
    no_gradient = paddle_tensor.stop_gradient if no_gradient is None else no_gradient
    paddle_numpy = paddle_tensor.numpy()
    if not np.issubdtype(paddle_numpy.dtype, np.inexact):
        no_gradient = True

    if target_device is None:
        if paddle_tensor.place.is_gpu_place():
            # paddlepaddle has two kinds of Place, with different ways to get the device id
            if hasattr(paddle_tensor.place, "gpu_device_id"):
                # paddle.fluid.core_avx.Place
                # tensors created in a gpu environment have a place of this type
                target_device = f"cuda:{paddle_tensor.place.gpu_device_id()}"
            else:
                # paddle.CUDAPlace
                target_device = f"cuda:{paddle_tensor.place.get_device_id()}"
        else:
            # TODO: devices such as xpu may need to be supported
            target_device = "cpu"

    if not no_gradient:
        # keep the gradient and keep backpropagation working
        # torch.tensor preserves the numpy array's dtype
        torch_tensor = torch.tensor(paddle_numpy, requires_grad=True, device=target_device)
        hook = torch_tensor.register_hook(
            lambda grad: paddle.autograd.backward(paddle_tensor, paddle.to_tensor(grad.cpu().numpy()))
        )
    else:
        # do not keep the gradient
        torch_tensor = torch.tensor(paddle_numpy, requires_grad=False, device=target_device)

    return torch_tensor


def _torch2paddle(torch_tensor: 'torch.Tensor', target_device: str = None, no_gradient: bool = None) -> 'paddle.Tensor':
    """
    Convert a torch tensor to a paddle tensor, preserving the gradient for backpropagation.

    :param torch_tensor: the torch tensor to convert
    :param target_device: device to move the converted tensor to; when `None`, same as the input tensor.
    :param no_gradient: whether to drop the original tensor's gradient. When `None`, the new tensor matches the input tensor;
                        when `True`, no gradients are kept; when `False`, all gradients are kept.
    :return: the converted paddle tensor
    """
    no_gradient = not torch_tensor.requires_grad if no_gradient is None else no_gradient
    if target_device is None:
        if torch_tensor.is_cuda:
            target_device = f"gpu:{torch_tensor.device.index}"
        else:
            target_device = "cpu"

    if not no_gradient:
        # keep the gradient and keep backpropagation working
        # paddle's stop_gradient behaves opposite to torch's requires_grad
        paddle_tensor = paddle.to_tensor(torch_tensor.detach().numpy(), stop_gradient=False)
        hook = paddle_tensor.register_hook(
            lambda grad: torch.autograd.backward(torch_tensor, torch.tensor(grad.numpy()))
        )
    else:
        paddle_tensor = paddle.to_tensor(torch_tensor.detach().numpy(), stop_gradient=True)

    paddle_tensor = paddle_to(paddle_tensor, target_device)

    return paddle_tensor


def _jittor2torch(jittor_var: 'jittor.Var', target_device: Optional[Union[str, int]] = None, no_gradient: bool = None) -> 'torch.Tensor':
    """
    Convert a jittor Var to a torch tensor, preserving the gradient for backpropagation.

    :param jittor_var: the jittor variable to convert
    :param target_device: device to move the converted tensor to; when `None`, decided by jittor.flags.use_cuda.
    :param no_gradient: whether to drop the original tensor's gradient. When `None`, the new tensor matches the input tensor;
                        when `True`, no gradients are kept; when `False`, all gradients are kept.
    :return: the converted torch tensor
    """
    # TODO: warning: the gradient cannot be kept
    # jittor grads can be passed along through a callback
    # if outputs has a _grad key, differentiation can be implemented
    no_gradient = not jittor_var.requires_grad if no_gradient is None else no_gradient
    if no_gradient == False:
        warnings.warn("The result tensor will not keep gradients due to differences between jittor and pytorch.")
    jittor_numpy = jittor_var.numpy()
    if not np.issubdtype(jittor_numpy.dtype, np.inexact):
        no_gradient = True

    if target_device is None:
        # jittor assigns devices automatically
        # decide based on use_cuda
        if jittor.flags.use_cuda:
            target_device = "cuda:0"
        else:
            target_device = "cpu"

    torch_tensor = torch.tensor(jittor_numpy, requires_grad=not no_gradient, device=target_device)

    return torch_tensor


def _torch2jittor(torch_tensor: 'torch.Tensor', no_gradient: bool = None) -> 'jittor.Var':
    """
    Convert a torch tensor to a jittor Var, preserving the gradient for backpropagation.

    :param torch_tensor: the torch tensor to convert
    :param no_gradient: whether to drop the original tensor's gradient. When `None`, the new tensor matches the input tensor;
                        when `True`, no gradients are kept; when `False`, all gradients are kept.
    :return: the converted jittor variable
    """
    no_gradient = not torch_tensor.requires_grad if no_gradient is None else no_gradient

    if not no_gradient:
        # keep the gradient and keep backpropagation working
        jittor_var = jittor.Var(torch_tensor.detach().numpy())
        jittor_var.requires_grad = True
        hook = jittor_var.register_hook(
            lambda grad: torch.autograd.backward(torch_tensor, torch.tensor(grad.numpy()))
        )
    else:
        jittor_var = jittor.Var(torch_tensor.detach().numpy())
        jittor_var.requires_grad = False

    return jittor_var


def torch2paddle(torch_in: Any, target_device: str = None, no_gradient: bool = None) -> Any:
    """
    Recursively convert the torch tensors contained in the input to paddle tensors.

    :param torch_in: the input containing torch.Tensor objects to convert
    :param target_device: device to move the converted tensors to;
                          when `None`, same as the input tensors.
    :param no_gradient: whether to drop the original tensors' gradients. When `None`, the new tensors match the input tensors;
                        when `True`, no gradients are kept; when `False`, all gradients are kept.
    :return: the input with all torch.Tensor objects converted to paddle.Tensor
    """

    return apply_to_collection(
        torch_in,
        dtype=torch.Tensor,
        function=_torch2paddle,
        target_device=target_device,
        no_gradient=no_gradient,
    )


def paddle2torch(paddle_in: Any, target_device: str = None, no_gradient: bool = None) -> Any:
    """
    Recursively convert the paddle tensors contained in the input to torch tensors.

    :param paddle_in: the input containing paddle.Tensor objects to convert
    :param target_device: device to move the converted tensors to;
                          when `None`, same as the input tensors.
    :param no_gradient: whether to drop the original tensors' gradients. When `None`, the new tensors match the input tensors;
                        when `True`, no gradients are kept; when `False`, all gradients are kept.
    :return: the input with all paddle.Tensor objects converted to torch.Tensor
    """

    return apply_to_collection(
        paddle_in,
        dtype=paddle.Tensor,
        function=_paddle2torch,
        target_device=target_device,
        no_gradient=no_gradient,
    )


def jittor2torch(jittor_in: Any, target_device: str = None, no_gradient: bool = None) -> Any:
    """
    Recursively convert the jittor variables contained in the input to torch tensors.

    :param jittor_in: the jittor variables to convert
    :param target_device: device to move the converted tensors to; when `None`, defaults to cuda:0.
    :param no_gradient: whether to drop the original tensors' gradients. When `None`, the new tensors match the input tensors;
                        when `True`, no gradients are kept; when `False`, all gradients are kept.
    :return: the converted torch tensors
    """

    return apply_to_collection(
        jittor_in,
        dtype=jittor.Var,
        function=_jittor2torch,
        target_device=target_device,
        no_gradient=no_gradient,
    )


def torch2jittor(torch_in: Any, no_gradient: bool = None) -> Any:
    """
    Recursively convert the torch tensors contained in the input to jittor variables.

    :param torch_in: the torch tensors to convert
    :param no_gradient: whether to drop the original tensors' gradients. When `None`, the new tensors match the input tensors;
                        when `True`, no gradients are kept; when `False`, all gradients are kept.
    :return: the converted jittor variables
    """
    return apply_to_collection(
        torch_in,
        dtype=torch.Tensor,
        function=_torch2jittor,
        no_gradient=no_gradient,
    )
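
A sketch of the gradient bridge these converters set up (mirroring the test_gradient cases further down): the converted tensor is a new leaf, and the registered hook pushes its gradient back into the source framework's graph.

x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
y = paddle2torch(x)   # torch leaf with requires_grad=True plus a backward hook
z = 3 * (y ** 2)
z.sum().backward()    # y.grad == [6.0, 12.0, 18.0]; the hook also forwards the
                      # same gradient into the paddle graph via paddle.autograd.backward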

+ 0 - 0 tests/core/drivers/torch_paddle_driver/__init__.py


+ 0 - 122 tests/core/drivers/torch_paddle_driver/_test_torch_paddle_driver.py

@@ -1,122 +0,0 @@
import pytest

from fastNLP.modules.mix_modules.mix_module import MixModule
from fastNLP.core.drivers.torch_paddle_driver.torch_paddle_driver import TorchPaddleDriver
from fastNLP.modules.mix_modules.utils import paddle2torch, torch2paddle

import torch
import paddle
from paddle.io import Dataset, DataLoader
import numpy as np

############################################################################
#
# Test performance on the MNIST dataset
#
############################################################################

class MNISTDataset(Dataset):
    def __init__(self, dataset):

        self.dataset = [
            (
                np.array(img).astype('float32').reshape(-1),
                label
            ) for img, label in dataset
        ]

    def __getitem__(self, idx):
        return self.dataset[idx]

    def __len__(self):
        return len(self.dataset)

class MixMNISTModel(MixModule):
    def __init__(self):
        super(MixMNISTModel, self).__init__()

        self.fc1 = paddle.nn.Linear(784, 64)
        self.fc2 = paddle.nn.Linear(64, 32)
        self.fc3 = torch.nn.Linear(32, 10)
        self.fc4 = torch.nn.Linear(10, 10)

    def forward(self, x):

        paddle_out = self.fc1(x)
        paddle_out = self.fc2(paddle_out)
        torch_in = paddle2torch(paddle_out)
        torch_out = self.fc3(torch_in)
        torch_out = self.fc4(torch_out)

        return torch_out

    def train_step(self, x):
        return self.forward(x)

    def test_step(self, x):
        return self.forward(x)

@pytest.mark.torchpaddle
class TestMNIST:

    @classmethod
    def setup_class(self):

        self.train_dataset = paddle.vision.datasets.MNIST(mode='train')
        self.test_dataset = paddle.vision.datasets.MNIST(mode='test')
        self.train_dataset = MNISTDataset(self.train_dataset)

        self.lr = 0.0003
        self.epochs = 20

        self.dataloader = DataLoader(self.train_dataset, batch_size=100, shuffle=True)

    def setup_method(self):
        model = MixMNISTModel()
        self.torch_loss_func = torch.nn.CrossEntropyLoss()

        torch_opt = torch.optim.Adam(model.parameters(backend="torch"), self.lr)
        paddle_opt = paddle.optimizer.Adam(parameters=model.parameters(backend="paddle"), learning_rate=self.lr)

        self.driver = TorchPaddleDriver(model=model, device="cuda:0")
        self.driver.set_optimizers([torch_opt, paddle_opt])

    def test_case1(self):

        epochs = 20

        self.driver.setup()
        self.driver.zero_grad()
        # start training
        current_epoch_idx = 0
        while current_epoch_idx < epochs:
            epoch_loss, batch = 0, 0
            self.driver.set_model_mode("train")
            self.driver.set_sampler_epoch(self.dataloader, current_epoch_idx)
            for batch, (img, label) in enumerate(self.dataloader):
                img = paddle.to_tensor(img).cuda()
                torch_out = self.driver.train_step(img)
                label = torch.from_numpy(label.numpy()).reshape(-1)
                loss = self.torch_loss_func(torch_out.cpu(), label)
                epoch_loss += loss.item()

                self.driver.backward(loss)
                self.driver.step()
                self.driver.zero_grad()

            current_epoch_idx += 1

        # start testing
        correct = 0
        for img, label in self.test_dataset:

            img = paddle.to_tensor(np.array(img).astype('float32').reshape(1, -1))
            torch_out = self.driver.test_step(img)
            res = torch_out.softmax(-1).argmax().item()
            label = label.item()
            if res == label:
                correct += 1

        acc = correct / len(self.test_dataset)
        assert acc > 0.85

+ 0 - 0 tests/core/drivers/torch_paddle_driver/_test_utils.py


+ 0 - 204 tests/core/utils/_test_torch_paddle_utils.py

@@ -1,204 +0,0 @@
import paddle
import pytest
import torch

from fastNLP.core.utils.torch_paddle_utils import torch_paddle_move_data_to_device

############################################################################
#
# Test moving all torch and paddle tensors in the arguments to the specified device
#
############################################################################

@pytest.mark.torchpaddle
class TestTorchPaddleMoveDataToDevice:

    def check_gpu(self, tensor, idx):
        """
        Helper that checks whether a tensor is on the specified gpu.
        """

        if isinstance(tensor, paddle.Tensor):
            assert tensor.place.is_gpu_place()
            assert tensor.place.gpu_device_id() == idx
        elif isinstance(tensor, torch.Tensor):
            assert tensor.is_cuda
            assert tensor.device.index == idx

    def check_cpu(self, tensor):
        if isinstance(tensor, paddle.Tensor):
            assert tensor.place.is_cpu_place()
        elif isinstance(tensor, torch.Tensor):
            assert not tensor.is_cuda

    def test_tensor_transfer(self):
        """
        Test moving a single tensor.
        """

        paddle_tensor = paddle.rand((3, 4, 5)).cpu()
        res = torch_paddle_move_data_to_device(paddle_tensor, device=None, data_device=None)
        self.check_cpu(res)

        res = torch_paddle_move_data_to_device(paddle_tensor, device="gpu:0", data_device=None)
        self.check_gpu(res, 0)

        res = torch_paddle_move_data_to_device(paddle_tensor, device="gpu:1", data_device=None)
        self.check_gpu(res, 1)

        res = torch_paddle_move_data_to_device(paddle_tensor, device="cuda:0", data_device="cpu")
        self.check_gpu(res, 0)

        res = torch_paddle_move_data_to_device(paddle_tensor, device=None, data_device="gpu:0")
        self.check_gpu(res, 0)

        res = torch_paddle_move_data_to_device(paddle_tensor, device=None, data_device="cuda:1")
        self.check_gpu(res, 1)

        torch_tensor = torch.rand(3, 4, 5)
        res = torch_paddle_move_data_to_device(torch_tensor, device=None, data_device=None)
        self.check_cpu(res)

        res = torch_paddle_move_data_to_device(torch_tensor, device="gpu:0", data_device=None)
        self.check_gpu(res, 0)

        res = torch_paddle_move_data_to_device(torch_tensor, device="gpu:1", data_device=None)
        self.check_gpu(res, 1)

        res = torch_paddle_move_data_to_device(torch_tensor, device="gpu:0", data_device="cpu")
        self.check_gpu(res, 0)

        res = torch_paddle_move_data_to_device(torch_tensor, device=None, data_device="gpu:0")
        self.check_gpu(res, 0)

        res = torch_paddle_move_data_to_device(torch_tensor, device=None, data_device="gpu:1")
        self.check_gpu(res, 1)

    def test_list_transfer(self):
        """
        Test moving a list of tensors.
        """

        paddle_list = [paddle.rand((6, 4, 2)) for i in range(5)] + [torch.rand((6, 4, 2)) for i in range(5)]
        res = torch_paddle_move_data_to_device(paddle_list, device=None, data_device="gpu:1")
        assert isinstance(res, list)
        for r in res:
            self.check_gpu(r, 1)

        res = torch_paddle_move_data_to_device(paddle_list, device="cpu", data_device="gpu:1")
        assert isinstance(res, list)
        for r in res:
            self.check_cpu(r)

        res = torch_paddle_move_data_to_device(paddle_list, device="gpu:0", data_device=None)
        assert isinstance(res, list)
        for r in res:
            self.check_gpu(r, 0)

        res = torch_paddle_move_data_to_device(paddle_list, device="gpu:1", data_device="cpu")
        assert isinstance(res, list)
        for r in res:
            self.check_gpu(r, 1)

    def test_tensor_tuple_transfer(self):
        """
        Test moving a tuple of tensors.
        """

        paddle_list = [paddle.rand((6, 4, 2)) for i in range(10)] + [torch.rand((6, 4, 2)) for i in range(5)]
        paddle_tuple = tuple(paddle_list)
        res = torch_paddle_move_data_to_device(paddle_tuple, device=None, data_device="gpu:1")
        assert isinstance(res, tuple)
        for r in res:
            self.check_gpu(r, 1)

        res = torch_paddle_move_data_to_device(paddle_tuple, device="cpu", data_device="gpu:1")
        assert isinstance(res, tuple)
        for r in res:
            self.check_cpu(r)

        res = torch_paddle_move_data_to_device(paddle_tuple, device="gpu:0", data_device=None)
        assert isinstance(res, tuple)
        for r in res:
            self.check_gpu(r, 0)

        res = torch_paddle_move_data_to_device(paddle_tuple, device="gpu:1", data_device="cpu")
        assert isinstance(res, tuple)
        for r in res:
            self.check_gpu(r, 1)

    def test_dict_transfer(self):
        """
        Test moving a complex dict structure.
        """

        paddle_dict = {
            "torch_tensor": torch.rand((3, 4)),
            "torch_list": [torch.rand((6, 4, 2)) for i in range(10)],
            "dict": {
                "list": [paddle.rand((6, 4, 2)) for i in range(5)] + [torch.rand((6, 4, 2)) for i in range(5)],
                "torch_tensor": torch.rand((3, 4)),
                "paddle_tensor": paddle.rand((3, 4))
            },
            "paddle_tensor": paddle.rand((3, 4)),
            "list": [paddle.rand((6, 4, 2)) for i in range(10)],
            "int": 2,
            "string": "test string"
        }

        res = torch_paddle_move_data_to_device(paddle_dict, device="gpu:0", data_device=None)
        assert isinstance(res, dict)
        self.check_gpu(res["torch_tensor"], 0)
        self.check_gpu(res["paddle_tensor"], 0)
        assert isinstance(res["torch_list"], list)
        for t in res["torch_list"]:
            self.check_gpu(t, 0)
        assert isinstance(res["list"], list)
        for t in res["list"]:
            self.check_gpu(t, 0)
        assert isinstance(res["int"], int)
        assert isinstance(res["string"], str)
        assert isinstance(res["dict"], dict)
        assert isinstance(res["dict"]["list"], list)
        for t in res["dict"]["list"]:
            self.check_gpu(t, 0)
        self.check_gpu(res["dict"]["torch_tensor"], 0)
        self.check_gpu(res["dict"]["paddle_tensor"], 0)

        res = torch_paddle_move_data_to_device(paddle_dict, device=None, data_device="gpu:1")
        assert isinstance(res, dict)
        self.check_gpu(res["torch_tensor"], 1)
        self.check_gpu(res["paddle_tensor"], 1)
        assert isinstance(res["torch_list"], list)
        for t in res["torch_list"]:
            self.check_gpu(t, 1)
        assert isinstance(res["list"], list)
        for t in res["list"]:
            self.check_gpu(t, 1)
        assert isinstance(res["int"], int)
        assert isinstance(res["string"], str)
        assert isinstance(res["dict"], dict)
        assert isinstance(res["dict"]["list"], list)
        for t in res["dict"]["list"]:
            self.check_gpu(t, 1)
        self.check_gpu(res["dict"]["torch_tensor"], 1)
        self.check_gpu(res["dict"]["paddle_tensor"], 1)

        res = torch_paddle_move_data_to_device(paddle_dict, device="cpu", data_device="gpu:0")
        assert isinstance(res, dict)
        self.check_cpu(res["torch_tensor"])
        self.check_cpu(res["paddle_tensor"])
        assert isinstance(res["torch_list"], list)
        for t in res["torch_list"]:
            self.check_cpu(t)
        assert isinstance(res["list"], list)
        for t in res["list"]:
            self.check_cpu(t)
        assert isinstance(res["int"], int)
        assert isinstance(res["string"], str)
        assert isinstance(res["dict"], dict)
        assert isinstance(res["dict"]["list"], list)
        for t in res["dict"]["list"]:
            self.check_cpu(t)
        self.check_cpu(res["dict"]["torch_tensor"])
        self.check_cpu(res["dict"]["paddle_tensor"])

+ 0 - 0 tests/modules/__init__.py


+ 0 - 0 tests/modules/mix_modules/__init__.py


+ 0 - 378 tests/modules/mix_modules/_test_mix_module.py

@@ -1,378 +0,0 @@
import pytest
import os
from itertools import chain

import torch
import paddle
from paddle.io import Dataset, DataLoader
import numpy as np

from fastNLP.modules.mix_modules.mix_module import MixModule
from fastNLP.modules.mix_modules.utils import paddle2torch, torch2paddle
from fastNLP.envs.distributed import rank_zero_rm


############################################################################
#
# Test the basic functionality of the class
#
############################################################################

class MixModuleForTest(MixModule):
    def __init__(self):
        super(MixModuleForTest, self).__init__()

        self.torch_fc1 = torch.nn.Linear(10, 10)
        self.torch_softmax = torch.nn.Softmax(0)
        self.torch_conv2d1 = torch.nn.Conv2d(10, 10, 3)
        self.torch_tensor = torch.ones(3, 3)
        self.torch_param = torch.nn.Parameter(torch.ones(4, 4))

        self.paddle_fc1 = paddle.nn.Linear(10, 10)
        self.paddle_softmax = paddle.nn.Softmax(0)
        self.paddle_conv2d1 = paddle.nn.Conv2D(10, 10, 3)
        self.paddle_tensor = paddle.ones((4, 4))

class TorchModuleForTest(torch.nn.Module):
    def __init__(self):
        super(TorchModuleForTest, self).__init__()

        self.torch_fc1 = torch.nn.Linear(10, 10)
        self.torch_softmax = torch.nn.Softmax(0)
        self.torch_conv2d1 = torch.nn.Conv2d(10, 10, 3)
        self.torch_tensor = torch.ones(3, 3)
        self.torch_param = torch.nn.Parameter(torch.ones(4, 4))

class PaddleModuleForTest(paddle.nn.Layer):
    def __init__(self):
        super(PaddleModuleForTest, self).__init__()

        self.paddle_fc1 = paddle.nn.Linear(10, 10)
        self.paddle_softmax = paddle.nn.Softmax(0)
        self.paddle_conv2d1 = paddle.nn.Conv2D(10, 10, 3)
        self.paddle_tensor = paddle.ones((4, 4))


@pytest.mark.torchpaddle
class TestTorchPaddleMixModule:

    def setup_method(self):

        self.model = MixModuleForTest()
        self.torch_model = TorchModuleForTest()
        self.paddle_model = PaddleModuleForTest()

    def test_to(self):
        """
        Test the mixed model's to function.
        """
        self.model.to("cuda")
        self.torch_model.to("cuda")
        self.paddle_model.to("gpu")
        self.if_device_correct("cuda")

        self.model.to("cuda:2")
        self.torch_model.to("cuda:2")
        self.paddle_model.to("gpu:2")
        self.if_device_correct("cuda:2")

        self.model.to("gpu:1")
        self.torch_model.to("cuda:1")
        self.paddle_model.to("gpu:1")
        self.if_device_correct("cuda:1")

        self.model.to("cpu")
        self.torch_model.to("cpu")
        self.paddle_model.to("cpu")
        self.if_device_correct("cpu")

    def test_train_eval(self):
        """
        Test the train and eval functions.
        """
        self.model.eval()
        self.if_training_correct(False)

        self.model.train()
        self.if_training_correct(True)

    def test_parameters(self):
        """
        Test the parameters() function; since initialization is random, only the lengths of the results are compared for now.
        """
        mix_params = []
        params = []

        for value in self.model.named_parameters():
            mix_params.append(value)

        for value in chain(self.torch_model.named_parameters(), self.paddle_model.named_parameters()):
            params.append(value)

        assert len(params) == len(mix_params)

    def test_named_parameters(self):
        """
        Test the named_parameters function.
        """
        mix_param_names = []
        param_names = []

        for name, value in self.model.named_parameters():
            mix_param_names.append(name)

        for name, value in chain(self.torch_model.named_parameters(), self.paddle_model.named_parameters()):
            param_names.append(name)

        assert sorted(param_names) == sorted(mix_param_names)

    def test_torch_named_parameters(self):
        """
        Test extracting the torch parameters.
        """
        mix_param_names = []
        param_names = []

        for name, value in self.model.named_parameters(backend="torch"):
            mix_param_names.append(name)

        for name, value in self.torch_model.named_parameters():
            param_names.append(name)

        assert sorted(param_names) == sorted(mix_param_names)

    def test_paddle_named_parameters(self):
        """
        Test extracting the paddle parameters.
        """
        mix_param_names = []
        param_names = []

        for name, value in self.model.named_parameters(backend="paddle"):
            mix_param_names.append(name)

        for name, value in self.paddle_model.named_parameters():
            param_names.append(name)

        assert sorted(param_names) == sorted(mix_param_names)

    def test_torch_state_dict(self):
        """
        Test extracting the torch state dict.
        """
        torch_dict = self.torch_model.state_dict()
        mix_dict = self.model.state_dict(backend="torch")

        assert sorted(torch_dict.keys()) == sorted(mix_dict.keys())

    def test_paddle_state_dict(self):
        """
        Test extracting the paddle state dict.
        """
        paddle_dict = self.paddle_model.state_dict()
        mix_dict = self.model.state_dict(backend="paddle")

        # TODO after the test program shows passed, paddle's abnormal exit message is printed
        assert sorted(paddle_dict.keys()) == sorted(mix_dict.keys())

    def test_state_dict(self):
        """
        Test extracting the full state dict.
        """
        all_dict = self.torch_model.state_dict()
        all_dict.update(self.paddle_model.state_dict())
        mix_dict = self.model.state_dict()

        # TODO after the test program shows passed, paddle's abnormal exit message is printed
        assert sorted(all_dict.keys()) == sorted(mix_dict.keys())

    def test_load_state_dict(self):
        """
        Test the load_state_dict function.
        """
        state_dict = self.model.state_dict()

        new_model = MixModuleForTest()
        new_model.load_state_dict(state_dict)
        new_state_dict = new_model.state_dict()

        for name, value in state_dict.items():
            state_dict[name] = value.tolist()
        for name, value in new_state_dict.items():
            new_state_dict[name] = value.tolist()

        # self.assertDictEqual(state_dict, new_state_dict)

    def test_save_and_load_state_dict(self):
        """
        Test the save_state_dict_to_file and load_state_dict_from_file functions.
        """
        path = "model"
        try:
            self.model.save_state_dict_to_file(path)
            new_model = MixModuleForTest()
            new_model.load_state_dict_from_file(path)

            state_dict = self.model.state_dict()
            new_state_dict = new_model.state_dict()

            for name, value in state_dict.items():
                state_dict[name] = value.tolist()
            for name, value in new_state_dict.items():
                new_state_dict[name] = value.tolist()

            # self.assertDictEqual(state_dict, new_state_dict)
        finally:
            rank_zero_rm(path)

    def if_device_correct(self, device):

        assert self.model.torch_fc1.weight.device == self.torch_model.torch_fc1.weight.device
        assert self.model.torch_conv2d1.weight.device == self.torch_model.torch_conv2d1.weight.device
        assert self.model.torch_conv2d1.bias.device == self.torch_model.torch_conv2d1.bias.device
        assert self.model.torch_tensor.device == self.torch_model.torch_tensor.device
        assert self.model.torch_param.device == self.torch_model.torch_param.device

        if device == "cpu":
            assert self.model.paddle_fc1.weight.place.is_cpu_place()
            assert self.model.paddle_fc1.bias.place.is_cpu_place()
            assert self.model.paddle_conv2d1.weight.place.is_cpu_place()
            assert self.model.paddle_conv2d1.bias.place.is_cpu_place()
            assert self.model.paddle_tensor.place.is_cpu_place()
        elif device.startswith("cuda"):
            assert self.model.paddle_fc1.weight.place.is_gpu_place()
            assert self.model.paddle_fc1.bias.place.is_gpu_place()
            assert self.model.paddle_conv2d1.weight.place.is_gpu_place()
            assert self.model.paddle_conv2d1.bias.place.is_gpu_place()
            assert self.model.paddle_tensor.place.is_gpu_place()

            assert self.model.paddle_fc1.weight.place.gpu_device_id() == self.paddle_model.paddle_fc1.weight.place.gpu_device_id()
            assert self.model.paddle_fc1.bias.place.gpu_device_id() == self.paddle_model.paddle_fc1.bias.place.gpu_device_id()
            assert self.model.paddle_conv2d1.weight.place.gpu_device_id() == self.paddle_model.paddle_conv2d1.weight.place.gpu_device_id()
            assert self.model.paddle_conv2d1.bias.place.gpu_device_id() == self.paddle_model.paddle_conv2d1.bias.place.gpu_device_id()
            assert self.model.paddle_tensor.place.gpu_device_id() == self.paddle_model.paddle_tensor.place.gpu_device_id()
        else:
            raise NotImplementedError

    def if_training_correct(self, training):

        assert self.model.torch_fc1.training == training
        assert self.model.torch_softmax.training == training
        assert self.model.torch_conv2d1.training == training

        assert self.model.paddle_fc1.training == training
        assert self.model.paddle_softmax.training == training
        assert self.model.paddle_conv2d1.training == training


############################################################################
#
# Test performance on the MNIST dataset
#
############################################################################

class MNISTDataset(Dataset):
    def __init__(self, dataset):

        self.dataset = [
            (
                np.array(img).astype('float32').reshape(-1),
                label
            ) for img, label in dataset
        ]

    def __getitem__(self, idx):
        return self.dataset[idx]

    def __len__(self):
        return len(self.dataset)

class MixMNISTModel(MixModule):
    def __init__(self):
        super(MixMNISTModel, self).__init__()

        self.fc1 = paddle.nn.Linear(784, 64)
        self.fc2 = paddle.nn.Linear(64, 32)
        self.fc3 = torch.nn.Linear(32, 10)
        self.fc4 = torch.nn.Linear(10, 10)

    def forward(self, x):

        paddle_out = self.fc1(x)
        paddle_out = self.fc2(paddle_out)
        torch_in = paddle2torch(paddle_out)
        torch_out = self.fc3(torch_in)
        torch_out = self.fc4(torch_out)

        return torch_out

@pytest.mark.torchpaddle
class TestMNIST:

    @classmethod
    def setup_class(self):

        self.train_dataset = paddle.vision.datasets.MNIST(mode='train')
        self.test_dataset = paddle.vision.datasets.MNIST(mode='test')
        self.train_dataset = MNISTDataset(self.train_dataset)

        self.lr = 0.0003
        self.epochs = 20

        self.dataloader = DataLoader(self.train_dataset, batch_size=100, shuffle=True)

    def setup_method(self):
        self.model = MixMNISTModel().to("cuda")
        self.torch_loss_func = torch.nn.CrossEntropyLoss()

        self.torch_opt = torch.optim.Adam(self.model.parameters(backend="torch"), self.lr)
        self.paddle_opt = paddle.optimizer.Adam(parameters=self.model.parameters(backend="paddle"), learning_rate=self.lr)

    def test_case1(self):

        # start training
        for epoch in range(self.epochs):
            epoch_loss, batch = 0, 0
            for batch, (img, label) in enumerate(self.dataloader):

                img = paddle.to_tensor(img).cuda()
                torch_out = self.model(img)
                label = torch.from_numpy(label.numpy()).reshape(-1)
                loss = self.torch_loss_func(torch_out.cpu(), label)
                epoch_loss += loss.item()

                loss.backward()
                self.torch_opt.step()
                self.paddle_opt.step()
                self.torch_opt.zero_grad()
                self.paddle_opt.clear_grad()

            else:
                assert epoch_loss / (batch + 1) < 0.3

        # start testing
        correct = 0
        for img, label in self.test_dataset:

            img = paddle.to_tensor(np.array(img).astype('float32').reshape(1, -1))
            torch_out = self.model(img)
            res = torch_out.softmax(-1).argmax().item()
            label = label.item()
            if res == label:
                correct += 1

        acc = correct / len(self.test_dataset)
        assert acc > 0.85

############################################################################
#
# Test performance on the ERNIE Chinese dataset
#
############################################################################

+ 0 - 435 tests/modules/mix_modules/_test_utils.py

@@ -1,435 +0,0 @@
import unittest
import os

os.environ["log_silent"] = "1"
import torch
import paddle
import jittor

from fastNLP.modules.mix_modules.utils import (
    paddle2torch,
    torch2paddle,
    jittor2torch,
    torch2jittor,
)

############################################################################
#
# Test paddle-to-torch conversion
#
############################################################################

class Paddle2TorchTestCase(unittest.TestCase):

    def check_torch_tensor(self, tensor, device, requires_grad):
        """
        Helper that checks a tensor's device and gradient state.
        """

        assert isinstance(tensor, torch.Tensor)
        assert tensor.device == torch.device(device)
        assert tensor.requires_grad == requires_grad

    def test_gradient(self):
        """
        Test that backpropagation works correctly after conversion.
        """

        x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0, 5.0], stop_gradient=False)
        y = paddle2torch(x)
        z = 3 * (y ** 2)
        z.sum().backward()
        assert y.grad.tolist() == [6, 12, 18, 24, 30]

    def test_tensor_transfer(self):
        """
        Test that a single tensor's device and gradient are converted correctly.
        """

        paddle_tensor = paddle.rand((3, 4, 5)).cpu()
        res = paddle2torch(paddle_tensor)
        self.check_torch_tensor(res, "cpu", not paddle_tensor.stop_gradient)

        res = paddle2torch(paddle_tensor, target_device="cuda:2", no_gradient=None)
        self.check_torch_tensor(res, "cuda:2", not paddle_tensor.stop_gradient)

        res = paddle2torch(paddle_tensor, target_device="cuda:1", no_gradient=True)
        self.check_torch_tensor(res, "cuda:1", False)

        res = paddle2torch(paddle_tensor, target_device="cuda:1", no_gradient=False)
        self.check_torch_tensor(res, "cuda:1", True)

    def test_list_transfer(self):
        """
        Test converting a list of tensors.
        """

        paddle_list = [paddle.rand((6, 4, 2)).cuda(1) for i in range(10)]
        res = paddle2torch(paddle_list)
        assert isinstance(res, list)
        for t in res:
            self.check_torch_tensor(t, "cuda:1", False)

        res = paddle2torch(paddle_list, target_device="cpu", no_gradient=False)
        assert isinstance(res, list)
        for t in res:
            self.check_torch_tensor(t, "cpu", True)

    def test_tensor_tuple_transfer(self):
        """
        Test converting a tuple of tensors.
        """

        paddle_list = [paddle.rand((6, 4, 2)).cuda(1) for i in range(10)]
        paddle_tuple = tuple(paddle_list)
        res = paddle2torch(paddle_tuple)
        assert isinstance(res, tuple)
        for t in res:
            self.check_torch_tensor(t, "cuda:1", False)

    def test_dict_transfer(self):
        """
        Test converting a dict with complex structure.
        """

        paddle_dict = {
            "tensor": paddle.rand((3, 4)).cuda(0),
            "list": [paddle.rand((6, 4, 2)).cuda(0) for i in range(10)],
            "dict": {
                "list": [paddle.rand((6, 4, 2)).cuda(0) for i in range(10)],
                "tensor": paddle.rand((3, 4)).cuda(0)
            },
            "int": 2,
            "string": "test string"
        }
        res = paddle2torch(paddle_dict)
        assert isinstance(res, dict)
        self.check_torch_tensor(res["tensor"], "cuda:0", False)
        assert isinstance(res["list"], list)
        for t in res["list"]:
            self.check_torch_tensor(t, "cuda:0", False)
        assert isinstance(res["int"], int)
        assert isinstance(res["string"], str)
        assert isinstance(res["dict"], dict)
        assert isinstance(res["dict"]["list"], list)
        for t in res["dict"]["list"]:
            self.check_torch_tensor(t, "cuda:0", False)
        self.check_torch_tensor(res["dict"]["tensor"], "cuda:0", False)


############################################################################
#
# Test torch-to-paddle conversion
#
############################################################################

class Torch2PaddleTestCase(unittest.TestCase):

    def check_paddle_tensor(self, tensor, device, stop_gradient):
        """
        Helper that checks the resulting paddle tensor's device and gradient state.
        """

        assert isinstance(tensor, paddle.Tensor)
        if device == "cpu":
            assert tensor.place.is_cpu_place()
        elif device.startswith("gpu"):
            paddle_device = paddle.device._convert_to_place(device)
            assert tensor.place.is_gpu_place()
            if hasattr(tensor.place, "gpu_device_id"):
                # paddle has two kinds of Place
                # paddle.fluid.core.Place is the type used when creating a Tensor
                # and exposes gpu_device_id to get the device
                assert tensor.place.gpu_device_id() == paddle_device.get_device_id()
            else:
                # _convert_to_place yields a paddle.CUDAPlace
                # whose device is obtained via get_device_id
                assert tensor.place.get_device_id() == paddle_device.get_device_id()
        else:
            raise NotImplementedError
        assert tensor.stop_gradient == stop_gradient

    def test_gradient(self):
        """
        Test gradient backpropagation after conversion.
        """

        x = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], requires_grad=True)
        y = torch2paddle(x)
        z = 3 * (y ** 2)
        z.sum().backward()
        assert y.grad.tolist() == [6, 12, 18, 24, 30]

    def test_tensor_transfer(self):
        """
        Test converting a single tensor.
        """

        torch_tensor = torch.rand((3, 4, 5))
        res = torch2paddle(torch_tensor)
        self.check_paddle_tensor(res, "cpu", True)

        res = torch2paddle(torch_tensor, target_device="gpu:2", no_gradient=None)
        self.check_paddle_tensor(res, "gpu:2", True)

        res = torch2paddle(torch_tensor, target_device="gpu:2", no_gradient=True)
        self.check_paddle_tensor(res, "gpu:2", True)

        res = torch2paddle(torch_tensor, target_device="gpu:2", no_gradient=False)
        self.check_paddle_tensor(res, "gpu:2", False)

    def test_tensor_list_transfer(self):
        """
        Test converting a list of tensors.
        """

        torch_list = [torch.rand(6, 4, 2) for i in range(10)]
        res = torch2paddle(torch_list)
        assert isinstance(res, list)
        for t in res:
            self.check_paddle_tensor(t, "cpu", True)

        res = torch2paddle(torch_list, target_device="gpu:1", no_gradient=False)
        assert isinstance(res, list)
        for t in res:
            self.check_paddle_tensor(t, "gpu:1", False)

    def test_tensor_tuple_transfer(self):
        """
        Test converting a tuple of tensors.
        """
        torch_list = [torch.rand(6, 4, 2) for i in range(10)]
        torch_tuple = tuple(torch_list)
        res = torch2paddle(torch_tuple, target_device="cpu")
        assert isinstance(res, tuple)
        for t in res:
            self.check_paddle_tensor(t, "cpu", True)

    def test_dict_transfer(self):
        """
        Test converting a complex dict structure.
        """

        torch_dict = {
            "tensor": torch.rand((3, 4)),
            "list": [torch.rand(6, 4, 2) for i in range(10)],
            "dict": {
                "list": [torch.rand(6, 4, 2) for i in range(10)],
                "tensor": torch.rand((3, 4))
            },
            "int": 2,
            "string": "test string"
        }
        res = torch2paddle(torch_dict)
        assert isinstance(res, dict)
        self.check_paddle_tensor(res["tensor"], "cpu", True)
        assert isinstance(res["list"], list)
        for t in res["list"]:
            self.check_paddle_tensor(t, "cpu", True)
        assert isinstance(res["int"], int)
        assert isinstance(res["string"], str)
        assert isinstance(res["dict"], dict)
        assert isinstance(res["dict"]["list"], list)
        for t in res["dict"]["list"]:
            self.check_paddle_tensor(t, "cpu", True)
        self.check_paddle_tensor(res["dict"]["tensor"], "cpu", True)


############################################################################
#
# Test jittor-to-torch conversion
#
############################################################################

class Jittor2TorchTestCase(unittest.TestCase):

    def check_torch_tensor(self, tensor, device, requires_grad):
        """
        Helper that checks the resulting torch tensor.
        """

        assert isinstance(tensor, torch.Tensor)
        if device == "cpu":
            assert not tensor.is_cuda
        else:
            assert tensor.device == torch.device(device)
        assert tensor.requires_grad == requires_grad

    def test_var_transfer(self):
        """
        Test converting a single jittor Var.
        """

        jittor_var = jittor.rand((3, 4, 5))
        res = jittor2torch(jittor_var)
        self.check_torch_tensor(res, "cpu", True)

        res = jittor2torch(jittor_var, target_device="cuda:2", no_gradient=None)
        self.check_torch_tensor(res, "cuda:2", True)

        res = jittor2torch(jittor_var, target_device="cuda:2", no_gradient=True)
        self.check_torch_tensor(res, "cuda:2", False)

        res = jittor2torch(jittor_var, target_device="cuda:2", no_gradient=False)
        self.check_torch_tensor(res, "cuda:2", True)

    def test_var_list_transfer(self):
        """
        Test converting a list of jittor Vars.
        """

        jittor_list = [jittor.rand((6, 4, 2)) for i in range(10)]
        res = jittor2torch(jittor_list)
        assert isinstance(res, list)
        for t in res:
            self.check_torch_tensor(t, "cpu", True)

        res = jittor2torch(jittor_list, target_device="cuda:1", no_gradient=False)
        assert isinstance(res, list)
        for t in res:
            self.check_torch_tensor(t, "cuda:1", True)

    def test_var_tuple_transfer(self):
        """
        Test converting a tuple of jittor Vars.
        """

        jittor_list = [jittor.rand((6, 4, 2)) for i in range(10)]
        jittor_tuple = tuple(jittor_list)
        res = jittor2torch(jittor_tuple, target_device="cpu")
        assert isinstance(res, tuple)
        for t in res:
            self.check_torch_tensor(t, "cpu", True)

    def test_dict_transfer(self):
        """
        Test converting a dict structure.
        """

        jittor_dict = {
            "tensor": jittor.rand((3, 4)),
            "list": [jittor.rand(6, 4, 2) for i in range(10)],
            "dict": {
                "list": [jittor.rand(6, 4, 2) for i in range(10)],
                "tensor": jittor.rand((3, 4))
            },
            "int": 2,
            "string": "test string"
        }
        res = jittor2torch(jittor_dict)
        assert isinstance(res, dict)
        self.check_torch_tensor(res["tensor"], "cpu", True)
        assert isinstance(res["list"], list)
        for t in res["list"]:
            self.check_torch_tensor(t, "cpu", True)
        assert isinstance(res["int"], int)
        assert isinstance(res["string"], str)
        assert isinstance(res["dict"], dict)
        assert isinstance(res["dict"]["list"], list)
        for t in res["dict"]["list"]:
            self.check_torch_tensor(t, "cpu", True)
        self.check_torch_tensor(res["dict"]["tensor"], "cpu", True)


############################################################################
#
# Test torch-to-jittor conversion
#
############################################################################

class Torch2JittorTestCase(unittest.TestCase):

    def check_jittor_var(self, var, requires_grad):
        """
        Helper that checks the resulting jittor Var's gradient state.
        """

        assert isinstance(var, jittor.Var)
        assert var.requires_grad == requires_grad

    def test_gradient(self):
        """
        Test backpropagated gradients.
        """

        x = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], requires_grad=True)
        y = torch2jittor(x)
        z = 3 * (y ** 2)
        grad = jittor.grad(z, y)
        assert grad.tolist() == [6.0, 12.0, 18.0, 24.0, 30.0]

    def test_tensor_transfer(self):
        """
        Test converting a single tensor to jittor.
        """

        torch_tensor = torch.rand((3, 4, 5))
        res = torch2jittor(torch_tensor)
        self.check_jittor_var(res, False)

        res = torch2jittor(torch_tensor, no_gradient=None)
        self.check_jittor_var(res, False)

        res = torch2jittor(torch_tensor, no_gradient=True)
        self.check_jittor_var(res, False)

        res = torch2jittor(torch_tensor, no_gradient=False)
        self.check_jittor_var(res, True)

    def test_tensor_list_transfer(self):
        """
        Test converting a list of tensors.
        """

        torch_list = [torch.rand((6, 4, 2)) for i in range(10)]
        res = torch2jittor(torch_list)
        assert isinstance(res, list)
        for t in res:
            self.check_jittor_var(t, False)

        res = torch2jittor(torch_list, no_gradient=False)
        assert isinstance(res, list)
        for t in res:
            self.check_jittor_var(t, True)

    def test_tensor_tuple_transfer(self):
        """
        Test converting a tuple of tensors.
        """

        torch_list = [torch.rand((6, 4, 2)) for i in range(10)]
        torch_tuple = tuple(torch_list)
        res = torch2jittor(torch_tuple)
        assert isinstance(res, tuple)
        for t in res:
            self.check_jittor_var(t, False)

    def test_dict_transfer(self):
        """
        Test converting a dict structure.
        """

        torch_dict = {
            "tensor": torch.rand((3, 4)),
            "list": [torch.rand(6, 4, 2) for i in range(10)],
            "dict": {
                "list": [torch.rand(6, 4, 2) for i in range(10)],
                "tensor": torch.rand((3, 4))
            },
            "int": 2,
            "string": "test string"
        }
        res = torch2jittor(torch_dict)
        assert isinstance(res, dict)
        self.check_jittor_var(res["tensor"], False)
        assert isinstance(res["list"], list)
        for t in res["list"]:
            self.check_jittor_var(t, False)
        assert isinstance(res["int"], int)
        assert isinstance(res["string"], str)
        assert isinstance(res["dict"], dict)
        assert isinstance(res["dict"]["list"], list)
        for t in res["dict"]["list"]:
            self.check_jittor_var(t, False)
        self.check_jittor_var(res["dict"]["tensor"], False)
