From 422510285e20f298172fcb2ecc0644b05a2ecead Mon Sep 17 00:00:00 2001
From: yh
Date: Tue, 17 May 2022 22:39:19 +0800
Subject: [PATCH] Restrict the pytorch version to 1.6 and above
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 fastNLP/core/drivers/torch_driver/torch_driver.py | 2 +-
 fastNLP/core/drivers/torch_driver/utils.py        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/fastNLP/core/drivers/torch_driver/torch_driver.py b/fastNLP/core/drivers/torch_driver/torch_driver.py
index 9113de51..156681be 100644
--- a/fastNLP/core/drivers/torch_driver/torch_driver.py
+++ b/fastNLP/core/drivers/torch_driver/torch_driver.py
@@ -55,7 +55,7 @@ class TorchDriver(Driver):
         # Since mixed-precision training is configured the same way for ddp and single_device, it can be abstracted here uniformly;
         self.fp16 = fp16
         if parse_version(torch.__version__) < parse_version('1.6'):
-            raise RuntimeError("Pytorch supports float16 after version 1.6, please upgrade your pytorch version.")
+            raise RuntimeError(f"Pytorch({torch.__version__}) needs to be at least version 1.6 to use fp16.")
         self.auto_cast, _grad_scaler = _build_fp16_env(dummy=not fp16)
         self.grad_scaler = _grad_scaler()
diff --git a/fastNLP/core/drivers/torch_driver/utils.py b/fastNLP/core/drivers/torch_driver/utils.py
index 57e57061..14f5b9f3 100644
--- a/fastNLP/core/drivers/torch_driver/utils.py
+++ b/fastNLP/core/drivers/torch_driver/utils.py
@@ -160,7 +160,7 @@ def _build_fp16_env(dummy=False):
         GradScaler = DummyGradScaler
     else:
         if not torch.cuda.is_available():
-            raise RuntimeError("No cuda")
+            raise RuntimeError("Pytorch is not installed with CUDA support, please use device='cpu'.")
         if torch.cuda.get_device_capability(0)[0] < 7:
             logger.rank_zero_warning(
                 "NOTE: your device does NOT support faster training with fp16, "
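
For reference, a minimal self-contained sketch of the guard this patch tightens. This is an illustration, not fastNLP's actual _build_fp16_env: the DummyGradScaler below is a plausible no-op reimplementation (only its name appears in the diff context), and the parse_version import is an assumption based on the call visible in the context lines. The version floor exists because torch.cuda.amp.autocast and torch.cuda.amp.GradScaler were added in Pytorch 1.6.

    import contextlib
    import torch
    from pkg_resources import parse_version  # assumption: fastNLP obtains parse_version similarly

    class DummyGradScaler:
        # Hypothetical no-op scaler standing in for fastNLP's DummyGradScaler,
        # so callers can use one code path whether or not fp16 is enabled.
        def scale(self, loss):
            return loss
        def step(self, optimizer):
            optimizer.step()
        def update(self):
            pass

    def build_fp16_env(dummy=False):
        if dummy:
            # fp16 disabled: a no-op autocast context and the dummy scaler.
            return contextlib.nullcontext, DummyGradScaler
        # torch.cuda.amp landed in Pytorch 1.6, hence the version floor.
        if parse_version(torch.__version__) < parse_version('1.6'):
            raise RuntimeError(f"Pytorch({torch.__version__}) needs to be at least version 1.6 to use fp16.")
        if not torch.cuda.is_available():
            raise RuntimeError("Pytorch is not installed with CUDA support, please use device='cpu'.")
        return torch.cuda.amp.autocast, torch.cuda.amp.GradScaler

Usage mirrors the driver code in the first hunk: auto_cast, grad_scaler_cls = build_fp16_env(dummy=not fp16), then wrap forward passes in "with auto_cast():" and scale losses through grad_scaler_cls().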