@@ -22,7 +22,7 @@ pipeline {
                 agent {
                     docker {
                         image 'fnlp:torch-1.11'
-                        args '-u root:root -v ${JENKINS_HOME}/html/docs:/docs -v ${JENKINS_HOME}/html/_ci:/ci --gpus all --shm-size 256M'
+                        args '-u root:root -v ${JENKINS_HOME}/html/docs:/docs -v ${JENKINS_HOME}/html/_ci:/ci --gpus all --shm-size 1G'
                     }
                 }
                 steps {
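
Raising `--shm-size` from 256M to 1G matters because PyTorch `DataLoader` worker processes exchange batches through the container's `/dev/shm`, and a 256M mount is easily exhausted by multi-worker tests. Below is a minimal sketch, assuming the default Docker shared-memory mount at `/dev/shm` and an arbitrary 512 MiB threshold, for sanity-checking the mount from inside the container; it is not part of the pipeline.

```python
# Minimal sketch (not part of the pipeline): check the shared-memory mount
# that DataLoader worker processes use. The /dev/shm path and the 512 MiB
# threshold are assumptions for illustration.
import shutil

def shm_free_mib(path: str = "/dev/shm") -> float:
    """Free space, in MiB, on the shared-memory mount."""
    return shutil.disk_usage(path).free / 2**20

if __name__ == "__main__":
    free = shm_free_mib()
    print(f"/dev/shm free: {free:.0f} MiB")
    if free < 512:
        print("warning: shared memory may be too small for num_workers > 0")
```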
@@ -55,19 +55,19 @@ pipeline {
                         sh 'FASTNLP_BACKEND=paddle pytest ./tests/core/controllers/test_trainer_paddle.py --durations=0 --co'
                     }
                 }
-                stage('Test Jittor') {
-                    agent {
-                        docker {
-                            image 'fnlp:jittor'
-                            args '-u root:root -v ${JENKINS_HOME}/html/docs:/docs -v ${JENKINS_HOME}/html/_ci:/ci --gpus all'
-                        }
-                    }
-                    steps {
-                        // sh 'pip install fitlog'
-                        // sh 'pytest ./tests --html=test_results.html --self-contained-html'
-                        sh 'pytest ./tests --durations=0 -m jittor --co'
-                    }
-                }
+                // stage('Test Jittor') {
+                //     agent {
+                //         docker {
+                //             image 'fnlp:jittor'
+                //             args '-u root:root -v ${JENKINS_HOME}/html/docs:/docs -v ${JENKINS_HOME}/html/_ci:/ci --gpus all'
+                //         }
+                //     }
+                //     steps {
+                //         // sh 'pip install fitlog'
+                //         // sh 'pytest ./tests --html=test_results.html --self-contained-html'
+                //         sh 'pytest ./tests --durations=0 -m jittor --co'
+                //     }
+                // }
             }
         }
     }
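
The disabled stage only collected (`--co`) the tests carrying the `jittor` marker rather than running them. As a reference for how that selection works, here is a hypothetical marker-based test, not code from the repository:

```python
# Hypothetical illustration of pytest marker selection (not from the repo).
# A test tagged with the custom "jittor" marker is picked up by:
#   pytest ./tests -m jittor --co   # collect only, do not execute
import pytest

@pytest.mark.jittor
def test_jittor_backend_smoke():
    # Would exercise the jittor backend when the marker is selected.
    assert True
```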
@@ -73,7 +73,6 @@ def model_and_optimizers(request):
 @pytest.mark.torch
-@pytest.mark.temp
 @pytest.mark.parametrize("driver,device", [("torch", [0, 1]), ("torch", 1), ("torch", "cpu")]) # ("torch", "cpu"), ("torch", [0, 1]), ("torch", 1)
 @magic_argv_env_context
 def test_load_best_model_callback(
@@ -83,7 +82,6 @@ def test_load_best_model_callback(
 ):
     for save_folder in ['save_models', None]:
         for only_state_dict in [True, False]:
-            logger.error(f"{save_folder}, {only_state_dict}")
             callbacks = [LoadBestModelCallback(monitor='acc', only_state_dict=only_state_dict,
                                                save_folder=save_folder)]
             trainer = Trainer(
@@ -12,14 +12,12 @@ def test_no_args():
     def f(*args, a, b, **kwarg):
         c = 100
     call_kwargs = _match_param(f, demo)
-    with pytest.raises(RuntimeError):
-        f(a=1, b=2)
+    f(a=1, b=2)
 
     def f(a, *args, b, **kwarg):
         c = 100
     call_kwargs = _match_param(f, demo)
-    with pytest.raises(RuntimeError):
-        f(a=1, b=2)
+    f(a=1, b=2)
 
 @recover_logger
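
With the `pytest.raises(RuntimeError)` wrappers removed, `f(a=1, b=2)` is now expected to succeed for these `*args`/`**kwarg` signatures. As a rough illustration of signature-based keyword matching of this kind, here is a sketch built on `inspect`; it is not fastNLP's actual `_match_param` implementation:

```python
# Hypothetical sketch of keyword matching against a function signature
# (illustration only; fastNLP's _match_param is not reproduced here).
import inspect

def match_param(fn, **available):
    """Keep only the keyword arguments that `fn` can accept."""
    sig = inspect.signature(fn)
    accepts_var_kw = any(p.kind is inspect.Parameter.VAR_KEYWORD
                         for p in sig.parameters.values())
    if accepts_var_kw:
        return dict(available)          # **kwargs swallows everything
    return {name: value for name, value in available.items()
            if name in sig.parameters}

def f(*args, a, b, **kwarg):
    return a + b

print(f(**match_param(f, a=1, b=2, c=3)))  # -> 3; c lands in **kwarg
```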
@@ -147,13 +147,14 @@ class TestFdl:
         assert 'Parameter:prefetch_factor' in out[0]
 
     @recover_logger
-    @pytest.mark.temp
     def test_version_111(self):
         if parse_version(torch.__version__) <= parse_version('1.7'):
            pytest.skip("Torch version smaller than 1.7")
         logger.set_stdout()
         ds = DataSet({"x": [[1, 2], [2, 3, 4], [4, 5, 6, 7]] * 10, "y": [1, 0, 1] * 10})
         with Capturing() as out:
-            dl = TorchDataLoader(ds, num_workers=2, prefetch_factor=3, shuffle=False)
+            dl = TorchDataLoader(ds, num_workers=0, prefetch_factor=2, generator=torch.Generator(), shuffle=False)
             for idx, batch in enumerate(dl):
                 assert len(batch['x'])==1
                 assert batch['x'][0].tolist() == ds[idx]['x']
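
The updated test builds the loader single-process (`num_workers=0`) and passes an explicit `torch.Generator()`, which avoids spawning worker processes in CI and keeps any sampling reproducible. A standalone sketch of the same pattern with plain `torch.utils.data.DataLoader` follows; the list dataset and collate function are stand-ins, not fastNLP's `DataSet`/`TorchDataLoader`:

```python
# Standalone sketch of the single-process, seeded-DataLoader pattern.
# The data and collate function are stand-ins for fastNLP's DataSet wrapper.
import torch
from torch.utils.data import DataLoader

data = [([1, 2], 1), ([2, 3, 4], 0), ([4, 5, 6, 7], 1)] * 10

def collate_keep_lists(batch):
    # batch_size=1, so `batch` holds a single (x, y) pair per step
    xs, ys = zip(*batch)
    return {"x": [torch.tensor(x) for x in xs], "y": torch.tensor(ys)}

gen = torch.Generator()
gen.manual_seed(0)  # fixed seed keeps any sampling reproducible across runs

dl = DataLoader(data, batch_size=1, shuffle=False, num_workers=0,
                collate_fn=collate_keep_lists, generator=gen)

for idx, batch in enumerate(dl):
    assert len(batch["x"]) == 1
    assert batch["x"][0].tolist() == data[idx][0]
```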