diff --git a/modelscope/utils/multi_modal/fp16/fp16util.py b/modelscope/utils/multi_modal/fp16/fp16util.py
index 29595a6c..f7ccd167 100644
--- a/modelscope/utils/multi_modal/fp16/fp16util.py
+++ b/modelscope/utils/multi_modal/fp16/fp16util.py
@@ -123,7 +123,7 @@ def prep_param_lists(model, flat_master=False):
         Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`. # noqa

     .. _`Training Neural Networks with Mixed Precision: Real Examples`:
-        http://on-demand.gputechconf.com/gtc/2018/video/S81012/
+        https://www.nvidia.com/en-us/on-demand/session/gtcsiliconvalley2018-s81012/
     """
     model_params = [
         param for param in model.parameters() if param.requires_grad
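
Note: for reviewers unfamiliar with this vendored helper, below is a minimal usage sketch of prep_param_lists, assuming the function matches the upstream apex fp16util API it was copied from (returning a (model_params, master_params) pair, with the flat_master=True variant returning the masters as a one-element list containing a single flattened FP32 tensor). The toy Linear model is illustrative only.

# Minimal usage sketch for prep_param_lists (assumptions noted above).
import torch

from modelscope.utils.multi_modal.fp16.fp16util import prep_param_lists

# A toy model cast to FP16 so every parameter shares one dtype, which is the
# precondition the docstring states for flat_master=True.
model = torch.nn.Linear(4, 2).half()

# flat_master=False: one FP32 master copy per model parameter.
model_params, master_params = prep_param_lists(model, flat_master=False)
assert len(master_params) == len(model_params)
assert all(p.dtype == torch.float32 for p in master_params)

# flat_master=True: masters flattened into a single FP32 tensor
# (assumed to come back as a one-element list, as in upstream apex).
model_params, flat_master = prep_param_lists(model, flat_master=True)
assert len(flat_master) == 1 and flat_master[0].dtype == torch.float32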