
optimizer.py 4.6 kB

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Differential privacy optimizer.
"""
from mindspore import nn
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype

from mindarmour.diff_privacy.mechanisms.mechanisms import MechanismsFactory
from mindarmour.utils._check_param import check_int_positive

_grad_scale = C.MultitypeFuncGraph("grad_scale")
_reciprocal = P.Reciprocal()


@_grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    """Scale a gradient by 1/scale."""
    return grad * _reciprocal(scale)


class _TupleAdd(nn.Cell):
    def __init__(self):
        super(_TupleAdd, self).__init__()
        self.add = P.TensorAdd()
        self.hyper_map = C.HyperMap()

    def construct(self, input1, input2):
        """Add two tuples of tensors element-wise."""
        out = self.hyper_map(self.add, input1, input2)
        return out


class DPOptimizerClassFactory:
    """
    Factory class of Optimizer.

    Args:
        micro_batches (int): The number of small batches split from an
            original batch. Default: 2.

    Returns:
        Optimizer, Optimizer class.

    Examples:
        >>> GaussianSGD = DPOptimizerClassFactory(micro_batches=2)
        >>> GaussianSGD.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5)
        >>> net_opt = GaussianSGD.create('Momentum')(params=network.trainable_params(),
        >>>                                          learning_rate=cfg.lr,
        >>>                                          momentum=cfg.momentum)
    """

    def __init__(self, micro_batches=2):
        self._mech_factory = MechanismsFactory()
        self.mech = None
        self._micro_batches = check_int_positive('micro_batches', micro_batches)

    def set_mechanisms(self, policy, *args, **kwargs):
        """
        Get a noise mechanism object.

        Args:
            policy (str): Noise mechanism type.
        """
        self.mech = self._mech_factory.create(policy, *args, **kwargs)

    def create(self, policy, *args, **kwargs):
        """
        Create a DP optimizer.

        Args:
            policy (str): Original optimizer type.

        Returns:
            Optimizer, an optimizer class with DP.
        """
        if policy == 'SGD':
            cls = self._get_dp_optimizer_class(nn.SGD, self.mech, self._micro_batches, *args, **kwargs)
            return cls
        if policy == 'Momentum':
            cls = self._get_dp_optimizer_class(nn.Momentum, self.mech, self._micro_batches, *args, **kwargs)
            return cls
        if policy == 'Adam':
            cls = self._get_dp_optimizer_class(nn.Adam, self.mech, self._micro_batches, *args, **kwargs)
            return cls
        raise NameError("Optimizer {} is not implemented; choose one of ['SGD', 'Momentum', 'Adam']".format(policy))

    def _get_dp_optimizer_class(self, cls, mech, micro_batches, *args, **kwargs):
        """
        Wrap an original MindSpore optimizer with the noise mechanism `mech`.
        """
        class DPOptimizer(cls):
            """
            A differentially private optimizer: adds mechanism noise to the
            gradients before the wrapped optimizer applies them.
            """

            def __init__(self, *args, **kwargs):
                super(DPOptimizer, self).__init__(*args, **kwargs)
                self._mech = mech
                self._tuple_add = _TupleAdd()
                self._hyper_map = C.HyperMap()
                self._micro_float = Tensor(micro_batches, mstype.float32)

            def construct(self, gradients):
                """
                Construct the compute flow: noise, add, average, update.
                """
                # Sample noise with the same shapes as the gradients.
                grad_noise = self._hyper_map(self._mech, gradients)
                # Add the noise to the accumulated gradients.
                grads = self._tuple_add(gradients, grad_noise)
                # Average over the number of micro-batches.
                grads = self._hyper_map(F.partial(_grad_scale, self._micro_float), grads)
                # Let the wrapped optimizer apply the noised gradients.
                gradients = super(DPOptimizer, self).construct(grads)
                return gradients

        return DPOptimizer
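
For context, a minimal usage sketch based on the docstring example above. The `nn.Dense` stand-in network, the hyperparameter values, and the top-level import path are assumptions for illustration, not part of this file:

from mindspore import nn
from mindarmour.diff_privacy import DPOptimizerClassFactory  # assumed public import path

# Stand-in model: any nn.Cell with trainable parameters works here.
network = nn.Dense(20, 10)

# Build a factory that averages noised gradients over 2 micro-batches.
factory = DPOptimizerClassFactory(micro_batches=2)
factory.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5)

# create() returns a class, not an instance; instantiate it exactly like
# the wrapped nn.Momentum optimizer.
DPMomentum = factory.create('Momentum')
net_opt = DPMomentum(params=network.trainable_params(),
                     learning_rate=0.01,
                     momentum=0.9)

Each gradient tuple passed to net_opt then receives Gaussian noise, is divided by micro_batches, and is applied by the underlying Momentum update, as in DPOptimizer.construct above. In practice such an optimizer is driven by a training wrapper that accumulates gradients over micro_batches micro-batches rather than by a plain training step.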

MindArmour focuses on security and privacy issues in AI, aiming to strengthen the trustworthiness of models and protect users' data privacy. It consists of three main modules: an adversarial-example robustness module, a Fuzz Testing module, and a privacy protection and evaluation module.