
optimizer.py 6.4 kB

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Differential privacy optimizer.
"""
from mindspore import nn
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype

from mindarmour.utils.logger import LogUtil
from mindarmour.utils._check_param import check_int_positive

from ..mechanisms.mechanisms import NoiseMechanismsFactory
from ..mechanisms.mechanisms import _MechanismsParamsUpdater

LOGGER = LogUtil.get_instance()
TAG = 'DP optimizer'

_grad_scale = C.MultitypeFuncGraph("grad_scale")
_reciprocal = P.Reciprocal()


@_grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    """Gradient scaling: divide each gradient by the given scale."""
    return grad * _reciprocal(scale)


class _TupleAdd(nn.Cell):
    """Element-wise addition of two tuples of tensors."""

    def __init__(self):
        super(_TupleAdd, self).__init__()
        self.add = P.Add()
        self.hyper_map = C.HyperMap()

    def construct(self, input1, input2):
        """Add two tuples of data."""
        out = self.hyper_map(self.add, input1, input2)
        return out


class DPOptimizerClassFactory:
    """
    Factory class of Optimizer.

    Args:
        micro_batches (int): The number of small batches split from an original batch. Default: 2.

    Returns:
        Optimizer, Optimizer class.

    Examples:
        >>> GaussianSGD = DPOptimizerClassFactory(micro_batches=2)
        >>> GaussianSGD.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5)
        >>> net_opt = GaussianSGD.create('Momentum')(params=network.trainable_params(),
        ...                                          learning_rate=0.001,
        ...                                          momentum=0.9)
    """

    def __init__(self, micro_batches=2):
        self._mech_factory = NoiseMechanismsFactory()
        self._mech = None
        self._micro_batches = check_int_positive('micro_batches', micro_batches)
    def set_mechanisms(self, policy, *args, **kwargs):
        """
        Get noise mechanism object. The policy is the noise mechanism name
        accepted by NoiseMechanismsFactory (e.g. 'Gaussian', as used in the
        Examples above). Candidate args and kwargs can be seen in class
        NoiseMechanismsFactory of mechanisms.py.

        Args:
            policy (str): Choose mechanism type.
        """
        self._mech = self._mech_factory.create(policy, *args, **kwargs)
    def create(self, policy):
        """
        Create DP optimizer. Policies can be 'sgd', 'momentum'
        or 'adam'.

        Args:
            policy (str): Choose original optimizer type.

        Returns:
            Optimizer, an optimizer with DP.
        """
        policy_ = policy.lower()
        if policy_ == 'sgd':
            dp_opt_class = self._get_dp_optimizer_class(nn.SGD)
        elif policy_ == 'momentum':
            dp_opt_class = self._get_dp_optimizer_class(nn.Momentum)
        elif policy_ == 'adam':
            dp_opt_class = self._get_dp_optimizer_class(nn.Adam)
        else:
            msg = "The policy must be in ('SGD', 'Momentum', 'Adam'), but got {}." \
                .format(policy)
            LOGGER.error(TAG, msg)
            raise NameError(msg)
        return dp_opt_class

    def _get_dp_optimizer_class(self, opt_class):
        """
        Wrap original mindspore optimizer with `self._mech`.
        """
        if self._mech is None:
            msg = 'Noise mechanism should be given through set_mechanisms(), but got None.'
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        mech = self._mech
        micro_batches = self._micro_batches

        class DPOptimizer(opt_class):
            """
            Initialize the DPOptimizerClass.

            Returns:
                Optimizer, Optimizer class.
            """

            def __init__(self, *args, **kwargs):
                super(DPOptimizer, self).__init__(*args, **kwargs)
                self._mech = mech
                self._tuple_add = _TupleAdd()
                self._hyper_map = C.HyperMap()
                self._micro_batches = Tensor(micro_batches, mstype.float32)

                self._mech_param_updater = None
                if self._mech is not None and self._mech._decay_policy is not None:
                    self._mech_param_updater = _MechanismsParamsUpdater(
                        decay_policy=self._mech._decay_policy,
                        decay_rate=self._mech._noise_decay_rate,
                        cur_noise_multiplier=self._mech._noise_multiplier,
                        init_noise_multiplier=self._mech._initial_noise_multiplier)

            def construct(self, gradients):
                """
                Construct a compute flow.
                """
                # generate noise for each gradient tensor
                grad_noise_tuple = ()
                for grad_item in gradients:
                    grad_noise = self._mech(grad_item)
                    grad_noise_tuple = grad_noise_tuple + (grad_noise,)
                # add noise
                gradients = self._tuple_add(gradients, grad_noise_tuple)
                # divide by self._micro_batches
                gradients = self._hyper_map(F.partial(_grad_scale, self._micro_batches), gradients)
                # update mechanism parameters (e.g. decay the noise multiplier)
                if self._mech_param_updater is not None:
                    multiplier = self._mech_param_updater()
                    gradients = F.depend(gradients, multiplier)
                gradients = super(DPOptimizer, self).construct(gradients)
                return gradients

        return DPOptimizer
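
In summary, for every gradient tensor the wrapped construct adds mechanism noise and then divides by micro_batches before handing the result to the original optimizer's update step. The following is a minimal plain-NumPy sketch of that per-step transformation; the Gaussian noise and the names dp_transform and noise_sigma are illustrative stand-ins for the configured mechanism, not part of this file.

import numpy as np

def dp_transform(gradients, noise_sigma=1.5, micro_batches=2):
    """Mirror of DPOptimizer.construct: per-tensor noise, then scale by 1/micro_batches."""
    transformed = []
    for grad in gradients:
        noise = np.random.normal(0.0, noise_sigma, size=grad.shape)  # stands in for self._mech(grad_item)
        transformed.append((grad + noise) / micro_batches)           # _tuple_add followed by _grad_scale
    return transformed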

MindArmour focuses on the security and privacy of AI. It is dedicated to enhancing the security and trustworthiness of models and protecting users' data privacy. It mainly consists of three modules: an adversarial example robustness module, a Fuzz Testing module, and a privacy protection and evaluation module.
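
The differential privacy optimizer above belongs to the privacy protection and evaluation module. Below is a minimal usage sketch mirroring the Examples block in DPOptimizerClassFactory; the import path mindarmour.privacy.diff_privacy and the stand-in network are assumptions for illustration.

from mindspore import nn
from mindarmour.privacy.diff_privacy import DPOptimizerClassFactory  # assumed public import path

# a tiny stand-in network; any MindSpore nn.Cell with trainable parameters works
network = nn.Dense(10, 2)

# build the factory, choose a Gaussian noise mechanism, then create the wrapped optimizer class
factory = DPOptimizerClassFactory(micro_batches=2)
factory.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5)
DPMomentum = factory.create('Momentum')  # returns an optimizer *class*, not an instance

# the returned class is instantiated exactly like the wrapped nn.Momentum
net_opt = DPMomentum(params=network.trainable_params(),
                     learning_rate=0.001,
                     momentum=0.9)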