Merge pull request !120 from jxlang910/master
@@ -350,9 +350,9 @@ class Translate(ImageTransform):
     Translate an image.

     Args:
-        x_bias ([int, float): X-direction translation, x=x+x_bias*image_length.
+        x_bias (Union[int, float]): X-direction translation, x = x + x_bias*image_length.
             Default: 0.
-        y_bias ([int, float): Y-direction translation, y=y+y_bias*image_wide.
+        y_bias (Union[int, float]): Y-direction translation, y = y + y_bias*image_wide.
             Default: 0.
     """
@@ -365,10 +365,8 @@ class Translate(ImageTransform):
         Set translate parameters.

         Args:
-            x_bias ([float, int]): X-direction translation, x=x+x_bias*image_length.
-                Default: 0.
-            y_bias ([float, int]): Y-direction translation, y=y+y_bias*image_wide.
-                Default: 0.
+            x_bias (Union[float, int]): X-direction translation. Default: 0.
+            y_bias (Union[float, int]): Y-direction translation. Default: 0.
             auto_param (bool): True if auto generate parameters. Default: False.
         """
         self.auto_param = auto_param
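
Review note: with the fixed docstring the semantics are concrete: for a 200-pixel-wide image, x_bias=0.1 shifts content 20 pixels along x. A minimal usage sketch (the import path and the set_params()/transform() call names are my assumption about this repo's fuzz-testing API, not verified against it):

    import numpy as np
    # Assumed import path for the ImageTransform subclasses in this PR.
    from mindarmour.fuzz_testing.image_transform import Translate

    image = np.random.rand(32, 32).astype(np.float32)  # one grayscale image
    trans = Translate()
    trans.set_params(x_bias=0.1, y_bias=-0.05)  # right by 10%, up by 5%
    translated = trans.transform(image)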
@@ -426,7 +426,6 @@ class AdaClippingWithGaussianRandom(Cell):
             next_norm_bound = self._sub(norm_bound,
                                         self._mul(self._learning_rate, grad_clip))
-        # decay_policy == 'Geometric'
         else:
             grad_clip = self._sub(empirical_fraction,
                                   self._target_unclipped_quantile)
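
Review note: dropping the comment is fine since the else-branch is now self-evident, but for readers of this diff, here is a plain-numpy paraphrase of the two decay policies this Cell implements. The 'Linear' branch follows the hunk above; the 'Geometric' exponential update is my assumption from the standard adaptive-clipping scheme, not the exact MindArmour source:

    import numpy as np

    def ada_clip_step(norm_bound, empirical_fraction,
                      target_unclipped_quantile, learning_rate,
                      decay_policy='Linear'):
        # empirical_fraction: fraction of per-sample gradients whose norm
        # stayed below norm_bound in the current step.
        grad_clip = empirical_fraction - target_unclipped_quantile
        if decay_policy == 'Linear':
            return norm_bound - learning_rate * grad_clip
        # decay_policy == 'Geometric': multiplicative update of the bound.
        return norm_bound * np.exp(-learning_rate * grad_clip)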
@@ -62,7 +62,7 @@ class PrivacyMonitorFactory:
             return RDPMonitor(*args, **kwargs)
         if policy == 'zcdp':
             return ZCDPMonitor(*args, **kwargs)
-        raise ValueError("Only RDP-policy or ZCDP-policy is supported by now")
+        raise ValueError("The policy must be 'rdp' or 'zcdp', but got {}".format(policy))


 class RDPMonitor(Callback):
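
Review note: the new message is strictly better. For reference, how the factory is driven, per the docstrings below (the import path assumes the v1.0.0 package layout; adjust if the module lives elsewhere):

    from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory

    rdp = PrivacyMonitorFactory.create('rdp', num_samples=60000, batch_size=32)
    PrivacyMonitorFactory.create('dp')
    # ValueError: The policy must be 'rdp' or 'zcdp', but got dp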
@@ -86,7 +86,8 @@ class RDPMonitor(Callback):
             be used to calculate privacy spent. Default: 1.5.
         max_eps (Union[float, int, None]): The maximum acceptable epsilon
             budget for DP training, which is used for estimating the max
-            training epochs. Default: 10.0.
+            training epochs. 'None' means there is no limit to epsilon budget.
+            Default: 10.0.
         target_delta (Union[float, int, None]): Target delta budget for DP
             training. If target_delta is set to be δ, then the privacy budget
             δ would be fixed during the whole training process. Default: 1e-3.
@@ -94,7 +95,7 @@ class RDPMonitor(Callback):
             budget for DP training, which is used for estimating the max
             training epochs. Max_delta must be less than 1 and suggested
             to be less than 1e-3, otherwise overflow would be encountered.
-            Default: None.
+            'None' means there is no limit to delta budget. Default: None.
         target_eps (Union[float, int, None]): Target epsilon budget for DP
             training. If target_eps is set to be ε, then the privacy budget
             ε would be fixed during the whole training process. Default: None.
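
Review note: the 'None means no limit' clarifications help, because the four budget arguments come in pairs. A hedged sketch of the two valid pairings (only one of target_eps/target_delta may be set, per the 'One of target eps and target delta must be None' check in the next hunk):

    # Fix delta and track epsilon: max_eps bounds the epoch search.
    rdp = PrivacyMonitorFactory.create('rdp', num_samples=60000, batch_size=32,
                                       target_delta=1e-3, max_eps=10.0)
    # Fix epsilon and track delta: max_delta is required instead.
    rdp = PrivacyMonitorFactory.create('rdp', num_samples=60000, batch_size=32,
                                       target_delta=None, target_eps=3.0,
                                       max_eps=None, max_delta=1e-3)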
@@ -192,6 +193,7 @@ class RDPMonitor(Callback):
             msg = 'One of target eps and target delta must be None'
             LOGGER.error(TAG, msg)
             raise ValueError(msg)
+
         if dataset_sink_mode:
             self._per_print_times = int(self._num_samples / self._batch_size)
@@ -208,6 +210,14 @@ class RDPMonitor(Callback): | |||||
>>> num_samples=60000, batch_size=32) | >>> num_samples=60000, batch_size=32) | ||||
>>> suggest_epoch = rdp.max_epoch_suggest() | >>> suggest_epoch = rdp.max_epoch_suggest() | ||||
""" | """ | ||||
if self._target_delta is not None and self._max_eps is None: | |||||
msg = 'max_eps should be consistent with target_delta, but got None.' | |||||
LOGGER.error(TAG, msg) | |||||
raise ValueError(msg) | |||||
if self._target_eps is not None and self._max_delta is None: | |||||
msg = 'max_delta should be consistent with target_eps, but got None.' | |||||
LOGGER.error(TAG, msg) | |||||
raise ValueError(msg) | |||||
epoch = 1 | epoch = 1 | ||||
while epoch < 10000: | while epoch < 10000: | ||||
steps = self._num_samples // self._batch_size | steps = self._num_samples // self._batch_size | ||||
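
Review note: the new guards surface a mismatched eps/delta pairing before the search loop runs instead of letting it fail obscurely later. The failure mode they catch:

    rdp = PrivacyMonitorFactory.create('rdp', num_samples=60000, batch_size=32,
                                       target_delta=1e-3, max_eps=None)
    rdp.max_epoch_suggest()
    # ValueError: max_eps should be consistent with target_delta, but got None.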
@@ -427,7 +437,8 @@ class ZCDPMonitor(Callback):
                                                        initial_noise_multiplier)
         if noise_decay_mode is not None:
             if noise_decay_mode not in ('Step', 'Time', 'Exp'):
-                msg = "Noise decay mode must be in ('Step', 'Time', 'Exp')"
+                msg = "Noise decay mode must be in ('Step', 'Time', 'Exp'), but got {}.".\
+                    format(noise_decay_mode)
                 LOGGER.error(TAG, msg)
                 raise ValueError(msg)
             noise_decay_rate = check_param_type('noise_decay_rate', noise_decay_rate, float)
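
Review note: echoing the offending value matches the factory fix above. For reference, a valid construction (the argument names are taken from this diff and the RDPMonitor docstring; I have not checked them against ZCDPMonitor's full signature):

    zcdp = PrivacyMonitorFactory.create('zcdp', num_samples=60000, batch_size=32,
                                        initial_noise_multiplier=1.5,
                                        noise_decay_mode='Time')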
@@ -36,7 +36,7 @@ _reciprocal = P.Reciprocal()
 @_grad_scale.register("Tensor", "Tensor")
 def tensor_grad_scale(scale, grad):
     """ grad scaling """
-    return grad * _reciprocal(scale)
+    return grad*_reciprocal(scale)


 class _TupleAdd(nn.Cell):
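
Review note: the spacing change is purely cosmetic; either way the function divides each gradient by the loss-scale factor. An equivalent plain-Python statement of what the registered function computes:

    def tensor_grad_scale_ref(scale, grad):
        # grad * (1 / scale): undo loss scaling on each gradient.
        # In the module above, _reciprocal is P.Reciprocal(), so the
        # division stays a MindSpore op rather than Python arithmetic.
        return grad * (1.0 / scale)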
@@ -59,14 +59,14 @@ class DPOptimizerClassFactory: | |||||
micro_batches (int): The number of small batches split from an original batch. Default: 2. | micro_batches (int): The number of small batches split from an original batch. Default: 2. | ||||
Returns: | Returns: | ||||
Optimizer, Optimizer class | |||||
Optimizer, Optimizer class. | |||||
Examples: | Examples: | ||||
>>> GaussianSGD = DPOptimizerClassFactory(micro_batches=2) | >>> GaussianSGD = DPOptimizerClassFactory(micro_batches=2) | ||||
>>> GaussianSGD.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5) | >>> GaussianSGD.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5) | ||||
>>> net_opt = GaussianSGD.create('Momentum')(params=network.trainable_params(), | >>> net_opt = GaussianSGD.create('Momentum')(params=network.trainable_params(), | ||||
>>> learning_rate=cfg.lr, | |||||
>>> momentum=cfg.momentum) | |||||
>>> learning_rate=0.001, | |||||
>>> momentum=0.9) | |||||
""" | """ | ||||
def __init__(self, micro_batches=2): | def __init__(self, micro_batches=2): | ||||
@@ -76,7 +76,9 @@ class DPOptimizerClassFactory:

     def set_mechanisms(self, policy, *args, **kwargs):
         """
-        Get noise mechanism object.
+        Get noise mechanism object. Policies can be 'sgd', 'momentum'
+        or 'adam'. Candidate args and kwargs can be seen in class
+        NoiseMechanismsFactory of mechanisms.py.

         Args:
             policy (str): Choose mechanism type.
@@ -85,15 +87,15 @@ class DPOptimizerClassFactory: | |||||
def create(self, policy): | def create(self, policy): | ||||
""" | """ | ||||
Create DP optimizer. | |||||
Create DP optimizer. Policies can be 'sgd', 'momentum' | |||||
or 'adam'. | |||||
Args: | Args: | ||||
policy (str): Choose original optimizer type. | policy (str): Choose original optimizer type. | ||||
Returns: | Returns: | ||||
Optimizer, A optimizer with DP. | |||||
Optimizer, an optimizer with DP. | |||||
""" | """ | ||||
dp_opt_class = None | |||||
policy_ = policy.lower() | policy_ = policy.lower() | ||||
if policy_ == 'sgd': | if policy_ == 'sgd': | ||||
dp_opt_class = self._get_dp_optimizer_class(nn.SGD) | dp_opt_class = self._get_dp_optimizer_class(nn.SGD) | ||||
@@ -102,7 +104,7 @@ class DPOptimizerClassFactory:
         elif policy_ == 'adam':
             dp_opt_class = self._get_dp_optimizer_class(nn.Adam)
         else:
-            msg = "The {} optimizer is not implement, please choose ['SGD', 'Momentum', 'Adam']" \
+            msg = "The policy must be in ('SGD', 'Momentum', 'Adam'), but got {}." \
                 .format(policy)
             LOGGER.error(TAG, msg)
             raise NameError(msg)
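
Review note: listing ('SGD', 'Momentum', 'Adam') matches the three dispatch branches, and the lookup is case-insensitive because policy is lowercased first. Hedged behavior sketch:

    factory = DPOptimizerClassFactory(micro_batches=2)
    factory.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5)
    opt_class = factory.create('momentum')  # ok: lowercased before dispatch
    factory.create('AdaGrad')
    # NameError: The policy must be in ('SGD', 'Momentum', 'Adam'), but got AdaGrad.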
@@ -112,6 +114,10 @@ class DPOptimizerClassFactory:
         """
         Wrap original mindspore optimizer with `self._mech`.
         """
+        if self._mech is None:
+            msg = 'Noise mechanism should be given through set_mechanisms(), but got None.'
+            LOGGER.error(TAG, msg)
+            raise ValueError(msg)
         mech = self._mech
         micro_batches = self._micro_batches
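
Review note: good defensive check. Without it, calling create() before set_mechanisms() would only fail later, inside the wrapped optimizer's construct; now it fails fast with an actionable message:

    factory = DPOptimizerClassFactory(micro_batches=2)
    factory.create('SGD')  # no noise mechanism set yet
    # ValueError: Noise mechanism should be given through set_mechanisms(), but got None.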
@@ -306,8 +306,7 @@ class _ClipGradients(nn.Cell):
         """
         construct a compute flow.
         """
-        # pylint: disable=consider-using-in
-        if clip_type != 0 and clip_type != 1:
+        if clip_type not in (0, 1):
             return grads
         new_grads = ()
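
Review note: `not in (0, 1)` is exactly the pattern the deleted pylint directive had been suppressing a warning about, so both lines can go together. For diff readers, a plain-numpy paraphrase of the construct() flow; the meaning of clip_type is my assumption from the usual MindSpore convention (0 clips by value, 1 clips by L2 norm), not stated in this hunk:

    import numpy as np

    def clip_gradients_ref(grads, clip_type, clip_value):
        if clip_type not in (0, 1):
            return grads                      # unknown type: pass through
        new_grads = ()
        for grad in grads:
            if clip_type == 0:                # element-wise clip by value
                t = np.clip(grad, -clip_value, clip_value)
            else:                             # clip by global L2 norm
                norm = np.linalg.norm(grad)
                t = grad * min(1.0, clip_value / max(norm, 1e-12))
            new_grads += (t,)
        return new_grads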