fix cmetrics problem

Tag: v1.8.0
Author: ZhidanLiu, 3 years ago
Parent commit: f3acd2f983

21 changed files with 206 additions and 221 deletions
 1. .jenkins/check/config/filter_pylint.txt (+1, -0)
 2. .jenkins/test/config/dependent_packages.yaml (+1, -1)
 3. mindarmour/__init__.py (+1, -1)
 4. mindarmour/adv_robustness/attacks/__init__.py (+4, -2)
 5. mindarmour/adv_robustness/attacks/attack.py (+5, -6)
 6. mindarmour/adv_robustness/attacks/carlini_wagner.py (+54, -74)
 7. mindarmour/adv_robustness/attacks/deep_fool.py (+15, -20)
 8. mindarmour/adv_robustness/detectors/black/similarity_detector.py (+1, -3)
 9. mindarmour/fuzz_testing/fuzzing.py (+0, -1)
10. mindarmour/natural_robustness/transform/image/natural_perturb.py (+4, -6)
11. mindarmour/privacy/diff_privacy/optimizer/optimizer.py (+6, -7)
12. mindarmour/privacy/diff_privacy/train/model.py (+7, -9)
13. mindarmour/privacy/evaluation/_check_config.py (+5, -4)
14. mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py (+96, -75)
15. mindarmour/privacy/sup_privacy/train/model.py (+0, -1)
16. mindarmour/reliability/concept_drift/concept_drift_check_images.py (+1, -3)
17. mindarmour/reliability/concept_drift/concept_drift_check_time_series.py (+0, -2)
18. mindarmour/reliability/model_fault_injection/__init__.py (+0, -1)
19. mindarmour/reliability/model_fault_injection/fault_injection.py (+0, -2)
20. mindarmour/reliability/model_fault_injection/fault_type.py (+0, -2)
21. setup.py (+5, -1)

.jenkins/check/config/filter_pylint.txt (+1, -0)

@@ -6,6 +6,7 @@
 "mindarmour/setup.py" "missing-docstring"
 "mindarmour/setup.py" "invalid-name"
 "mindarmour/mindarmour/reliability/model_fault_injection/fault_injection.py" "protected-access"
+"mindarmour/setup.py" "unused-argument"
 
 # Tests
 "mindarmour/tests/st" "missing-docstring"


.jenkins/test/config/dependent_packages.yaml (+1, -1)

@@ -1,2 +1,2 @@
 mindspore:
-  'mindspore/mindspore/version/202202/20220226/master_20220226002452_09f114e52ef6ebdefb1de4477e035b771b61f5b6/'
+  'mindspore/mindspore/daily/202203/20220320/master_20220320041531_3e442945369de2d9dd20e9e2e9d3c7524a128ee7_newest/'

mindarmour/__init__.py (+1, -1)

@@ -15,7 +15,7 @@
 MindArmour, a tool box of MindSpore to enhance model trustworthiness and achieve
 privacy-preserving machine learning.
 """
-from .adv_robustness.attacks import Attack
+from .adv_robustness.attacks.attack import Attack
 from .adv_robustness.attacks.black.black_model import BlackModel
 from .adv_robustness.defenses.defense import Defense
 from .adv_robustness.detectors.detector import Detector


mindarmour/adv_robustness/attacks/__init__.py (+4, -2)

@@ -15,8 +15,10 @@
 This module includes classical black-box and white-box attack algorithms
 in making adversarial examples.
 """
-from .gradient_method import *
-from .iterative_gradient_method import *
+from .gradient_method import FastGradientMethod, FastGradientSignMethod, RandomFastGradientMethod, \
+    RandomFastGradientSignMethod, LeastLikelyClassMethod, RandomLeastLikelyClassMethod
+from .iterative_gradient_method import IterativeGradientMethod, BasicIterativeMethod, MomentumIterativeMethod, \
+    ProjectedGradientDescent, DiverseInputIterativeMethod, MomentumDiverseInputIterativeMethod
 from .deep_fool import DeepFool
 from .jsma import JSMAAttack
 from .carlini_wagner import CarliniWagnerL2Attack


mindarmour/adv_robustness/attacks/attack.py (+5, -6)

@@ -178,13 +178,12 @@ class Attack:
         best_position = check_numpy_param('best_position', best_position)
         x_ori, best_position = check_equal_shape('x_ori', x_ori, 'best_position', best_position)
         _, original_num = self._detection_scores((best_position,) + auxiliary_inputs, gt_boxes, gt_labels, model)
-        # pylint: disable=invalid-name
-        REDUCTION_ITERS = 6  # recover 10% difference each time and recover 60% totally.
-        for _ in range(REDUCTION_ITERS):
-            BLOCK_NUM = 30  # divide the image into 30 segments
-            block_width = best_position.shape[0] // BLOCK_NUM
+        reduction_iters = 6  # recover 10% difference each time and recover 60% totally.
+        for _ in range(reduction_iters):
+            block_num = 30  # divide the image into 30 segments
+            block_width = best_position.shape[0] // block_num
             if block_width > 0:
-                for i in range(BLOCK_NUM):
+                for i in range(block_num):
                     diff = x_ori[i*block_width: (i+1)*block_width, :, :]\
                            - best_position[i*block_width:(i+1)*block_width, :, :]
                     if np.max(np.abs(diff)) >= 0.1*(self._bounds[1] - self._bounds[0]):


mindarmour/adv_robustness/attacks/carlini_wagner.py (+54, -74)

@@ -125,20 +125,14 @@ class CarliniWagnerL2Attack(Attack):
         self._num_classes = check_int_positive('num_classes', num_classes)
         self._min = check_param_type('box_min', box_min, float)
         self._max = check_param_type('box_max', box_max, float)
-        self._bin_search_steps = check_int_positive('search_steps',
-                                                    bin_search_steps)
-        self._max_iterations = check_int_positive('max_iterations',
-                                                  max_iterations)
-        self._confidence = check_param_multi_types('confidence', confidence,
-                                                   [int, float])
-        self._learning_rate = check_value_positive('learning_rate',
-                                                   learning_rate)
-        self._initial_const = check_value_positive('initial_const',
-                                                   initial_const)
+        self._bin_search_steps = check_int_positive('search_steps', bin_search_steps)
+        self._max_iterations = check_int_positive('max_iterations', max_iterations)
+        self._confidence = check_param_multi_types('confidence', confidence, [int, float])
+        self._learning_rate = check_value_positive('learning_rate', learning_rate)
+        self._initial_const = check_value_positive('initial_const', initial_const)
         self._abort_early = check_param_type('abort_early', abort_early, bool)
         self._fast = check_param_type('fast', fast, bool)
-        self._abort_early_check_ratio = check_value_positive('abort_early_check_ratio',
-                                                             abort_early_check_ratio)
+        self._abort_early_check_ratio = check_value_positive('abort_early_check_ratio', abort_early_check_ratio)
         self._targeted = check_param_type('targeted', targeted, bool)
         self._net_grad = GradWrap(self._network)
         self._sparse = check_param_type('sparse', sparse, bool)
@@ -154,10 +148,8 @@ class CarliniWagnerL2Attack(Attack):
             new_x (numpy.ndarray): Adversarial examples.
             org_x (numpy.ndarray): Original benign input samples.
             org_or_target_class (numpy.ndarray): Original/target labels.
-            constant (float): A trade-off constant to use to balance loss
-                and perturbation norm.
-            confidence (float): Confidence level of the output of adversarial
-                examples.
+            constant (float): A trade-off constant to use to balance loss and perturbation norm.
+            confidence (float): Confidence level of the output of adversarial examples.
 
         Returns:
             numpy.ndarray, norm of perturbation, sum of the loss and the
@@ -183,7 +175,7 @@ class CarliniWagnerL2Attack(Attack):
 
         other_class_index = _best_logits_of_other_class(
             logits, org_or_target_class, value=np.inf)
-        loss1 = np.sum((new_x - org_x)**2,
+        loss1 = np.sum((new_x - org_x) ** 2,
                        axis=tuple(range(len(new_x.shape))[1:]))
         loss2 = np.zeros_like(loss1, dtype=self._dtype)
         loss2_grade = np.zeros_like(new_x, dtype=self._dtype)
@@ -193,16 +185,16 @@ class CarliniWagnerL2Attack(Attack):
                 loss2[i] = max(0, logits[i][other_class_index[i]]
                                - logits[i][org_or_target_class[i]]
                                + confidence)
-                loss2_grade[i] = constant[i]*(jaco_grad[other_class_index[
+                loss2_grade[i] = constant[i] * (jaco_grad[other_class_index[
                     i]][i] - jaco_grad[org_or_target_class[i]][i])
         else:
             for i in range(org_or_target_class.shape[0]):
                 loss2[i] = max(0, logits[i][org_or_target_class[i]]
                                - logits[i][other_class_index[i]] + confidence)
-                loss2_grade[i] = constant[i]*(jaco_grad[org_or_target_class[
+                loss2_grade[i] = constant[i] * (jaco_grad[org_or_target_class[
                     i]][i] - jaco_grad[other_class_index[i]][i])
-        total_loss = loss1 + constant*loss2
-        loss1_grade = 2*(new_x - org_x)
+        total_loss = loss1 + constant * loss2
+        loss1_grade = 2 * (new_x - org_x)
         for i in range(org_or_target_class.shape[0]):
             if loss2[i] < 0:
                 msg = 'loss value should greater than or equal to 0, ' \
@@ -233,7 +225,7 @@ class CarliniWagnerL2Attack(Attack):
         mean = (self._min + self._max) / 2
         diff = (self._max - self._min) / 2
         inputs = (inputs - mean) / diff
-        inputs = inputs*0.999999
+        inputs = inputs * 0.999999
         return np.arctanh(inputs)
 
     def _to_model_space(self, inputs):
@@ -257,8 +249,8 @@ class CarliniWagnerL2Attack(Attack):
         the_grad = 1 - np.square(inputs)
         mean = (self._min + self._max) / 2
         diff = (self._max - self._min) / 2
-        inputs = inputs*diff + mean
-        the_grad = the_grad*diff
+        inputs = inputs * diff + mean
+        the_grad = the_grad * diff
         return inputs, the_grad
 
     def _check_success(self, logits, labels):
@@ -292,35 +284,30 @@ class CarliniWagnerL2Attack(Attack):
         reconstructed_original, _ = self._to_model_space(att_original)
 
         # find an adversarial sample
-        const = np.ones_like(labels, dtype=self._dtype)*self._initial_const
+        const = np.ones_like(labels, dtype=self._dtype) * self._initial_const
         lower_bound = np.zeros_like(labels, dtype=self._dtype)
-        upper_bound = np.ones_like(labels, dtype=self._dtype)*np.inf
+        upper_bound = np.ones_like(labels, dtype=self._dtype) * np.inf
         adversarial_res = inputs.copy()
-        adversarial_loss = np.ones_like(labels, dtype=self._dtype)*np.inf
+        adversarial_loss = np.ones_like(labels, dtype=self._dtype) * np.inf
         samples_num = labels.shape[0]
         adv_flag = np.zeros_like(labels)
         for binary_search_step in range(self._bin_search_steps):
-            if (binary_search_step == self._bin_search_steps - 1) and \
-                    (self._bin_search_steps >= 10):
+            if (binary_search_step == self._bin_search_steps - 1) and (self._bin_search_steps >= 10):
                 const = min(1e10, upper_bound)
-            LOGGER.debug(TAG,
-                         'starting optimization with const = %s',
-                         str(const))
+            LOGGER.debug(TAG, 'starting optimization with const = %s', str(const))
 
             att_perturbation = np.zeros_like(att_original, dtype=self._dtype)
-            loss_at_previous_check = np.ones_like(labels, dtype=self._dtype)*np.inf
+            loss_at_previous_check = np.ones_like(labels, dtype=self._dtype) * np.inf
 
             # create a new optimizer to minimize the perturbation
             optimizer = _AdamOptimizer(att_perturbation.shape)
 
             for iteration in range(self._max_iterations):
-                x_input, dxdp = self._to_model_space(
-                    att_original + att_perturbation)
+                x_input, dxdp = self._to_model_space(att_original + att_perturbation)
                 logits = self._network(Tensor(x_input)).asnumpy()
 
-                current_l2_loss, current_loss, dldx = self._loss_function(
-                    logits, x_input, reconstructed_original,
-                    labels, const, self._confidence)
+                current_l2_loss, current_loss, dldx = self._loss_function(logits, x_input, reconstructed_original,
+                                                                          labels, const, self._confidence)
 
                 is_adv = self._check_success(logits, labels)


@@ -334,58 +321,51 @@ class CarliniWagnerL2Attack(Attack):
                 if np.all(adv_flag):
                     if self._fast:
                         LOGGER.debug(TAG, "succeed find adversarial examples.")
-                        msg = 'iteration: {}, logits_att: {}, ' \
-                              'loss: {}, l2_dist: {}' \
-                            .format(iteration,
-                                    np.argmax(logits, axis=1),
-                                    current_loss, current_l2_loss)
+                        msg = 'iteration: {}, logits_att: {}, loss: {}, l2_dist: {}' \
+                            .format(iteration, np.argmax(logits, axis=1), current_loss, current_l2_loss)
                         LOGGER.debug(TAG, msg)
                         return adversarial_res
 
                 dldx, inputs = check_equal_shape('dldx', dldx, 'inputs', inputs)
 
-                gradient = dldx*dxdp
-                att_perturbation += \
-                    optimizer(gradient, self._learning_rate)
+                gradient = dldx * dxdp
+                att_perturbation += optimizer(gradient, self._learning_rate)
 
                 # check if should stop iteration early
                 flag = True
                 iter_check = iteration % (np.ceil(
-                    self._max_iterations*self._abort_early_check_ratio))
+                    self._max_iterations * self._abort_early_check_ratio))
                 if self._abort_early and iter_check == 0:
                     # check progress
                     for i in range(inputs.shape[0]):
-                        if current_loss[i] <= .9999*loss_at_previous_check[i]:
+                        if current_loss[i] <= .9999 * loss_at_previous_check[i]:
                             flag = False
                     # stop Adam if all samples has no progress
                     if flag:
-                        LOGGER.debug(TAG,
-                                     'step:%d, no progress yet, stop iteration',
-                                     binary_search_step)
+                        LOGGER.debug(TAG, 'step:%d, no progress yet, stop iteration', binary_search_step)
                         break
                 loss_at_previous_check = current_loss
 
-            for i in range(samples_num):
-                # update bound based on search result
-                if adv_flag[i]:
-                    LOGGER.debug(TAG,
-                                 'example %d, found adversarial with const=%f',
-                                 i, const[i])
-                    upper_bound[i] = const[i]
-                else:
-                    LOGGER.debug(TAG,
-                                 'example %d, failed to find adversarial'
-                                 ' with const=%f',
-                                 i, const[i])
-                    lower_bound[i] = const[i]
-
-                if upper_bound[i] == np.inf:
-                    const[i] *= 10
-                else:
-                    const[i] = (lower_bound[i] + upper_bound[i]) / 2
+            upper_bound, lower_bound, const = self._update_bounds(samples_num, adv_flag, const, upper_bound,
+                                                                  lower_bound)
 
         return adversarial_res
 
+    def _update_bounds(self, samples_num, adv_flag, const, upper_bound, lower_bound):
+        """update bound based on search result"""
+        for i in range(samples_num):
+            if adv_flag[i]:
+                LOGGER.debug(TAG, 'example %d, found adversarial with const=%f', i, const[i])
+                upper_bound[i] = const[i]
+            else:
+                LOGGER.debug(TAG, 'example %d, failed to find adversarial with const=%f', i, const[i])
+                lower_bound[i] = const[i]
+
+            if upper_bound[i] == np.inf:
+                const[i] *= 10
+            else:
+                const[i] = (lower_bound[i] + upper_bound[i]) / 2
+        return upper_bound, lower_bound, const



@@ -428,8 +408,8 @@ class _AdamOptimizer:
         """
         gradient = check_numpy_param('gradient', gradient)
         self._t += 1
-        self._m = beta1*self._m + (1 - beta1)*gradient
-        self._v = beta2*self._v + (1 - beta2)*gradient**2
-        alpha = learning_rate*np.sqrt(1 - beta2**self._t) / (1 - beta1**self._t)
-        pertur = -alpha*self._m / (np.sqrt(self._v) + epsilon)
+        self._m = beta1 * self._m + (1 - beta1) * gradient
+        self._v = beta2 * self._v + (1 - beta2) * gradient ** 2
+        alpha = learning_rate * np.sqrt(1 - beta2 ** self._t) / (1 - beta1 ** self._t)
+        pertur = -alpha * self._m / (np.sqrt(self._v) + epsilon)
         return pertur
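
For reference, the reformatted `_AdamOptimizer` lines above are the standard Adam update, with g_t the `gradient` argument and \eta the `learning_rate`:

\begin{aligned}
m_t &= \beta_1 m_{t-1} + (1 - \beta_1)\, g_t \\
v_t &= \beta_2 v_{t-1} + (1 - \beta_2)\, g_t^2 \\
\alpha_t &= \eta\, \sqrt{1 - \beta_2^{\,t}} \,/\, (1 - \beta_1^{\,t}) \\
\Delta_t &= -\alpha_t\, m_t \,/\, (\sqrt{v_t} + \epsilon)
\end{aligned}

`pertur` is \Delta_t, the step added to `att_perturbation` by the caller.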

mindarmour/adv_robustness/attacks/deep_fool.py (+15, -20)

@@ -253,16 +253,7 @@
                 if diff_w_k < diff_w:
                     diff_w = diff_w_k
                     weight = w_k
-            if self._norm_level == 2 or self._norm_level == '2':
-                r_i = diff_w*weight / (np.linalg.norm(weight) + 1e-8)
-            elif self._norm_level == np.inf or self._norm_level == 'inf':
-                r_i = diff_w*np.sign(weight) \
-                      / (np.linalg.norm(weight, ord=1) + 1e-8)
-            else:
-                msg = 'ord {} is not available in normalization,' \
-                    .format(str(self._norm_level))
-                LOGGER.error(TAG, msg)
-                raise NotImplementedError(msg)
+            r_i = self._normalize_r_i(diff_w, weight)
             r_tot[idx, ...] = r_tot[idx, ...] + r_i
 
         images = self._update_image(x_origin, r_tot)
@@ -311,16 +302,7 @@
                     diff_w = diff_w_k
                     weight = w_k
 
-            if self._norm_level == 2 or self._norm_level == '2':
-                r_i = diff_w*weight / (np.linalg.norm(weight) + 1e-8)
-            elif self._norm_level == np.inf or self._norm_level == 'inf':
-                r_i = diff_w*np.sign(weight) \
-                      / (np.linalg.norm(weight, ord=1) + 1e-8)
-            else:
-                msg = 'ord {} is not available in normalization.' \
-                    .format(str(self._norm_level))
-                LOGGER.error(TAG, msg)
-                raise NotImplementedError(msg)
+            r_i = self._normalize_r_i(diff_w, weight)
             r_tot[idx, ...] = r_tot[idx, ...] + r_i
 
             if self._bounds is not None:
@@ -337,3 +319,16 @@
         inputs = inputs.astype(inputs_dtype)
         del preds, grads
         return inputs
+
+    def _normalize_r_i(self, diff_w, weight):
+        """normalize r_i used to update r_tot"""
+        if self._norm_level == 2 or self._norm_level == '2':
+            r_i = diff_w * weight / (np.linalg.norm(weight) + 1e-8)
+        elif self._norm_level == np.inf or self._norm_level == 'inf':
+            r_i = diff_w * np.sign(weight) / (np.linalg.norm(weight, ord=1) + 1e-8)
+        else:
+            msg = 'ord {} is not available in normalization,'.format(str(self._norm_level))
+            LOGGER.error(TAG, msg)
+            raise NotImplementedError(msg)
+
+        return r_i
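
For reference, the two supported branches of the new `_normalize_r_i` helper compute the DeepFool perturbation step from the classifier-difference magnitude d (`diff_w`) and boundary gradient w (`weight`):

r_i = \frac{d\, w}{\lVert w \rVert_2 + 10^{-8}} \quad (\text{norm level } 2),
\qquad
r_i = \frac{d\, \operatorname{sign}(w)}{\lVert w \rVert_1 + 10^{-8}} \quad (\text{norm level } \infty).

The 10^{-8} term only guards against division by zero; any other `norm_level` raises NotImplementedError, exactly as before the refactor.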

mindarmour/adv_robustness/detectors/black/similarity_detector.py (+1, -3)

@@ -160,9 +160,7 @@
             distance_mat = np.sort(distance_mat, axis=-1)
             distances.append(distance_mat[:, :self._max_k_neighbor])
         # the rest
-        distance_mat = _pairwise_distances(x_input=data[(data.shape[0] //
-                                                         self._chunk_size)*
-                                                        self._chunk_size:, :],
+        distance_mat = _pairwise_distances(x_input=data[(data.shape[0] // self._chunk_size) * self._chunk_size:, :],
                                            y_input=data)
         distance_mat = np.sort(distance_mat, axis=-1)
         distances.append(distance_mat[:, :self._max_k_neighbor])
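
The reflowed call above handles the tail of a chunked pairwise-distance computation: the data is processed in blocks of `_chunk_size` rows, and the slice `data[(data.shape[0] // chunk_size) * chunk_size:, :]` is whatever remains after the last full chunk. A small sketch of the pattern (hypothetical helper names, plain NumPy, not the repository's code):

import numpy as np

def pairwise_distances(x, y):
    """Euclidean distance between every row of x and every row of y."""
    return np.linalg.norm(x[:, None, :] - y[None, :, :], axis=-1)

def chunked_knn_distances(data, chunk_size, k):
    """Distances to the k nearest rows of data, computed chunk by chunk."""
    parts = []
    full = (data.shape[0] // chunk_size) * chunk_size
    for start in range(0, full, chunk_size):
        dist = np.sort(pairwise_distances(data[start:start + chunk_size], data), axis=-1)
        parts.append(dist[:, :k])
    if full < data.shape[0]:  # "the rest", as in the hunk above
        dist = np.sort(pairwise_distances(data[full:], data), axis=-1)
        parts.append(dist[:, :k])
    return np.concatenate(parts, axis=0)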


mindarmour/fuzz_testing/fuzzing.py (+0, -1)

@@ -450,7 +450,6 @@
         for mutate in mutate_config:
             method = mutate['method']
             if method not in self._attacks_list:
-                # mutates[method] = self._strategies[method]()
                 mutates[method] = self._strategies[method]
             else:
                 network = self._target_model._network


mindarmour/natural_robustness/transform/image/natural_perturb.py (+4, -6)

@@ -120,9 +120,11 @@ class _NaturalPerturb:
         pass
 
     def _check(self, image):
-        """ Check image format. If input image is RGB and its shape
+        """
+        Check image format. If input image is RGB and its shape
         is (C, H, W), it will be transposed to (H, W, C). If the value
-        of the image is not normalized , it will be rescaled between 0 to 255."""
+        of the image is not normalized , it will be rescaled between 0 to 255.
+        """
         rgb = _is_rgb(image)
         chw = False
         gray3dim = False
@@ -131,14 +133,10 @@
             chw = _is_chw(image)
             if chw:
                 image = _chw_to_hwc(image)
-            else:
-                image = image
         else:
             if len(np.shape(image)) == 3:
                 gray3dim = True
                 image = image[0]
-            else:
-                image = image
         if normalized:
             image = image * 255
         return rgb, chw, normalized, gray3dim, np.uint8(image)


mindarmour/privacy/diff_privacy/optimizer/optimizer.py (+6, -7)

@@ -36,7 +36,7 @@ _reciprocal = P.Reciprocal()
 @_grad_scale.register("Tensor", "Tensor")
 def tensor_grad_scale(scale, grad):
     """ grad scaling """
-    return grad*_reciprocal(scale)
+    return grad * _reciprocal(scale)
 
 
 class _TupleAdd(nn.Cell):
@@ -141,12 +141,11 @@
 
             self._mech_param_updater = None
             if self._mech is not None and self._mech._decay_policy is not None:
-                self._mech_param_updater = _MechanismsParamsUpdater(decay_policy=self._mech._decay_policy,
-                                                                    decay_rate=self._mech._noise_decay_rate,
-                                                                    cur_noise_multiplier=
-                                                                    self._mech._noise_multiplier,
-                                                                    init_noise_multiplier=
-                                                                    self._mech._initial_noise_multiplier)
+                self._mech_param_updater = _MechanismsParamsUpdater(
+                    decay_policy=self._mech._decay_policy,
+                    decay_rate=self._mech._noise_decay_rate,
+                    cur_noise_multiplier=self._mech._noise_multiplier,
+                    init_noise_multiplier=self._mech._initial_noise_multiplier)
 
             def construct(self, gradients):
                 """


mindarmour/privacy/diff_privacy/train/model.py (+7, -9)

@@ -62,7 +62,7 @@ _reciprocal = P.Reciprocal()
 @_grad_scale.register("Tensor", "Tensor")
 def tensor_grad_scale(scale, grad):
     """ grad scaling """
-    return grad*F.cast(_reciprocal(scale), F.dtype(grad))
+    return grad * F.cast(_reciprocal(scale), F.dtype(grad))
 
 
 class DPModel(Model):
@@ -417,10 +417,8 @@
             self._noise_mech_param_updater = _MechanismsParamsUpdater(
                 decay_policy=self._noise_mech._decay_policy,
                 decay_rate=self._noise_mech._noise_decay_rate,
-                cur_noise_multiplier=
-                self._noise_mech._noise_multiplier,
-                init_noise_multiplier=
-                self._noise_mech._initial_noise_multiplier)
+                cur_noise_multiplier=self._noise_mech._noise_multiplier,
+                init_noise_multiplier=self._noise_mech._initial_noise_multiplier)
 
     def construct(self, data, label, sens=None):
         """
@@ -444,8 +442,8 @@
         record_labels = self._split(label)
         # first index
         loss = self.network(record_datas[0], record_labels[0])
-        scaling_sens_filled = C.ones_like(loss)*F.cast(scaling_sens,
-                                                       F.dtype(loss))
+        scaling_sens_filled = C.ones_like(loss) * F.cast(scaling_sens,
+                                                         F.dtype(loss))
         record_grad = self.grad(self.network, weights)(record_datas[0],
                                                        record_labels[0],
                                                        scaling_sens_filled)
@@ -465,8 +463,8 @@
         total_loss = loss
         for i in range(1, self._micro_batches):
             loss = self.network(record_datas[i], record_labels[i])
-            scaling_sens_filled = C.ones_like(loss)*F.cast(scaling_sens,
-                                                           F.dtype(loss))
+            scaling_sens_filled = C.ones_like(loss) * F.cast(scaling_sens,
+                                                             F.dtype(loss))
             record_grad = self.grad(self.network, weights)(record_datas[i],
                                                            record_labels[i],
                                                            scaling_sens_filled)


mindarmour/privacy/evaluation/_check_config.py (+5, -4)

@@ -31,6 +31,7 @@ def _is_positive_int(item):
         return False
     return item > 0
 
+
 def _is_non_negative_int(item):
     """Verify that the value is a non-negative integer."""
     if not isinstance(item, int):
@@ -51,6 +52,7 @@
         return False
     return item >= 0
 
+
 def _is_range_0_1_float(item):
     if not isinstance(item, (int, float)):
         return False
@@ -151,7 +153,6 @@
 }
 
 
-
 def _check_config(attack_config, config_checklist):
     """
     Verify that config_list is valid.
@@ -161,7 +162,7 @@
         check_param_type("config", config, dict)
         if set(config.keys()) != {"params", "method"}:
             msg = "Keys of each config in attack_config must be {}," \
-                "but got {}.".format({'method', 'params'}, set(config.keys()))
+                  "but got {}.".format({'method', 'params'}, set(config.keys()))
             LOGGER.error(TAG, msg)
             raise KeyError(msg)
@@ -175,7 +176,7 @@
 
         if not params.keys() <= config_checklist[method].keys():
             msg = "Params in method {} is not accepted, the parameters " \
-                "that can be set are {}.".format(method, set(config_checklist[method].keys()))
+                  "that can be set are {}.".format(method, set(config_checklist[method].keys()))
 
             LOGGER.error(TAG, msg)
             raise KeyError(msg)
@@ -199,7 +200,7 @@
                 break
 
         if not flag:
-            msg = "Setting of parmeter {} in method {} is invalid".format(param_key, method)
+            msg = "Setting of parameter {} in method {} is invalid".format(param_key, method)
             raise ValueError(msg)






mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py (+96, -75)

@@ -26,11 +26,14 @@ from mindspore.nn import Cell
 from mindarmour.utils.logger import LogUtil
 from mindarmour.utils._check_param import check_int_positive, check_value_positive, \
     check_value_non_negative, check_param_type
+
 LOGGER = LogUtil.get_instance()
 TAG = 'Suppression training.'
 
+
 class SuppressPrivacyFactory:
     """ Factory class of SuppressCtrl mechanisms"""
+
     def __init__(self):
         pass
@@ -107,6 +110,7 @@
             LOGGER.error(TAG, msg)
             raise ValueError(msg)
 
+
 class SuppressCtrl(Cell):
     """
     For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html#%E5%BC%95%E5%85%A5%E6%8A%91%E5%88%B6%E9%9A%90%E7%A7%81%E8%AE%AD%E7%BB%83>`_
@@ -122,6 +126,7 @@
         sparse_end (float): The sparsity to reach.
         sparse_start (Union[float, int]): The sparsity to start.
     """
+
     def __init__(self, networks, mask_layers, end_epoch, batch_num, start_epoch, mask_times, lr,
                  sparse_end, sparse_start):
         super(SuppressCtrl, self).__init__()
@@ -137,7 +142,7 @@
 
         self.weight_lower_bound = 0.005  # all network weight will be larger than this value
         self.sparse_vibra = 0.02  # the sparsity may have certain range of variations
-        self.sparse_valid_max_weight = 0.02  # if max network weight is less than this value, suppress operation stop temporarily
+        self.sparse_valid_max_weight = 0.02  # if max network weight is less than this value, operation stop temporarily
         self.add_noise_thd = 0.50  # if network weight is more than this value, noise is forced
         self.noise_volume = 0.1  # noise volume 0.1
         self.base_ground_thd = 0.0000001  # if network weight is less than this value, will be considered as 0
@@ -149,72 +154,15 @@
         self.mask_start_step = 0  # suppress operation is actually started at this step
         self.mask_prev_step = 0  # previous suppress operation is done at this step
         self.cur_sparse = 0.0  # current sparsity to which one suppress will get
-        self.mask_all_steps = (end_epoch - start_epoch + 1)*batch_num  # the amount of step contained in all suppress operation
-        self.mask_step_interval = self.mask_all_steps/mask_times  # the amount of step contaied in one suppress operation
+        self.mask_all_steps = (end_epoch - start_epoch + 1) * batch_num  # the amount of step contained in all operation
+        self.mask_step_interval = self.mask_all_steps / mask_times  # the amount of step contained in one operation
         self.mask_initialized = False  # flag means the initialization is done
         self.grad_idx_map = []
 
-        if self.lr > 0.5:
-            msg = "learning rate should not be greater than 0.5, but got {}".format(self.lr)
-            LOGGER.error(TAG, msg)
-            raise ValueError(msg)
-
-        if self.mask_start_epoch > self.mask_end_epoch:
-            msg = "start_epoch should not be greater than end_epoch, but got start_epoch and end_epoch are: " \
-                  "{}, {}".format(self.mask_start_epoch, self.mask_end_epoch)
-            LOGGER.error(TAG, msg)
-            raise ValueError(msg)
-
-        if self.mask_end_epoch > 100:
-            msg = "The end_epoch should be smaller than 100, but got {}".format(self.mask_end_epoch)
-            LOGGER.error(TAG, msg)
-            raise ValueError(msg)
-
-        if self.mask_step_interval <= 0:
-            msg = "step_interval should be greater than 0, but got {}".format(self.mask_step_interval)
-            LOGGER.error(TAG, msg)
-            raise ValueError(msg)
-
-        if self.mask_step_interval <= 10 or self.mask_step_interval >= 20:
-            msg = "mask_interval should be greater than 10, smaller than 20, but got {}".format(self.mask_step_interval)
-            msg += "\n Precision of trained model may be poor !!! "
-            msg += "\n please modify epoch_start, epoch_end and batch_num !"
-            msg += "\n mask_interval = (epoch_end-epoch_start+1)*batch_num/mask_times, batch_num = samples/batch_size"
-            LOGGER.info(TAG, msg)
-
-        if self.sparse_end >= 1.00 or self.sparse_end <= 0:
-            msg = "sparse_end should be in range (0, 1), but got {}".format(self.sparse_end)
-            LOGGER.error(TAG, msg)
-            raise ValueError(msg)
-
-        if self.sparse_start >= self.sparse_end:
-            msg = "sparse_start should be smaller than sparse_end, but got sparse_start and sparse_end are: " \
-                  "{}, {}".format(self.sparse_start, self.sparse_end)
-            LOGGER.error(TAG, msg)
-            raise ValueError(msg)
+        self._check_params()
 
         if mask_layers is not None:
-            mask_layer_id = 0
-            for one_mask_layer in mask_layers:
-                if not isinstance(one_mask_layer, MaskLayerDes):
-                    msg = "mask_layers should be a list of MaskLayerDes, but got a {}".format(type(one_mask_layer))
-                    LOGGER.error(TAG, msg)
-                    raise ValueError(msg)
-                layer_name = one_mask_layer.layer_name
-                mask_layer_id2 = 0
-                for one_mask_layer_2 in mask_layers:
-                    if mask_layer_id != mask_layer_id2 and layer_name == one_mask_layer_2.layer_name:
-                        msg = "Mask layer name should be unique, but got duplicate name: {} in mask_layer {} and {}".\
-                            format(layer_name, mask_layer_id, mask_layer_id2)
-                        LOGGER.error(TAG, msg)
-                        raise ValueError(msg)
-                    if mask_layer_id != mask_layer_id2 and one_mask_layer.grad_idx == one_mask_layer_2.grad_idx:
-                        msg = "Grad_idx should be unique, but got duplicate idx: {} in mask_layer {} and {}".\
-                            format(layer_name, one_mask_layer_2.layer_name, one_mask_layer.grad_idx)
-                        LOGGER.error(TAG, msg)
-                        raise ValueError(msg)
-                    mask_layer_id2 = mask_layer_id2 + 1
-                mask_layer_id = mask_layer_id + 1
+            self._check_mask_layers()
 
         if networks is not None:
             for layer in networks.get_parameters(expand=True):
@@ -277,6 +225,71 @@
         msg += "\nsup_privacy only support SGD optimizer"
         LOGGER.warn(TAG, msg)
 
+    def _check_params(self):
+        """check parameters"""
+        if self.lr > 0.5:
+            msg = "learning rate should not be greater than 0.5, but got {}".format(self.lr)
+            LOGGER.error(TAG, msg)
+            raise ValueError(msg)
+
+        if self.mask_start_epoch > self.mask_end_epoch:
+            msg = "start_epoch should not be greater than end_epoch, but got start_epoch and end_epoch are: " \
+                  "{}, {}".format(self.mask_start_epoch, self.mask_end_epoch)
+            LOGGER.error(TAG, msg)
+            raise ValueError(msg)
+
+        if self.mask_end_epoch > 100:
+            msg = "The end_epoch should be smaller than 100, but got {}".format(self.mask_end_epoch)
+            LOGGER.error(TAG, msg)
+            raise ValueError(msg)
+
+        if self.mask_step_interval <= 0:
+            msg = "step_interval should be greater than 0, but got {}".format(self.mask_step_interval)
+            LOGGER.error(TAG, msg)
+            raise ValueError(msg)
+
+        if self.mask_step_interval <= 10 or self.mask_step_interval >= 20:
+            msg = "mask_interval should be greater than 10, smaller than 20, but got {}".format(self.mask_step_interval)
+            msg += "\n Precision of trained model may be poor !!! "
+            msg += "\n please modify epoch_start, epoch_end and batch_num !"
+            msg += "\n mask_interval = (epoch_end-epoch_start+1)*batch_num/mask_times, batch_num = samples/batch_size"
+            LOGGER.info(TAG, msg)
+
+        if self.sparse_end >= 1.00 or self.sparse_end <= 0:
+            msg = "sparse_end should be in range (0, 1), but got {}".format(self.sparse_end)
+            LOGGER.error(TAG, msg)
+            raise ValueError(msg)
+
+        if self.sparse_start >= self.sparse_end:
+            msg = "sparse_start should be smaller than sparse_end, but got sparse_start and sparse_end are: " \
+                  "{}, {}".format(self.sparse_start, self.sparse_end)
+            LOGGER.error(TAG, msg)
+            raise ValueError(msg)
+
+    def _check_mask_layers(self):
+        """check mask layers"""
+        mask_layer_id = 0
+        for one_mask_layer in self.mask_layers:
+            if not isinstance(one_mask_layer, MaskLayerDes):
+                msg = "mask_layers should be a list of MaskLayerDes, but got a {}".format(type(one_mask_layer))
+                LOGGER.error(TAG, msg)
+                raise ValueError(msg)
+            layer_name = one_mask_layer.layer_name
+            mask_layer_id2 = 0
+            for one_mask_layer_2 in self.mask_layers:
+                if mask_layer_id != mask_layer_id2 and layer_name == one_mask_layer_2.layer_name:
+                    msg = "Mask layer name should be unique, but got duplicate name: {} in mask_layer {} and {}". \
+                        format(layer_name, mask_layer_id, mask_layer_id2)
+                    LOGGER.error(TAG, msg)
+                    raise ValueError(msg)
+                if mask_layer_id != mask_layer_id2 and one_mask_layer.grad_idx == one_mask_layer_2.grad_idx:
+                    msg = "Grad_idx should be unique, but got duplicate idx: {} in mask_layer {} and {}". \
+                        format(layer_name, one_mask_layer_2.layer_name, one_mask_layer.grad_idx)
+                    LOGGER.error(TAG, msg)
+                    raise ValueError(msg)
+                mask_layer_id2 = mask_layer_id2 + 1
+            mask_layer_id = mask_layer_id + 1
+
     def update_status(self, cur_epoch, cur_step, cur_step_in_epoch):
         """
         Update the suppress operation status.
@@ -296,7 +309,7 @@
             self.mask_prev_step = cur_step
             self.to_do_mask = True
         # execute the last suppression operation
-        elif cur_epoch == self.mask_end_epoch and cur_step_in_epoch == self.batch_num-2:
+        elif cur_epoch == self.mask_end_epoch and cur_step_in_epoch == self.batch_num - 2:
             self.mask_prev_step = cur_step
             self.to_do_mask = True
         else:
@@ -340,8 +353,8 @@
         grad_mask_cell = self.grads_mask_list[grad_idx]
         last_sparse_pos = grad_mask_cell.sparse_pos_list[-1]
         if actual_stop_pos <= 0 or \
-           (actual_stop_pos < last_sparse_pos + grad_mask_cell.part_num and \
-            grad_mask_cell.is_approximity and m > 0):
+                (actual_stop_pos < last_sparse_pos + grad_mask_cell.part_num and \
+                 grad_mask_cell.is_approximity and m > 0):
             sparse_weight_thd = 0
             msg = "{} len={}, sparse={}, current sparse thd={}, [idle] \n" \
                 .format(layer.name, len_array, actual_stop_pos / len_array, sparse_weight_thd)
@@ -377,7 +390,7 @@
         del partition
 
         msg = "{} len={}, sparse={}, current sparse thd={}, max={}, min={}, avg={}, avg_abs={} \n".format(
-            layer.name, len_array, actual_stop_pos/len_array, sparse_weight_thd,
+            layer.name, len_array, actual_stop_pos / len_array, sparse_weight_thd,
             weight_abs_max, weight_abs_min, weight_avg, weight_abs_avg)
         LOGGER.info(TAG, msg)
         del weight_array_flat_abs
@@ -413,7 +426,7 @@
         p = 0
         q = 0
         # add noise on weights if not masking or clipping.
-        weight_noise_bound = min(self.add_noise_thd, max(self.noise_volume*10, weight_abs_max*0.75))
+        weight_noise_bound = min(self.add_noise_thd, max(self.noise_volume * 10, weight_abs_max * 0.75))
         size = self.grads_mask_list[layer_index].para_num
         for i in range(0, size):
             if mul_mask_array_flat[i] <= 0.0:
@@ -428,14 +441,14 @@
                 else:
                     # not mask
                     if weight_array_flat[i] > 0.0:
-                        add_mask_array_flat[i] = (weight_array_flat[i] \
+                        add_mask_array_flat[i] = (weight_array_flat[i]
                                                   - min(self.weight_lower_bound, sparse_weight_thd)) / self.lr
                     else:
                         add_mask_array_flat[i] = (weight_array_flat[i]
                                                   + min(self.weight_lower_bound, sparse_weight_thd)) / self.lr
                     p = p + 1
             elif is_lower_clip and abs(weight_array_flat[i]) <= \
-                    self.weight_lower_bound and sparse_weight_thd > self.weight_lower_bound*0.5:
+                    self.weight_lower_bound and sparse_weight_thd > self.weight_lower_bound * 0.5:
                 # not mask
                 mul_mask_array_flat[i] = 1.0
                 if weight_array_flat[i] > 0.0:
@@ -463,8 +476,8 @@
         grad_mask_cell.update()
         de_weight_cell.update()
         msg = "Dimension of mask tensor is {}D, which located in the {}-th layer of the network. \n The number of " \
-              "suppressed elements, max-clip elements, min-clip elements and noised elements are {}, {}, {}, {}"\
-            .format(len(grad_mask_cell.mul_mask_array_shape), layer_index, m, n, p, q)
+              "suppressed elements, max-clip elements, min-clip elements and noised elements are {}, {}, {}, {}" \
+              .format(len(grad_mask_cell.mul_mask_array_shape), layer_index, m, n, p, q)
         LOGGER.info(TAG, msg)
         grad_mask_cell.sparse_pos_list.append(m)


@@ -500,8 +513,8 @@
         for i in range(0, part_num):
             if split_k_num <= 0:
                 break
-            array_row_mul_mask = mul_mask_array_flat[i * part_size : (i + 1) * part_size]
-            array_row_flat_abs = weight_array_flat_abs[i * part_size : (i + 1) * part_size]
+            array_row_mul_mask = mul_mask_array_flat[i * part_size: (i + 1) * part_size]
+            array_row_flat_abs = weight_array_flat_abs[i * part_size: (i + 1) * part_size]
             if not init_batch_suppress:
                 array_row_flat_abs_masked = np.where(array_row_mul_mask <= 0.0, -1.0, array_row_flat_abs)
                 set_abs = set(array_row_flat_abs_masked)
@@ -553,7 +566,7 @@
             split_k_num, (actual_stop_pos - last_sparse_pos), actual_stop_pos, real_suppress_num)
         LOGGER.info(TAG, msg)
         if init_batch_suppress:
-            init_sparse_actual = real_suppress_num/para_num
+            init_sparse_actual = real_suppress_num / para_num
             print("init batch suppresss, actual sparse = {}".format(init_sparse_actual))
 
         gc.collect()
@@ -660,6 +673,7 @@
         return sparse, sparse_value_1, sparse_value_2
 
     def calc_actual_sparse_for_fc1(self, networks):
+        """calculate actual sparse for full connection 1 layer"""
        return self.calc_actual_sparse_for_layer(networks, "fc1.weight")
 
     def calc_actual_sparse_for_layer(self, networks, layer_name):
@@ -716,6 +730,7 @@
         msg += "\nsup_privacy only support SGD optimizer"
         LOGGER.info(TAG, msg)
 
+
 def get_one_mask_layer(mask_layers, layer_name):
     """
     Returns the layer definitions that need to be suppressed.
@@ -732,6 +747,7 @@
             return each_mask_layer
     return None
 
+
 class MaskLayerDes:
     """
     Describe the layer that need to be suppressed.
@@ -763,6 +779,7 @@
         >>> masklayers = []
         >>> masklayers.append(MaskLayerDes("conv1.weight", 0, False, True, 10))
     """
+
     def __init__(self, layer_name, grad_idx, is_add_noise, is_lower_clip, min_num, upper_bound=1.20):
         self.layer_name = check_param_type('layer_name', layer_name, str)
         check_param_type('grad_idx', grad_idx, int)
@@ -773,6 +790,7 @@
         self.upper_bound = check_value_positive('upper_bound', upper_bound)
         self.inited = False
 
+
 class GradMaskInCell(Cell):
     """
     Define the mask matrix for gradients masking.
@@ -787,6 +805,7 @@
             If min_num is smaller than (parameter num*SupperssCtrl.sparse_end), min_num has no effect.
         upper_bound ([float, int]): max abs value of weight in this layer, default: 1.20.
     """
+
     def __init__(self, array, is_add_noise, is_lower_clip, min_num, upper_bound=1.20):
         super(GradMaskInCell, self).__init__()
         self.mul_mask_array_shape = array.shape
@@ -806,7 +825,7 @@
         self.part_size = self.para_num
         self.part_num_max = 16
         self.para_many_num = 10000
-        self.para_huge_num = 10*10000*10000
+        self.para_huge_num = 10 * 10000 * 10000
 
         if self.para_num > self.para_many_num:
             self.is_approximity = True
@@ -836,6 +855,7 @@
         """
         self.mul_mask_tensor = Tensor(self.mul_mask_array_flat.reshape(self.mul_mask_array_shape), mstype.float32)
 
+
 class DeWeightInCell(Cell):
     """
     Define the mask matrix for de-weight masking.
@@ -843,6 +863,7 @@
     Args:
         array (numpy.ndarray): The mask array.
     """
+
     def __init__(self, array):
         super(DeWeightInCell, self).__init__()
         self.add_mask_array_shape = array.shape
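
A note on the step-interval arithmetic that `_check_params` above warns about: mask_step_interval = (end_epoch - start_epoch + 1) * batch_num / mask_times, where batch_num = samples / batch_size. A quick worked example with hypothetical numbers (not from this repository):

# Hypothetical training setup, checking the (10, 20) window from _check_params:
start_epoch, end_epoch = 1, 10
batch_num = 4800 // 32                                       # samples / batch_size = 150
mask_times = 100

mask_all_steps = (end_epoch - start_epoch + 1) * batch_num   # 10 * 150 = 1500
mask_step_interval = mask_all_steps / mask_times             # 15.0, inside (10, 20)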


mindarmour/privacy/sup_privacy/train/model.py (+0, -1)

@@ -196,7 +196,6 @@
     def construct(self, input1, input2):
         """Add two tuple of data."""
         out = self.hyper_map(self.mul, input1, input2)
-        #print(out)
         return out
 
 # come from nn.cell_wrapper.TrainOneStepCell


mindarmour/reliability/concept_drift/concept_drift_check_images.py (+1, -3)

@@ -11,8 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ============================================================================
-
 """
 Out-of-Distribution detection module for images.
 """
@@ -33,6 +31,7 @@ class OodDetector:
         model (Model):The training model.
         ds_train (numpy.ndarray): The training dataset.
     """
+
     def __init__(self, model, ds_train):
         self.model = model
         self.ds_train = check_param_type('ds_train', ds_train, np.ndarray)
@@ -66,7 +65,6 @@
             - float, the optimal threshold.
         """
 
-
     def ood_predict(self, threshold, ds_test):
         """
         The out-of-distribution detection.


mindarmour/reliability/concept_drift/concept_drift_check_time_series.py (+0, -2)

@@ -9,8 +9,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ============================================================================
-
 """
 Concpt drift module
 """


mindarmour/reliability/model_fault_injection/__init__.py (+0, -1)

@@ -8,7 +8,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ============================================================================
 """
 This module provides model fault injection to evaluate the reliability of given model.
 """


mindarmour/reliability/model_fault_injection/fault_injection.py (+0, -2)

@@ -8,8 +8,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ============================================================================
-
 """
 Fault injection module
 """


mindarmour/reliability/model_fault_injection/fault_type.py (+0, -2)

@@ -8,8 +8,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ============================================================================
-
 """
 Fault type module
 """


setup.py (+5, -1)

@@ -11,6 +11,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""
+setup script
+"""
 import os
 import stat
 import shlex
@@ -27,7 +30,7 @@ pkg_dir = os.path.join(cur_dir, 'build')
 
 
 def clean():
-    # pylint: disable=unused-argument
+    """clean"""
     def readonly_handler(func, path, execinfo):
         os.chmod(path, stat.S_IWRITE)
         func(path)
@@ -38,6 +41,7 @@
 
 
 def write_version(file):
+    """write version"""
     file.write("__version__ = '{}'\n".format(version))
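
The `readonly_handler` defined inside `clean()` above is the usual Windows workaround for deleting read-only files: `shutil.rmtree` calls the handler when a removal fails, the handler clears the read-only bit and retries. A hedged sketch of how such a handler is typically wired up (the 'build' path is an assumption, not the repository's exact call):

import os
import shutil
import stat

def readonly_handler(func, path, execinfo):
    """Clear the read-only bit and retry the failed operation."""
    os.chmod(path, stat.S_IWRITE)
    func(path)

# e.g. removing a build directory that may contain read-only files:
shutil.rmtree('build', onerror=readonly_handler)

The `execinfo` parameter goes unused by design, which lines up with the `unused-argument` filter entry this commit adds for setup.py in filter_pylint.txt.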





