2. Fix for the internal method change in the Model class: _get_mirror_mean was renamed to _get_gradients_mean. (tags/v1.0.0)
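For reference, a minimal sketch of the updated usage (assuming MindSpore 1.0; the batch size, class count, and random data below are illustrative, not taken from the changed files):

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor
    # Renamed internal helper (formerly _get_mirror_mean); it is only needed
    # inside distributed-training wrappers that build a DistributedGradReducer.
    from mindspore.parallel._utils import _get_gradients_mean

    # The 'is_grad' argument was removed from the constructor;
    # pass only the remaining arguments.
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")

    logits = Tensor(np.random.rand(32, 10).astype(np.float32))   # 32 samples, 10 classes
    labels = Tensor(np.random.randint(0, 10, (32,)).astype(np.int32))
    print(loss(logits, labels))  # scalar mean cross-entropy

The diff below applies the argument removal and the helper rename across the affected example, library, and test files: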
@@ -116,7 +116,7 @@ def test(cloud_args=None):
     net = vgg16(num_classes=args.num_classes, args=args)
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, args.momentum,
                    weight_decay=args.weight_decay)
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean', is_grad=False)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
     param_dict = load_checkpoint(args.pre_trained)
@@ -59,7 +59,7 @@ if __name__ == "__main__":
     # load the pretrained model
     net = vgg16(args.num_classes, args)
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     opt = nn.Momentum(params=get_param_groups(net), learning_rate=0.1, momentum=0.9,
                       weight_decay=args.weight_decay, loss_scale=args.loss_scale)
     load_param_into_net(net, load_checkpoint(args.pre_trained))
@@ -182,7 +182,7 @@ if __name__ == '__main__':
                       weight_decay=args.weight_decay,
                       loss_scale=args.loss_scale)
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean', is_grad=False)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     model = Model(network, loss_fn=loss, optimizer=opt, metrics={'acc'},
                   amp_level="O2", keep_batchnorm_fp32=False, loss_scale_manager=None)
@@ -91,8 +91,7 @@ if __name__ == "__main__":
     context.set_context(mode=context.GRAPH_MODE,
                         device_target=cfg.device_target)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     config_ck = CheckpointConfig(
         save_checkpoint_steps=cfg.save_checkpoint_steps,
         keep_checkpoint_max=cfg.keep_checkpoint_max)
@@ -90,8 +90,7 @@ if __name__ == "__main__":
     context.set_context(mode=context.GRAPH_MODE,
                         device_target=cfg.device_target)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     config_ck = CheckpointConfig(
         save_checkpoint_steps=cfg.save_checkpoint_steps,
         keep_checkpoint_max=cfg.keep_checkpoint_max)
@@ -90,8 +90,7 @@ if __name__ == "__main__":
     context.set_context(mode=context.GRAPH_MODE,
                         device_target=cfg.device_target)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     config_ck = CheckpointConfig(
         save_checkpoint_steps=cfg.save_checkpoint_steps,
         keep_checkpoint_max=cfg.keep_checkpoint_max)
@@ -89,7 +89,7 @@ def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1,
 if __name__ == "__main__":
     context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                                  keep_checkpoint_max=cfg.keep_checkpoint_max)
     ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
@@ -73,7 +73,7 @@ def test_lenet_mnist_coverage():
     LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())
     # generate adv_data
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
     adv_data = attack.batch_generate(test_images, test_labels, batch_size=32)
     model_fuzz_test.calculate_coverage(adv_data, bias_coefficient=0.5)
@@ -75,7 +75,7 @@ def test_fast_gradient_sign_method():
     LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)
     # attacking
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
     start_time = time.clock()
     adv_data = attack.batch_generate(np.concatenate(test_images),
@@ -83,7 +83,7 @@ def test_lbfgs_attack():
                 targeted_labels[i] = (targeted_labels[i] + 1) % 10
     else:
         targeted_labels = true_labels.astype(np.int32)
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = LBFGS(net, is_targeted=is_targeted, loss_fn=loss)
     start_time = time.clock()
     adv_data = attack.batch_generate(np.concatenate(test_images),
@@ -77,7 +77,7 @@ def test_momentum_diverse_input_iterative_method():
     LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)
     # attacking
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = MomentumDiverseInputIterativeMethod(net, loss_fn=loss)
     start_time = time.clock()
     adv_data = attack.batch_generate(np.concatenate(test_images),
@@ -75,7 +75,7 @@ def test_projected_gradient_descent_method():
     LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)
     # attacking
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = ProjectedGradientDescent(net, eps=0.3, loss_fn=loss)
     start_time = time.clock()
     adv_data = attack.batch_generate(np.concatenate(test_images),
@@ -48,7 +48,7 @@ def test_nad_method():
     ds_train = generate_mnist_dataset(os.path.join(mnist_path, "train"),
                                       batch_size=batch_size, repeat_size=1)
     net = LeNet5()
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)
     model = Model(net, loss, opt, metrics=None)
     model.train(10, ds_train, callbacks=[LossMonitor()],
@@ -164,7 +164,7 @@ def test_black_defense():
     wb_model = ModelToBeAttacked(wb_net)
     # gen white-box adversarial examples of test data
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     wb_attack = FastGradientSignMethod(wb_net, eps=0.3, loss_fn=loss)
     wb_adv_sample = wb_attack.generate(attacked_sample,
                                        attacked_true_label)
@@ -38,8 +38,7 @@ def mnist_train(epoch_size, batch_size, lr, momentum):
                                       batch_size=batch_size, repeat_size=1)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), lr, momentum)
     config_ck = CheckpointConfig(save_checkpoint_steps=1875,
                                  keep_checkpoint_max=10)
@@ -73,8 +73,7 @@ class GradientMethod(Attack):
         else:
             self._alpha = alpha
         if loss_fn is None:
-            loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False,
-                                                    sparse=False)
+            loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
         with_loss_cell = WithLossCell(self._network, loss_fn)
         self._grad_all = GradWrapWithLoss(with_loss_cell)
         self._grad_all.set_train()
@@ -129,7 +129,7 @@ class IterativeGradientMethod(Attack):
         for b in self._bounds:
             _ = check_param_multi_types('bound', b, [int, float])
         if loss_fn is None:
-            loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
+            loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
         self._loss_grad = GradWrapWithLoss(WithLossCell(self._network, loss_fn))
         self._loss_grad.set_train()
@@ -66,7 +66,7 @@ class LBFGS(Attack):
         self._nb_iter = check_int_positive('nb_iter', nb_iter)
         self._search_iters = check_int_positive('search_iters', search_iters)
         if loss_fn is None:
-            loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
+            loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
         with_loss_cell = WithLossCell(self._network, loss_fn)
         self._grad_all = GradWrapWithLoss(with_loss_cell)
         self._dtype = None
@@ -58,7 +58,7 @@ class AdversarialDefense(Defense):
     >>> net = Net()
     >>> lr = 0.0001
     >>> momentum = 0.9
-    >>> loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    >>> loss_fn = SoftmaxCrossEntropyWithLogits(sparse=True)
     >>> optimizer = Momentum(net.trainable_params(), lr, momentum)
     >>> adv_defense = AdversarialDefense(net, loss_fn, optimizer)
     >>> inputs = np.random.rand(32, 1, 28, 28).astype(np.float32)
@@ -70,7 +70,7 @@ class AdversarialDefense(Defense):
         super(AdversarialDefense, self).__init__(network)
         network = check_model('network', network, Cell)
         if loss_fn is None:
-            loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+            loss_fn = SoftmaxCrossEntropyWithLogits(sparse=True)
         if optimizer is None:
             optimizer = Momentum(
@@ -123,7 +123,7 @@ class NoiseMechanismsFactory:
     >>> batch_size = 32
     >>> batches = 128
     >>> epochs = 1
-    >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     >>> noise_mech = NoiseMechanismsFactory().create('Gaussian',
     >>>                                              norm_bound=norm_bound,
     >>>                                              initial_noise_multiplier=initial_noise_multiplier)
@@ -39,7 +39,7 @@ from mindspore.ops.operations import NPUClearFloatStatus
 from mindspore.ops.operations import ReduceSum
 from mindspore.ops.operations import LessEqual
 from mindspore.ops.operations import ControlDepend
-from mindspore.parallel._utils import _get_mirror_mean
+from mindspore.parallel._utils import _get_gradients_mean
 from mindspore.parallel._utils import _get_device_num
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
 from mindspore.common.parameter import Parameter
@@ -93,7 +93,7 @@ class DPModel(Model):
     >>> batches = 128
     >>> epochs = 1
     >>> micro_batches = 2
-    >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     >>> factory_opt = DPOptimizerClassFactory(micro_batches=micro_batches)
     >>> factory_opt.set_mechanisms('Gaussian',
     >>>                            norm_bound=norm_bound,
@@ -405,7 +405,7 @@ class _TrainOneStepWithLossScaleCell(Cell):
         self.reducer_flag = self.parallel_mode in [ParallelMode.DATA_PARALLEL,
                                                    ParallelMode.HYBRID_PARALLEL]
         if self.reducer_flag:
-            mean = _get_mirror_mean()
+            mean = _get_gradients_mean()
             degree = _get_device_num()
             self.grad_reducer = DistributedGradReducer(optimizer.parameters,
                                                        mean, degree)
@@ -611,7 +611,7 @@ class _TrainOneStepCell(Cell):
                 ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL):
             self.reducer_flag = True
         if self.reducer_flag:
-            mean = _get_mirror_mean()
+            mean = _get_gradients_mean()
             degree = _get_device_num()
             self.grad_reducer = DistributedGradReducer(optimizer.parameters,
                                                        mean, degree)
@@ -111,7 +111,7 @@ def test_fast_gradient_method_cpu():
     input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
     label = np.asarray([2], np.int32)
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = FastGradientMethod(Net(), loss_fn=loss)
     ms_adv_x = attack.generate(input_np, label)
@@ -95,7 +95,7 @@ if __name__ == '__main__':
     attack.generate(inputs_np, labels_np)
     # test train ops
-    loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse)
+    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
     optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                          0.01, 0.9)
     loss_net = WithLossCell(net, loss_fn)
@@ -52,7 +52,7 @@ def test_ad():
     labels = np.eye(num_classes)[labels].astype(np.float32)
     net = Net()
-    loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse)
+    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
     optimizer = Momentum(learning_rate=Tensor(np.array([0.001], np.float32)),
                          momentum=0.9,
                          params=net.trainable_params())
@@ -54,7 +54,7 @@ def test_ead():
     labels = np.eye(num_classes)[labels].astype(np.float32)
     net = Net()
-    loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse)
+    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
     optimizer = Momentum(net.trainable_params(), 0.001, 0.9)
     net = Net()
@@ -52,7 +52,7 @@ def test_nad():
     labels = np.eye(num_classes)[labels].astype(np.float32)
     net = Net()
-    loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse)
+    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
     optimizer = Momentum(net.trainable_params(), 0.001, 0.9)
     # defense
@@ -53,7 +53,7 @@ def test_pad():
     # construct network
     net = Net()
-    loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse)
+    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
     optimizer = Momentum(net.trainable_params(), 0.001, 0.9)
     # defense
@@ -48,7 +48,7 @@ def dataset_generator(batch_size, batches):
 @pytest.mark.component_mindarmour
 def test_get_membership_inference_object():
     net = Net()
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
     model = Model(network=net, loss_fn=loss, optimizer=opt)
     inference_model = MembershipInference(model)
@@ -62,7 +62,7 @@ def test_get_membership_inference_object():
 @pytest.mark.component_mindarmour
 def test_membership_inference_object_train():
     net = Net()
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
     model = Model(network=net, loss_fn=loss, optimizer=opt)
     inference_model = MembershipInference(model)
@@ -92,7 +92,7 @@ def test_membership_inference_object_train():
 @pytest.mark.component_mindarmour
 def test_membership_inference_eval():
     net = Net()
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
     model = Model(network=net, loss_fn=loss, optimizer=opt)
     inference_model = MembershipInference(model)
@@ -53,7 +53,7 @@ def test_dp_model_with_pynative_mode():
     batches = 128
     epochs = 1
     micro_batches = 2
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     factory_opt = DPOptimizerClassFactory(micro_batches=micro_batches)
     factory_opt.set_mechanisms('Gaussian',
                                norm_bound=norm_bound,
@@ -92,7 +92,7 @@ def test_dp_model_with_graph_mode():
     batch_size = 32
     batches = 128
     epochs = 1
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     noise_mech = NoiseMechanismsFactory().create('Gaussian',
                                                  norm_bound=norm_bound,
                                                  initial_noise_multiplier=initial_noise_multiplier)
@@ -131,7 +131,7 @@ def test_dp_model_with_graph_mode_ada_gaussian():
     batches = 128
     epochs = 1
    alpha = 0.8
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     noise_mech = NoiseMechanismsFactory().create('AdaGaussian',
                                                  norm_bound=norm_bound,
                                                  initial_noise_multiplier=initial_noise_multiplier,
@@ -58,8 +58,7 @@ def test_dp_monitor():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
     model = Model(network, net_loss, net_opt)
@@ -88,8 +87,7 @@ def test_dp_monitor_gpu():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
     model = Model(network, net_loss, net_opt)
@@ -118,8 +116,7 @@ def test_dp_monitor_cpu():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
     model = Model(network, net_loss, net_opt)
@@ -149,8 +146,7 @@ def test_dp_monitor_zcdp():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
     model = Model(network, net_loss, net_opt)
@@ -179,8 +175,7 @@ def test_dp_monitor_zcdp_gpu():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
     model = Model(network, net_loss, net_opt)
@@ -209,8 +204,7 @@ def test_dp_monitor_zcdp_cpu():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
     model = Model(network, net_loss, net_opt)
@@ -83,7 +83,7 @@ def test_lenet_mnist_coverage_cpu():
     LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())
     # generate adv_data
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
     adv_data = attack.batch_generate(test_data, test_labels, batch_size=32)
     model_fuzz_test.calculate_coverage(adv_data, bias_coefficient=0.5)