
Modify MNIST examples to support all devices (CPU, GPU, Ascend)

tags/v0.3.0-alpha
jin-xiulang 5 years ago
parent
commit
4c4c34edef
19 changed files with 71 additions and 1235 deletions
  1.  +2   -1    example/mnist_demo/lenet5_mnist_coverage.py
  2.  +2   -1    example/mnist_demo/lenet5_mnist_fuzzing.py
  3.  +3   -81   example/mnist_demo/mnist_attack_cw.py
  4.  +3   -82   example/mnist_demo/mnist_attack_deepfool.py
  5.  +3   -80   example/mnist_demo/mnist_attack_fgsm.py
  6.  +3   -88   example/mnist_demo/mnist_attack_genetic.py
  7.  +3   -82   example/mnist_demo/mnist_attack_hsja.py
  8.  +3   -86   example/mnist_demo/mnist_attack_jsma.py
  9.  +3   -94   example/mnist_demo/mnist_attack_lbfgs.py
  10. +4   -78   example/mnist_demo/mnist_attack_mdi2fgsm.py
  11. +3   -90   example/mnist_demo/mnist_attack_nes.py
  12. +3   -80   example/mnist_demo/mnist_attack_pgd.py
  13. +3   -86   example/mnist_demo/mnist_attack_pointwise.py
  14. +3   -81   example/mnist_demo/mnist_attack_pso.py
  15. +3   -90   example/mnist_demo/mnist_attack_salt_and_pepper.py
  16. +3   -105  example/mnist_demo/mnist_defense_nad.py
  17. +17  -16   example/mnist_demo/mnist_evaluation.py
  18. +5   -8    example/mnist_demo/mnist_similarity_detector.py
  19. +2   -6    example/mnist_demo/mnist_train.py
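
Every attack, fuzzing and coverage example below changes in the same way: the Ascend-only duplicate of the test and its pytest platform markers are deleted, the surviving function body no longer calls context.set_context(), and the device is chosen once in the __main__ block. A minimal sketch of the resulting layout (names illustrative, not copied from any one file):

from mindspore import context

def test_example_attack():
    """Demo body: loads the LeNet5 checkpoint and runs the attack.
    Note there is no context.set_context() call in here any more."""
    ...

if __name__ == '__main__':
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    test_example_attack()

Switching device is now a one-line edit in __main__ rather than a change inside every test.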

+2  -1   example/mnist_demo/lenet5_mnist_coverage.py

@@ -33,7 +33,6 @@ LOGGER.set_level('INFO')


def test_lenet_mnist_coverage():
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -85,4 +84,6 @@ def test_lenet_mnist_coverage():


if __name__ == '__main__':
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_lenet_mnist_coverage()

+2  -1   example/mnist_demo/lenet5_mnist_fuzzing.py

@@ -32,7 +32,6 @@ LOGGER.set_level('INFO')


def test_lenet_mnist_fuzzing():
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -87,4 +86,6 @@ def test_lenet_mnist_fuzzing():


if __name__ == '__main__':
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_lenet_mnist_fuzzing()

+3  -81   example/mnist_demo/mnist_attack_cw.py

@@ -15,7 +15,6 @@ import sys
import time

import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context
@@ -36,89 +35,10 @@ LOGGER.set_level('INFO')
TAG = 'CW_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_carlini_wagner_attack():
"""
CW-Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size=batch_size)

# prediction accuracy before attack
model = Model(net)
batch_num = 3 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.concatenate(test_labels)
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

# attacking
num_classes = 10
attack = CarliniWagnerL2Attack(net, num_classes, targeted=False)
start_time = time.clock()
adv_data = attack.batch_generate(np.concatenate(test_images),
np.concatenate(test_labels), batch_size=32)
stop_time = time.clock()
pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
# rescale predict confidences into (0, 1).
pred_logits_adv = softmax(pred_logits_adv, axis=1)
pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
LOGGER.info(TAG, "prediction accuracy after attacking is : %s",
accuracy_adv)
test_labels = np.eye(10)[np.concatenate(test_labels)]
attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1),
test_labels, adv_data.transpose(0, 2, 3, 1),
pred_logits_adv)
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
attack_evaluate.mis_classification_rate())
LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
attack_evaluate.avg_conf_adv_class())
LOGGER.info(TAG, 'The average confidence of true class is : %s',
attack_evaluate.avg_conf_true_class())
LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_lp_distance())
LOGGER.info(TAG, 'The average structural similarity between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_ssim())
LOGGER.info(TAG, 'The average costing time is %s',
(stop_time - start_time)/(batch_num*batch_size))


def test_carlini_wagner_attack_cpu():
"""
CW-Attack test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -188,4 +108,6 @@ def test_carlini_wagner_attack_cpu():


if __name__ == '__main__':
test_carlini_wagner_attack_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_carlini_wagner_attack()
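
An aside, not part of this commit: the timing code visible in the removed hunk (and repeated in the other attack examples) uses time.clock(), which was deprecated in Python 3.3 and removed in 3.8. On current Python the same per-sample timing would read, for example:

import time

start_time = time.perf_counter()                 # time.clock() in the hunks above
# ... attack.batch_generate(...) as in the examples ...
stop_time = time.perf_counter()
print((stop_time - start_time) / (3 * 32))       # batch_num * batch_size in the CW example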

+3  -82   example/mnist_demo/mnist_attack_deepfool.py

@@ -15,7 +15,6 @@ import sys
import time

import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context
@@ -36,90 +35,10 @@ LOGGER.set_level('INFO')
TAG = 'DeepFool_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_deepfool_attack():
"""
DeepFool-Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size=batch_size)

# prediction accuracy before attack
model = Model(net)
batch_num = 3 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.concatenate(test_labels)
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

# attacking
classes = 10
attack = DeepFool(net, classes, norm_level=2,
bounds=(0.0, 1.0))
start_time = time.clock()
adv_data = attack.batch_generate(np.concatenate(test_images),
np.concatenate(test_labels), batch_size=32)
stop_time = time.clock()
pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
# rescale predict confidences into (0, 1).
pred_logits_adv = softmax(pred_logits_adv, axis=1)
pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
LOGGER.info(TAG, "prediction accuracy after attacking is : %s",
accuracy_adv)
test_labels = np.eye(10)[np.concatenate(test_labels)]
attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1),
test_labels, adv_data.transpose(0, 2, 3, 1),
pred_logits_adv)
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
attack_evaluate.mis_classification_rate())
LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
attack_evaluate.avg_conf_adv_class())
LOGGER.info(TAG, 'The average confidence of true class is : %s',
attack_evaluate.avg_conf_true_class())
LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_lp_distance())
LOGGER.info(TAG, 'The average structural similarity between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_ssim())
LOGGER.info(TAG, 'The average costing time is %s',
(stop_time - start_time)/(batch_num*batch_size))


def test_deepfool_attack_cpu():
"""
DeepFool-Attack test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -190,4 +109,6 @@ def test_deepfool_attack_cpu():


if __name__ == '__main__':
test_deepfool_attack_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_deepfool_attack()

+3  -80   example/mnist_demo/mnist_attack_fgsm.py

@@ -15,7 +15,6 @@ import sys
import time

import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context
@@ -37,88 +36,10 @@ LOGGER.set_level('INFO')
TAG = 'FGSM_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_fast_gradient_sign_method():
"""
FGSM-Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size, sparse=False)

# prediction accuracy before attack
model = Model(net)
batch_num = 3 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.argmax(np.concatenate(test_labels), axis=1)
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

# attacking
attack = FastGradientSignMethod(net, eps=0.3)
start_time = time.clock()
adv_data = attack.batch_generate(np.concatenate(test_images),
np.concatenate(test_labels), batch_size=32)
stop_time = time.clock()
np.save('./adv_data', adv_data)
pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
# rescale predict confidences into (0, 1).
pred_logits_adv = softmax(pred_logits_adv, axis=1)
pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv)
attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1),
np.concatenate(test_labels),
adv_data.transpose(0, 2, 3, 1),
pred_logits_adv)
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
attack_evaluate.mis_classification_rate())
LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
attack_evaluate.avg_conf_adv_class())
LOGGER.info(TAG, 'The average confidence of true class is : %s',
attack_evaluate.avg_conf_true_class())
LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_lp_distance())
LOGGER.info(TAG, 'The average structural similarity between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_ssim())
LOGGER.info(TAG, 'The average costing time is %s',
(stop_time - start_time)/(batch_num*batch_size))


def test_fast_gradient_sign_method_cpu():
"""
FGSM-Attack test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -188,4 +109,6 @@ def test_fast_gradient_sign_method_cpu():


if __name__ == '__main__':
test_fast_gradient_sign_method_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_fast_gradient_sign_method()

+3  -88   example/mnist_demo/mnist_attack_genetic.py

@@ -15,7 +15,6 @@ import sys
import time

import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
@@ -49,96 +48,10 @@ class ModelToBeAttacked(BlackModel):
return result.asnumpy()


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_genetic_attack_on_mnist():
"""
Genetic-Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size=batch_size)

# prediction accuracy before attack
model = ModelToBeAttacked(net)
batch_num = 3 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(images), axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.concatenate(test_labels)
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy)

# attacking
attack = GeneticAttack(model=model, pop_size=6, mutation_rate=0.05,
per_bounds=0.1, step_size=0.25, temp=0.1,
sparse=True)
targeted_labels = np.random.randint(0, 10, size=len(true_labels))
for i, true_l in enumerate(true_labels):
if targeted_labels[i] == true_l:
targeted_labels[i] = (targeted_labels[i] + 1) % 10
start_time = time.clock()
success_list, adv_data, query_list = attack.generate(
np.concatenate(test_images), targeted_labels)
stop_time = time.clock()
LOGGER.info(TAG, 'success_list: %s', success_list)
LOGGER.info(TAG, 'average of query times is : %s', np.mean(query_list))
pred_logits_adv = model.predict(adv_data)
# rescale predict confidences into (0, 1).
pred_logits_adv = softmax(pred_logits_adv, axis=1)
pred_lables_adv = np.argmax(pred_logits_adv, axis=1)
accuracy_adv = np.mean(np.equal(pred_lables_adv, true_labels))
LOGGER.info(TAG, "prediction accuracy after attacking is : %g",
accuracy_adv)
test_labels_onehot = np.eye(10)[true_labels]
attack_evaluate = AttackEvaluate(np.concatenate(test_images),
test_labels_onehot, adv_data,
pred_logits_adv, targeted=True,
target_label=targeted_labels)
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
attack_evaluate.mis_classification_rate())
LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
attack_evaluate.avg_conf_adv_class())
LOGGER.info(TAG, 'The average confidence of true class is : %s',
attack_evaluate.avg_conf_true_class())
LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_lp_distance())
LOGGER.info(TAG, 'The average structural similarity between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_ssim())
LOGGER.info(TAG, 'The average costing time is %s',
(stop_time - start_time)/(batch_num*batch_size))


def test_genetic_attack_on_mnist_cpu():
"""
Genetic-Attack test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -215,4 +128,6 @@ def test_genetic_attack_on_mnist_cpu():


if __name__ == '__main__':
test_genetic_attack_on_mnist_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_genetic_attack_on_mnist()
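
Several of the removed bodies (genetic, JSMA, LBFGS, pointwise, salt-and-pepper) build targeted labels with the same per-sample loop: draw a random class and bump any draw that collides with the true class. As a clarifying sketch only (the helper name is illustrative), the loop is equivalent to:

import numpy as np

def random_targeted_labels(true_labels, num_classes=10):
    """Random target per sample, never equal to the true class
    (vectorised form of the per-sample loop in the hunks above)."""
    true_labels = np.asarray(true_labels)
    targets = np.random.randint(0, num_classes, size=len(true_labels))
    collide = targets == true_labels
    targets[collide] = (targets[collide] + 1) % num_classes
    return targets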

+3  -82   example/mnist_demo/mnist_attack_hsja.py

@@ -14,7 +14,6 @@
import sys

import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
@@ -68,90 +67,10 @@ def create_target_images(dataset, data_labels, target_labels):
return np.array(res)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_hsja_mnist_attack():
"""
hsja-Attack test
"""
context.set_context(mode=context.GRAPH_MODE)
context.set_context(device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)
net.set_train(False)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size=batch_size)

# prediction accuracy before attack
model = ModelToBeAttacked(net)
batch_num = 5 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(images), axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.concatenate(test_labels)
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %s",
accuracy)
test_images = np.concatenate(test_images)

# attacking
norm = 'l2'
search = 'grid_search'
target = False
attack = HopSkipJumpAttack(model, constraint=norm, stepsize_search=search)
if target:
target_labels = random_target_labels(true_labels)
target_images = create_target_images(test_images, predict_labels,
target_labels)
attack.set_target_images(target_images)
success_list, adv_data, _ = attack.generate(test_images, target_labels)
else:
success_list, adv_data, _ = attack.generate(test_images, None)

adv_datas = []
gts = []
for success, adv, gt in zip(success_list, adv_data, true_labels):
if success:
adv_datas.append(adv)
gts.append(gt)
if gts:
adv_datas = np.concatenate(np.asarray(adv_datas), axis=0)
gts = np.asarray(gts)
pred_logits_adv = model.predict(adv_datas)
pred_lables_adv = np.argmax(pred_logits_adv, axis=1)
accuracy_adv = np.mean(np.equal(pred_lables_adv, gts))
mis_rate = (1 - accuracy_adv)*(len(adv_datas) / len(success_list))
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
mis_rate)


def test_hsja_mnist_attack_cpu():
"""
hsja-Attack test
"""
context.set_context(mode=context.GRAPH_MODE)
context.set_context(device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -220,4 +139,6 @@ def test_hsja_mnist_attack_cpu():


if __name__ == '__main__':
test_hsja_mnist_attack_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_hsja_mnist_attack()

+3  -86   example/mnist_demo/mnist_attack_jsma.py

@@ -15,7 +15,6 @@ import sys
import time

import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context
@@ -38,94 +37,10 @@ LOGGER.set_level('INFO')
TAG = 'JSMA_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_jsma_attack():
"""
JSMA-Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size=batch_size)

# prediction accuracy before attack
model = Model(net)
batch_num = 3 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.concatenate(test_labels)
targeted_labels = np.random.randint(0, 10, size=len(true_labels))
for i, true_l in enumerate(true_labels):
if targeted_labels[i] == true_l:
targeted_labels[i] = (targeted_labels[i] + 1) % 10
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy)

# attacking
classes = 10
attack = JSMAAttack(net, classes)
start_time = time.clock()
adv_data = attack.batch_generate(np.concatenate(test_images),
targeted_labels, batch_size=32)
stop_time = time.clock()
pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
# rescale predict confidences into (0, 1).
pred_logits_adv = softmax(pred_logits_adv, axis=1)
pred_lables_adv = np.argmax(pred_logits_adv, axis=1)
accuracy_adv = np.mean(np.equal(pred_lables_adv, true_labels))
LOGGER.info(TAG, "prediction accuracy after attacking is : %g",
accuracy_adv)
test_labels = np.eye(10)[np.concatenate(test_labels)]
attack_evaluate = AttackEvaluate(
np.concatenate(test_images).transpose(0, 2, 3, 1),
test_labels, adv_data.transpose(0, 2, 3, 1),
pred_logits_adv, targeted=True, target_label=targeted_labels)
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
attack_evaluate.mis_classification_rate())
LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
attack_evaluate.avg_conf_adv_class())
LOGGER.info(TAG, 'The average confidence of true class is : %s',
attack_evaluate.avg_conf_true_class())
LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_lp_distance())
LOGGER.info(TAG, 'The average structural similarity between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_ssim())
LOGGER.info(TAG, 'The average costing time is %s',
(stop_time - start_time) / (batch_num*batch_size))


def test_jsma_attack_cpu():
"""
JSMA-Attack test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -200,4 +115,6 @@ def test_jsma_attack_cpu():


if __name__ == '__main__':
test_jsma_attack_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_jsma_attack()

+3  -94   example/mnist_demo/mnist_attack_lbfgs.py

@@ -15,7 +15,6 @@ import sys
import time

import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context
@@ -37,102 +36,10 @@ LOGGER.set_level('INFO')
TAG = 'LBFGS_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_lbfgs_attack():
"""
LBFGS-Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size=batch_size, sparse=False)

# prediction accuracy before attack
model = Model(net)
batch_num = 3 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.argmax(np.concatenate(test_labels), axis=1)
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

# attacking
is_targeted = True
if is_targeted:
targeted_labels = np.random.randint(0, 10, size=len(true_labels)).astype(np.int32)
for i, true_l in enumerate(true_labels):
if targeted_labels[i] == true_l:
targeted_labels[i] = (targeted_labels[i] + 1) % 10
else:
targeted_labels = true_labels.astype(np.int32)
targeted_labels = np.eye(10)[targeted_labels].astype(np.float32)
attack = LBFGS(net, is_targeted=is_targeted)
start_time = time.clock()
adv_data = attack.batch_generate(np.concatenate(test_images),
targeted_labels,
batch_size=batch_size)
stop_time = time.clock()
pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
# rescale predict confidences into (0, 1).
pred_logits_adv = softmax(pred_logits_adv, axis=1)
pred_labels_adv = np.argmax(pred_logits_adv, axis=1)

accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
LOGGER.info(TAG, "prediction accuracy after attacking is : %s",
accuracy_adv)
attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1),
np.concatenate(test_labels),
adv_data.transpose(0, 2, 3, 1),
pred_logits_adv,
targeted=is_targeted,
target_label=np.argmax(targeted_labels,
axis=1))
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
attack_evaluate.mis_classification_rate())
LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
attack_evaluate.avg_conf_adv_class())
LOGGER.info(TAG, 'The average confidence of true class is : %s',
attack_evaluate.avg_conf_true_class())
LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_lp_distance())
LOGGER.info(TAG, 'The average structural similarity between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_ssim())
LOGGER.info(TAG, 'The average costing time is %s',
(stop_time - start_time)/(batch_num*batch_size))


def test_lbfgs_attack_cpu():
"""
LBFGS-Attack test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -214,4 +121,6 @@ def test_lbfgs_attack_cpu():


if __name__ == '__main__':
test_lbfgs_attack_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_lbfgs_attack()

+4  -78   example/mnist_demo/mnist_attack_mdi2fgsm.py

@@ -15,7 +15,6 @@ import sys
import time

import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context
@@ -37,83 +36,8 @@ LOGGER = LogUtil.get_instance()
TAG = 'M_DI2_FGSM_Test'
LOGGER.set_level('INFO')

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_momentum_diverse_input_iterative_method():
"""
M-DI2-FGSM Attack Test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size, sparse=False)

# prediction accuracy before attack
model = Model(net)
batch_num = 32 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.argmax(np.concatenate(test_labels), axis=1)
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

# attacking
attack = MomentumDiverseInputIterativeMethod(net)
start_time = time.clock()
adv_data = attack.batch_generate(np.concatenate(test_images),
np.concatenate(test_labels), batch_size=32)
stop_time = time.clock()
pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
# rescale predict confidences into (0, 1).
pred_logits_adv = softmax(pred_logits_adv, axis=1)
pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv)
attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1),
np.concatenate(test_labels),
adv_data.transpose(0, 2, 3, 1),
pred_logits_adv)
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
attack_evaluate.mis_classification_rate())
LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
attack_evaluate.avg_conf_adv_class())
LOGGER.info(TAG, 'The average confidence of true class is : %s',
attack_evaluate.avg_conf_true_class())
LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_lp_distance())
LOGGER.info(TAG, 'The average structural similarity between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_ssim())
LOGGER.info(TAG, 'The average costing time is %s',
(stop_time - start_time)/(batch_num*batch_size))


def test_momentum_diverse_input_iterative_method_cpu():
def test_momentum_diverse_input_iterative_method():
"""
M-DI2-FGSM Attack Test for CPU device.
"""
@@ -186,4 +110,6 @@ def test_momentum_diverse_input_iterative_method_cpu():


if __name__ == '__main__':
test_momentum_diverse_input_iterative_method_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_momentum_diverse_input_iterative_method()

+3  -90   example/mnist_demo/mnist_attack_nes.py

@@ -14,7 +14,6 @@
import sys

import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
@@ -78,98 +77,10 @@ def create_target_images(dataset, data_labels, target_labels):
return np.array(res)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_nes_mnist_attack():
"""
hsja-Attack test
"""
context.set_context(mode=context.GRAPH_MODE)
context.set_context(device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)
net.set_train(False)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size=batch_size)

# prediction accuracy before attack
model = ModelToBeAttacked(net)
# the number of batches of attacking samples
batch_num = 5
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(images), axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.concatenate(test_labels)

accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %s",
accuracy)
test_images = np.concatenate(test_images)

# attacking
scene = 'Query_Limit'
if scene == 'Query_Limit':
top_k = -1
elif scene == 'Partial_Info':
top_k = 5
elif scene == 'Label_Only':
top_k = 5

success = 0
queries_num = 0

nes_instance = NES(model, scene, top_k=top_k)
test_length = 32
advs = []
for img_index in range(test_length):
# Initial image and class selection
initial_img = test_images[img_index]
orig_class = true_labels[img_index]
initial_img = [initial_img]
target_class = random_target_labels([orig_class], true_labels)
target_image = create_target_images(test_images, true_labels,
target_class)
nes_instance.set_target_images(target_image)
tag, adv, queries = nes_instance.generate(initial_img, target_class)
if tag[0]:
success += 1
queries_num += queries[0]
advs.append(adv)

advs = np.reshape(advs, (len(advs), 1, 32, 32))
adv_pred = np.argmax(model.predict(advs), axis=1)
adv_accuracy = np.mean(np.equal(adv_pred, true_labels[:test_length]))
LOGGER.info(TAG, "prediction accuracy after attacking is : %s",
adv_accuracy)


def test_nes_mnist_attack_cpu():
"""
hsja-Attack test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE)
context.set_context(device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -246,4 +157,6 @@ def test_nes_mnist_attack_cpu():


if __name__ == '__main__':
test_nes_mnist_attack_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_nes_mnist_attack()
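
For reference, the removed NES body picks top_k from the query scene with an if/elif chain; the same mapping, with values copied from the hunk above, as a lookup table:

TOP_K_BY_SCENE = {'Query_Limit': -1, 'Partial_Info': 5, 'Label_Only': 5}

scene = 'Query_Limit'
top_k = TOP_K_BY_SCENE[scene]
# nes_instance = NES(model, scene, top_k=top_k)   # model and NES as in the example above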

+3  -80   example/mnist_demo/mnist_attack_pgd.py

@@ -15,7 +15,6 @@ import sys
import time

import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context
@@ -37,88 +36,10 @@ LOGGER.set_level('INFO')
TAG = 'PGD_Test'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_projected_gradient_descent_method():
"""
PGD-Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size, sparse=False)

# prediction accuracy before attack
model = Model(net)
batch_num = 32 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(Tensor(images)).asnumpy(),
axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.argmax(np.concatenate(test_labels), axis=1)
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

# attacking
attack = ProjectedGradientDescent(net, eps=0.3)
start_time = time.clock()
adv_data = attack.batch_generate(np.concatenate(test_images),
np.concatenate(test_labels), batch_size=32)
stop_time = time.clock()
np.save('./adv_data', adv_data)
pred_logits_adv = model.predict(Tensor(adv_data)).asnumpy()
# rescale predict confidences into (0, 1).
pred_logits_adv = softmax(pred_logits_adv, axis=1)
pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
LOGGER.info(TAG, "prediction accuracy after attacking is : %s", accuracy_adv)
attack_evaluate = AttackEvaluate(np.concatenate(test_images).transpose(0, 2, 3, 1),
np.concatenate(test_labels),
adv_data.transpose(0, 2, 3, 1),
pred_logits_adv)
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
attack_evaluate.mis_classification_rate())
LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
attack_evaluate.avg_conf_adv_class())
LOGGER.info(TAG, 'The average confidence of true class is : %s',
attack_evaluate.avg_conf_true_class())
LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_lp_distance())
LOGGER.info(TAG, 'The average structural similarity between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_ssim())
LOGGER.info(TAG, 'The average costing time is %s',
(stop_time - start_time)/(batch_num*batch_size))


def test_projected_gradient_descent_method_cpu():
"""
PGD-Attack test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -188,4 +109,6 @@ def test_projected_gradient_descent_method_cpu():


if __name__ == '__main__':
test_projected_gradient_descent_method_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_projected_gradient_descent_method()

+3  -86   example/mnist_demo/mnist_attack_pointwise.py

@@ -14,7 +14,6 @@
import sys

import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
@@ -49,94 +48,10 @@ class ModelToBeAttacked(BlackModel):
return result.asnumpy()


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_pointwise_attack_on_mnist():
"""
Salt-and-Pepper-Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size=batch_size)

# prediction accuracy before attack
model = ModelToBeAttacked(net)
batch_num = 3 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(images), axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.concatenate(test_labels)
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy)

# attacking
is_target = False
attack = PointWiseAttack(model=model, is_targeted=is_target)
if is_target:
targeted_labels = np.random.randint(0, 10, size=len(true_labels))
for i, true_l in enumerate(true_labels):
if targeted_labels[i] == true_l:
targeted_labels[i] = (targeted_labels[i] + 1) % 10
else:
targeted_labels = true_labels
success_list, adv_data, query_list = attack.generate(
np.concatenate(test_images), targeted_labels)
success_list = np.arange(success_list.shape[0])[success_list]
LOGGER.info(TAG, 'success_list: %s', success_list)
LOGGER.info(TAG, 'average of query times is : %s', np.mean(query_list))
adv_preds = []
for ite_data in adv_data:
pred_logits_adv = model.predict(ite_data)
# rescale predict confidences into (0, 1).
pred_logits_adv = softmax(pred_logits_adv, axis=1)
adv_preds.extend(pred_logits_adv)
accuracy_adv = np.mean(np.equal(np.max(adv_preds, axis=1), true_labels))
LOGGER.info(TAG, "prediction accuracy after attacking is : %g",
accuracy_adv)
test_labels_onehot = np.eye(10)[true_labels]
attack_evaluate = AttackEvaluate(np.concatenate(test_images),
test_labels_onehot, adv_data,
adv_preds, targeted=is_target,
target_label=targeted_labels)
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
attack_evaluate.mis_classification_rate())
LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
attack_evaluate.avg_conf_adv_class())
LOGGER.info(TAG, 'The average confidence of true class is : %s',
attack_evaluate.avg_conf_true_class())
LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_lp_distance())


def test_pointwise_attack_on_mnist_cpu():
"""
Salt-and-Pepper-Attack test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -211,4 +126,6 @@ def test_pointwise_attack_on_mnist_cpu():


if __name__ == '__main__':
test_pointwise_attack_on_mnist_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_pointwise_attack_on_mnist()

+3  -81   example/mnist_demo/mnist_attack_pso.py

@@ -15,7 +15,6 @@ import sys
import time

import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
@@ -49,89 +48,10 @@ class ModelToBeAttacked(BlackModel):
return result.asnumpy()


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_pso_attack_on_mnist():
"""
PSO-Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size=batch_size)

# prediction accuracy before attack
model = ModelToBeAttacked(net)
batch_num = 3 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(images), axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
predict_labels = np.concatenate(predict_labels)
true_labels = np.concatenate(test_labels)
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)

# attacking
attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=True)
start_time = time.clock()
success_list, adv_data, query_list = attack.generate(
np.concatenate(test_images), np.concatenate(test_labels))
stop_time = time.clock()
LOGGER.info(TAG, 'success_list: %s', success_list)
LOGGER.info(TAG, 'average of query times is : %s', np.mean(query_list))
pred_logits_adv = model.predict(adv_data)
# rescale predict confidences into (0, 1).
pred_logits_adv = softmax(pred_logits_adv, axis=1)
pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
accuracy_adv = np.mean(np.equal(pred_labels_adv, true_labels))
LOGGER.info(TAG, "prediction accuracy after attacking is : %s",
accuracy_adv)
test_labels_onehot = np.eye(10)[np.concatenate(test_labels)]
attack_evaluate = AttackEvaluate(np.concatenate(test_images),
test_labels_onehot, adv_data,
pred_logits_adv)
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
attack_evaluate.mis_classification_rate())
LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
attack_evaluate.avg_conf_adv_class())
LOGGER.info(TAG, 'The average confidence of true class is : %s',
attack_evaluate.avg_conf_true_class())
LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_lp_distance())
LOGGER.info(TAG, 'The average structural similarity between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_ssim())
LOGGER.info(TAG, 'The average costing time is %s',
(stop_time - start_time)/(batch_num*batch_size))


def test_pso_attack_on_mnist_cpu():
"""
PSO-Attack test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -201,4 +121,6 @@ def test_pso_attack_on_mnist_cpu():


if __name__ == '__main__':
test_pso_attack_on_mnist_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_pso_attack_on_mnist()

+3  -90   example/mnist_demo/mnist_attack_salt_and_pepper.py

@@ -14,7 +14,6 @@
import sys

import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
@@ -49,98 +48,10 @@ class ModelToBeAttacked(BlackModel):
return result.asnumpy()


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_salt_and_pepper_attack_on_mnist():
"""
Salt-and-Pepper-Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds = generate_mnist_dataset(data_list, batch_size=batch_size)

# prediction accuracy before attack
model = ModelToBeAttacked(net)
batch_num = 3 # the number of batches of attacking samples
test_images = []
test_labels = []
predict_labels = []
i = 0
for data in ds.create_tuple_iterator():
i += 1
images = data[0].astype(np.float32)
labels = data[1]
test_images.append(images)
test_labels.append(labels)
pred_labels = np.argmax(model.predict(images), axis=1)
predict_labels.append(pred_labels)
if i >= batch_num:
break
LOGGER.debug(TAG, 'model input image shape is: {}'.format(np.array(test_images).shape))
predict_labels = np.concatenate(predict_labels)
true_labels = np.concatenate(test_labels)
accuracy = np.mean(np.equal(predict_labels, true_labels))
LOGGER.info(TAG, "prediction accuracy before attacking is : %g", accuracy)

# attacking
is_target = False
attack = SaltAndPepperNoiseAttack(model=model,
is_targeted=is_target,
sparse=True)
if is_target:
targeted_labels = np.random.randint(0, 10, size=len(true_labels))
for i, true_l in enumerate(true_labels):
if targeted_labels[i] == true_l:
targeted_labels[i] = (targeted_labels[i] + 1) % 10
else:
targeted_labels = true_labels
LOGGER.debug(TAG, 'input shape is: {}'.format(np.concatenate(test_images).shape))
success_list, adv_data, query_list = attack.generate(
np.concatenate(test_images), targeted_labels)
success_list = np.arange(success_list.shape[0])[success_list]
LOGGER.info(TAG, 'success_list: %s', success_list)
LOGGER.info(TAG, 'average of query times is : %s', np.mean(query_list))
adv_preds = []
for ite_data in adv_data:
pred_logits_adv = model.predict(ite_data)
# rescale predict confidences into (0, 1).
pred_logits_adv = softmax(pred_logits_adv, axis=1)
adv_preds.extend(pred_logits_adv)
accuracy_adv = np.mean(np.equal(np.max(adv_preds, axis=1), true_labels))
LOGGER.info(TAG, "prediction accuracy after attacking is : %g",
accuracy_adv)
test_labels_onehot = np.eye(10)[true_labels]
attack_evaluate = AttackEvaluate(np.concatenate(test_images),
test_labels_onehot, adv_data,
adv_preds, targeted=is_target,
target_label=targeted_labels)
LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
attack_evaluate.mis_classification_rate())
LOGGER.info(TAG, 'The average confidence of adversarial class is : %s',
attack_evaluate.avg_conf_adv_class())
LOGGER.info(TAG, 'The average confidence of true class is : %s',
attack_evaluate.avg_conf_true_class())
LOGGER.info(TAG, 'The average distance (l0, l2, linf) between original '
'samples and adversarial samples are: %s',
attack_evaluate.avg_lp_distance())


def test_salt_and_pepper_attack_on_mnist_cpu():
"""
Salt-and-Pepper-Attack test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# upload trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -219,4 +130,6 @@ def test_salt_and_pepper_attack_on_mnist_cpu():


if __name__ == '__main__':
test_salt_and_pepper_attack_on_mnist_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_salt_and_pepper_attack_on_mnist()

+3  -105   example/mnist_demo/mnist_defense_nad.py

@@ -12,11 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""defense example using nad"""
import logging
import sys

import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore import nn
@@ -36,111 +34,10 @@ LOGGER = LogUtil.get_instance()
TAG = 'Nad_Example'


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_nad_method():
"""
NAD-Defense test.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# 1. load trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(net, load_dict)

loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)

nad = NaturalAdversarialDefense(net, loss_fn=loss, optimizer=opt,
bounds=(0.0, 1.0), eps=0.3)

# 2. get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds_test = generate_mnist_dataset(data_list, batch_size=batch_size,
sparse=False)
inputs = []
labels = []
for data in ds_test.create_tuple_iterator():
inputs.append(data[0].astype(np.float32))
labels.append(data[1])
inputs = np.concatenate(inputs)
labels = np.concatenate(labels)

# 3. get accuracy of test data on original model
net.set_train(False)
acc_list = []
batchs = inputs.shape[0] // batch_size
for i in range(batchs):
batch_inputs = inputs[i*batch_size : (i + 1)*batch_size]
batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1)
logits = net(Tensor(batch_inputs)).asnumpy()
label_pred = np.argmax(logits, axis=1)
acc_list.append(np.mean(batch_labels == label_pred))

LOGGER.debug(TAG, 'accuracy of TEST data on original model is : %s',
np.mean(acc_list))

# 4. get adv of test data
attack = FastGradientSignMethod(net, eps=0.3)
adv_data = attack.batch_generate(inputs, labels)
LOGGER.debug(TAG, 'adv_data.shape is : %s', adv_data.shape)

# 5. get accuracy of adv data on original model
net.set_train(False)
acc_list = []
batchs = adv_data.shape[0] // batch_size
for i in range(batchs):
batch_inputs = adv_data[i*batch_size : (i + 1)*batch_size]
batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1)
logits = net(Tensor(batch_inputs)).asnumpy()
label_pred = np.argmax(logits, axis=1)
acc_list.append(np.mean(batch_labels == label_pred))

LOGGER.debug(TAG, 'accuracy of adv data on original model is : %s',
np.mean(acc_list))

# 6. defense
net.set_train()
nad.batch_defense(inputs, labels, batch_size=32, epochs=10)

# 7. get accuracy of test data on defensed model
net.set_train(False)
acc_list = []
batchs = inputs.shape[0] // batch_size
for i in range(batchs):
batch_inputs = inputs[i*batch_size : (i + 1)*batch_size]
batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1)
logits = net(Tensor(batch_inputs)).asnumpy()
label_pred = np.argmax(logits, axis=1)
acc_list.append(np.mean(batch_labels == label_pred))

LOGGER.debug(TAG, 'accuracy of TEST data on defensed model is : %s',
np.mean(acc_list))

# 8. get accuracy of adv data on defensed model
acc_list = []
batchs = adv_data.shape[0] // batch_size
for i in range(batchs):
batch_inputs = adv_data[i*batch_size : (i + 1)*batch_size]
batch_labels = np.argmax(labels[i*batch_size : (i + 1)*batch_size], axis=1)
logits = net(Tensor(batch_inputs)).asnumpy()
label_pred = np.argmax(logits, axis=1)
acc_list.append(np.mean(batch_labels == label_pred))

LOGGER.debug(TAG, 'accuracy of adv data on defensed model is : %s',
np.mean(acc_list))


def test_nad_method_cpu():
"""
NAD-Defense test for CPU device.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# 1. load trained network
ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = LeNet5()
@@ -231,5 +128,6 @@ def test_nad_method_cpu():


if __name__ == '__main__':
LOGGER.set_level(logging.DEBUG)
test_nad_method_cpu()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_nad_method()
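
The removed test_nad_method body repeats one batched-accuracy loop four times (test data and adversarial data, before and after defense). As a clarifying sketch, assuming a MindSpore Cell net and one-hot labels as in the example, each of those loops computes:

import numpy as np
from mindspore import Tensor

def batched_accuracy(net, inputs, labels_onehot, batch_size=32):
    """Mean accuracy of `net` over `inputs`, evaluated batch by batch
    (the loop the removed body repeats four times)."""
    net.set_train(False)
    acc = []
    for i in range(inputs.shape[0] // batch_size):
        batch_inputs = inputs[i*batch_size:(i + 1)*batch_size]
        batch_labels = np.argmax(labels_onehot[i*batch_size:(i + 1)*batch_size], axis=1)
        pred = np.argmax(net(Tensor(batch_inputs)).asnumpy(), axis=1)
        acc.append(np.mean(batch_labels == pred))
    return np.mean(acc)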

+17  -16   example/mnist_demo/mnist_evaluation.py

@@ -40,7 +40,6 @@ from mindarmour.utils.logger import LogUtil
sys.path.append("..")
from data_processing import generate_mnist_dataset
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
LOGGER = LogUtil.get_instance()
TAG = 'Defense_Evaluate_Example'
@@ -140,20 +139,18 @@ def test_black_defense():
# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds_test = generate_mnist_dataset(data_list, batch_size=batch_size,
sparse=False)
ds_test = generate_mnist_dataset(data_list, batch_size=batch_size)
inputs = []
labels = []
for data in ds_test.create_tuple_iterator():
inputs.append(data[0].astype(np.float32))
labels.append(data[1])
inputs = np.concatenate(inputs).astype(np.float32)
labels = np.concatenate(labels).astype(np.float32)
labels_sparse = np.argmax(labels, axis=1)
labels = np.concatenate(labels).astype(np.int32)
target_label = np.random.randint(0, 10, size=labels_sparse.shape[0])
for idx in range(labels_sparse.shape[0]):
while target_label[idx] == labels_sparse[idx]:
target_label = np.random.randint(0, 10, size=labels.shape[0])
for idx in range(labels.shape[0]):
while target_label[idx] == labels[idx]:
target_label[idx] = np.random.randint(0, 10)
target_label = np.eye(10)[target_label].astype(np.float32)
@@ -167,23 +164,23 @@ def test_black_defense():
wb_model = ModelToBeAttacked(wb_net)
# gen white-box adversarial examples of test data
wb_attack = FastGradientSignMethod(wb_net, eps=0.3)
loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
wb_attack = FastGradientSignMethod(wb_net, eps=0.3, loss_fn=loss)
wb_adv_sample = wb_attack.generate(attacked_sample,
attacked_true_label)
wb_raw_preds = softmax(wb_model.predict(wb_adv_sample), axis=1)
accuracy_test = np.mean(
np.equal(np.argmax(wb_model.predict(attacked_sample), axis=1),
np.argmax(attacked_true_label, axis=1)))
attacked_true_label))
LOGGER.info(TAG, "prediction accuracy before white-box attack is : %s",
accuracy_test)
accuracy_adv = np.mean(np.equal(np.argmax(wb_raw_preds, axis=1),
np.argmax(attacked_true_label, axis=1)))
attacked_true_label))
LOGGER.info(TAG, "prediction accuracy after white-box attack is : %s",
accuracy_adv)
# improve the robustness of model with white-box adversarial examples
loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
opt = nn.Momentum(wb_net.trainable_params(), 0.01, 0.09)
nad = NaturalAdversarialDefense(wb_net, loss_fn=loss, optimizer=opt,
@@ -194,12 +191,12 @@ def test_black_defense():
wb_def_preds = wb_net(Tensor(wb_adv_sample)).asnumpy()
wb_def_preds = softmax(wb_def_preds, axis=1)
accuracy_def = np.mean(np.equal(np.argmax(wb_def_preds, axis=1),
np.argmax(attacked_true_label, axis=1)))
attacked_true_label))
LOGGER.info(TAG, "prediction accuracy after defense is : %s", accuracy_def)
# calculate defense evaluation metrics for defense against white-box attack
wb_def_evaluate = DefenseEvaluate(wb_raw_preds, wb_def_preds,
np.argmax(attacked_true_label, axis=1))
attacked_true_label)
LOGGER.info(TAG, 'defense evaluation for white-box adversarial attack')
LOGGER.info(TAG,
'classification accuracy variance (CAV) is : {:.2f}'.format(
@@ -232,7 +229,7 @@ def test_black_defense():
per_bounds=0.1, step_size=0.25, temp=0.1,
sparse=False)
attack_target_label = target_label[:attacked_size]
true_label = labels_sparse[:attacked_size + benign_size]
true_label = labels[:attacked_size + benign_size]
# evaluate robustness of original model
# gen black-box adversarial examples of test data
for idx in range(attacked_size):
@@ -323,4 +320,8 @@ def test_black_defense():
if __name__ == '__main__':
test_black_defense()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
DEVICE = context.get_context("device_target")
if DEVICE in ("Ascend", "GPU"):
test_black_defense()
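
Unlike the attack demos, mnist_evaluation.py (and mnist_similarity_detector.py below) gates the run on the configured device. Reassembled from the added lines above (context and test_black_defense are the file's own import and function), its entry block now reads:

if __name__ == '__main__':
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    DEVICE = context.get_context("device_target")
    if DEVICE in ("Ascend", "GPU"):
        test_black_defense()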

+5  -8   example/mnist_demo/mnist_similarity_detector.py

@@ -14,7 +14,6 @@
import sys

import numpy as np
import pytest
from mindspore import Model
from mindspore import Tensor
from mindspore import context
@@ -29,7 +28,6 @@ from mindarmour.attacks.black.pso_attack import PSOAttack
from mindarmour.detectors.black.similarity_detector import SimilarityDetector
from mindarmour.utils.logger import LogUtil

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

sys.path.append("..")
from data_processing import generate_mnist_dataset
@@ -92,11 +90,6 @@ class EncoderNet(Cell):
return self._encode_dim


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_similarity_detector():
"""
Similarity Detector test.
@@ -178,4 +171,8 @@ def test_similarity_detector():


if __name__ == '__main__':
test_similarity_detector()
# device_target can be "CPU", "GPU" or "Ascend"
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
DEVICE = context.get_context("device_target")
if DEVICE in ("Ascend", "GPU"):
test_similarity_detector()

+2  -6   example/mnist_demo/mnist_train.py

@@ -31,12 +31,6 @@ TAG = "Lenet5_train"


def mnist_train(epoch_size, batch_size, lr, momentum):
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
enable_mem_reuse=False)

lr = lr
momentum = momentum
epoch_size = epoch_size
mnist_path = "./MNIST_unzip/"
ds = generate_mnist_dataset(os.path.join(mnist_path, "train"),
batch_size=batch_size, repeat_size=1)
@@ -67,4 +61,6 @@ def mnist_train(epoch_size, batch_size, lr, momentum):


if __name__ == '__main__':
context.set_context(mode=context.GRAPH_MODE, device_target="CPU",
enable_mem_reuse=False)
mnist_train(10, 32, 0.01, 0.9)
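
For context (not part of the diff): every demo above loads './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt', which is presumably the checkpoint mnist_train.py writes (10 epochs, batch size 32, i.e. 1875 steps per epoch on the 60k MNIST training set). A hedged sketch of the assumed workflow, with import paths guessed from the file names:

from mindspore import context
# assumed import paths inside example/mnist_demo/:
from mnist_train import mnist_train
from mnist_attack_fgsm import test_fast_gradient_sign_method

if __name__ == '__main__':
    # device_target can be "CPU", "GPU" or "Ascend"
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU", enable_mem_reuse=False)
    mnist_train(10, 32, 0.01, 0.9)        # presumably produces checkpoint_lenet-10_1875.ckpt
    test_fast_gradient_sign_method()      # any other demo could be run here instead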
