@@ -76,19 +76,19 @@ class GeneticAttack(Attack):
 >>> from mindarmour import BlackModel
 >>> from mindarmour.adv_robustness.attacks import GeneticAttack
 >>> class ModelToBeAttacked(BlackModel):
->>> def __init__(self, network):
->>> super(ModelToBeAttacked, self).__init__()
->>> self._network = network
->>> def predict(self, inputs):
->>> result = self._network(Tensor(inputs.astype(np.float32)))
->>> return result.asnumpy()
+... def __init__(self, network):
+... super(ModelToBeAttacked, self).__init__()
+... self._network = network
+... def predict(self, inputs):
+... result = self._network(Tensor(inputs.astype(np.float32)))
+... return result.asnumpy()
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._softmax = M.Softmax()
->>> def construct(self, inputs):
->>> out = self._softmax(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._softmax = M.Softmax()
+... def construct(self, inputs):
+... out = self._softmax(inputs)
+... return out
 >>> net = Net()
 >>> model = ModelToBeAttacked(net)
 >>> attack = GeneticAttack(model, sparse=False)
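For reference, a minimal end-to-end sketch of the corrected GeneticAttack example. The generate call falls outside the hunk shown above; the batch size, class count, and random data below are illustrative assumptions, not part of this diff.

    import numpy as np
    import mindspore.ops.operations as M
    from mindspore import Tensor
    from mindspore.nn import Cell
    from mindarmour import BlackModel
    from mindarmour.adv_robustness.attacks import GeneticAttack

    class ModelToBeAttacked(BlackModel):
        """Black-box wrapper, as in the corrected docstring above."""
        def __init__(self, network):
            super(ModelToBeAttacked, self).__init__()
            self._network = network
        def predict(self, inputs):
            result = self._network(Tensor(inputs.astype(np.float32)))
            return result.asnumpy()

    class Net(Cell):
        """Toy softmax network used only to exercise the attack."""
        def __init__(self):
            super(Net, self).__init__()
            self._softmax = M.Softmax()
        def construct(self, inputs):
            return self._softmax(inputs)

    model = ModelToBeAttacked(Net())
    attack = GeneticAttack(model, sparse=False)
    # sparse=False, so labels are one-hot vectors; the shapes here are assumptions.
    inputs = np.random.rand(3, 10).astype(np.float32)
    labels = np.eye(10)[np.random.randint(0, 10, size=3)].astype(np.float32)
    result = attack.generate(inputs, labels)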
@@ -81,14 +81,14 @@ class HopSkipJumpAttack(Attack):
 >>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack
 >>> from tests.ut.python.utils.mock_net import Net
 >>> class ModelToBeAttacked(BlackModel):
->>> def __init__(self, network):
->>> super(ModelToBeAttacked, self).__init__()
->>> self._network = network
->>> def predict(self, inputs):
->>> if len(inputs.shape) == 3:
->>> inputs = inputs[np.newaxis, :]
->>> result = self._network(Tensor(inputs.astype(np.float32)))
->>> return result.asnumpy()
+... def __init__(self, network):
+... super(ModelToBeAttacked, self).__init__()
+... self._network = network
+... def predict(self, inputs):
+... if len(inputs.shape) == 3:
+... inputs = inputs[np.newaxis, :]
+... result = self._network(Tensor(inputs.astype(np.float32)))
+... return result.asnumpy()
 >>> net = Net()
 >>> model = ModelToBeAttacked(net)
 >>> attack = HopSkipJumpAttack(model)
@@ -191,14 +191,14 @@ class HopSkipJumpAttack(Attack):
 >>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack
 >>> from tests.ut.python.utils.mock_net import Net
 >>> class ModelToBeAttacked(BlackModel):
->>> def __init__(self, network):
->>> super(ModelToBeAttacked, self).__init__()
->>> self._network = network
->>> def predict(self, inputs):
->>> if len(inputs.shape) == 3:
->>> inputs = inputs[np.newaxis, :]
->>> result = self._network(Tensor(inputs.astype(np.float32)))
->>> return result.asnumpy()
+... def __init__(self, network):
+... super(ModelToBeAttacked, self).__init__()
+... self._network = network
+... def predict(self, inputs):
+... if len(inputs.shape) == 3:
+... inputs = inputs[np.newaxis, :]
+... result = self._network(Tensor(inputs.astype(np.float32)))
+... return result.asnumpy()
 >>> net = Net()
 >>> model = ModelToBeAttacked(net)
 >>> attack = HopSkipJumpAttack(model)
@@ -85,14 +85,14 @@ class NES(Attack):
 >>> from mindarmour.adv_robustness.attacks import NES
 >>> from tests.ut.python.utils.mock_net import Net
 >>> class ModelToBeAttacked(BlackModel):
->>> def __init__(self, network):
->>> super(ModelToBeAttacked, self).__init__()
->>> self._network = network
->>> def predict(self, inputs):
->>> if len(inputs.shape) == 3:
->>> inputs = inputs[np.newaxis, :]
->>> result = self._network(Tensor(inputs.astype(np.float32)))
->>> return result.asnumpy()
+... def __init__(self, network):
+... super(ModelToBeAttacked, self).__init__()
+... self._network = network
+... def predict(self, inputs):
+... if len(inputs.shape) == 3:
+... inputs = inputs[np.newaxis, :]
+... result = self._network(Tensor(inputs.astype(np.float32)))
+... return result.asnumpy()
 >>> net = Net()
 >>> model = ModelToBeAttacked(net)
 >>> SCENE = 'Query_Limit'
@@ -161,14 +161,14 @@ class NES(Attack):
 >>> from mindarmour.adv_robustness.attacks import NES
 >>> from tests.ut.python.utils.mock_net import Net
 >>> class ModelToBeAttacked(BlackModel):
->>> def __init__(self, network):
->>> super(ModelToBeAttacked, self).__init__()
->>> self._network = network
->>> def predict(self, inputs):
->>> if len(inputs.shape) == 3:
->>> inputs = inputs[np.newaxis, :]
->>> result = self._network(Tensor(inputs.astype(np.float32)))
->>> return result.asnumpy()
+... def __init__(self, network):
+... super(ModelToBeAttacked, self).__init__()
+... self._network = network
+... def predict(self, inputs):
+... if len(inputs.shape) == 3:
+... inputs = inputs[np.newaxis, :]
+... result = self._network(Tensor(inputs.astype(np.float32)))
+... return result.asnumpy()
 >>> net = Net()
 >>> model = ModelToBeAttacked(net)
 >>> SCENE = 'Query_Limit'
@@ -53,12 +53,12 @@ class PointWiseAttack(Attack):
 >>> from mindarmour.adv_robustness.attacks import PointWiseAttack
 >>> from tests.ut.python.utils.mock_net import Net
 >>> class ModelToBeAttacked(BlackModel):
->>> def __init__(self, network):
->>> super(ModelToBeAttacked, self).__init__()
->>> self._network = network
->>> def predict(self, inputs):
->>> result = self._network(Tensor(inputs.astype(np.float32)))
->>> return result.asnumpy()
+... def __init__(self, network):
+... super(ModelToBeAttacked, self).__init__()
+... self._network = network
+... def predict(self, inputs):
+... result = self._network(Tensor(inputs.astype(np.float32)))
+... return result.asnumpy()
 >>> net = Net()
 >>> model = ModelToBeAttacked(net)
 >>> attack = PointWiseAttack(model)
@@ -99,12 +99,12 @@ class PointWiseAttack(Attack):
 >>> from mindarmour.adv_robustness.attacks import PointWiseAttack
 >>> from tests.ut.python.utils.mock_net import Net
 >>> class ModelToBeAttacked(BlackModel):
->>> def __init__(self, network):
->>> super(ModelToBeAttacked, self).__init__()
->>> self._network = network
->>> def predict(self, inputs):
->>> result = self._network(Tensor(inputs.astype(np.float32)))
->>> return result.asnumpy()
+... def __init__(self, network):
+... super(ModelToBeAttacked, self).__init__()
+... self._network = network
+... def predict(self, inputs):
+... result = self._network(Tensor(inputs.astype(np.float32)))
+... return result.asnumpy()
 >>> net = Net()
 >>> model = ModelToBeAttacked(net)
 >>> attack = PointWiseAttack(model)
@@ -71,22 +71,22 @@ class PSOAttack(Attack):
 >>> from mindarmour import BlackModel
 >>> from mindarmour.adv_robustness.attacks import PSOAttack
 >>> class ModelToBeAttacked(BlackModel):
->>> def __init__(self, network):
->>> super(ModelToBeAttacked, self).__init__()
->>> self._network = network
->>> def predict(self, inputs):
->>> if len(inputs.shape) == 1:
->>> inputs = np.expand_dims(inputs, axis=0)
->>> result = self._network(Tensor(inputs.astype(np.float32)))
->>> return result.asnumpy()
+... def __init__(self, network):
+... super(ModelToBeAttacked, self).__init__()
+... self._network = network
+... def predict(self, inputs):
+... if len(inputs.shape) == 1:
+... inputs = np.expand_dims(inputs, axis=0)
+... result = self._network(Tensor(inputs.astype(np.float32)))
+... return result.asnumpy()
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>>
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+...
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> model = ModelToBeAttacked(net)
 >>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)
@@ -516,22 +516,22 @@ class PSOAttack(Attack):
 >>> from mindarmour import BlackModel
 >>> from mindarmour.adv_robustness.attacks import PSOAttack
 >>> class ModelToBeAttacked(BlackModel):
->>> def __init__(self, network):
->>> super(ModelToBeAttacked, self).__init__()
->>> self._network = network
->>> def predict(self, inputs):
->>> if len(inputs.shape) == 1:
->>> inputs = np.expand_dims(inputs, axis=0)
->>> result = self._network(Tensor(inputs.astype(np.float32)))
->>> return result.asnumpy()
+... def __init__(self, network):
+... super(ModelToBeAttacked, self).__init__()
+... self._network = network
+... def predict(self, inputs):
+... if len(inputs.shape) == 1:
+... inputs = np.expand_dims(inputs, axis=0)
+... result = self._network(Tensor(inputs.astype(np.float32)))
+... return result.asnumpy()
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>>
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+...
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> model = ModelToBeAttacked(net)
 >>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)
@@ -46,12 +46,12 @@ class SaltAndPepperNoiseAttack(Attack):
 >>> from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack
 >>> from tests.ut.python.utils.mock_net import Net
 >>> class ModelToBeAttacked(BlackModel):
->>> def __init__(self, network):
->>> super(ModelToBeAttacked, self).__init__()
->>> self._network = network
->>> def predict(self, inputs):
->>> result = self._network(Tensor(inputs.astype(np.float32)))
->>> return result.asnumpy()
+... def __init__(self, network):
+... super(ModelToBeAttacked, self).__init__()
+... self._network = network
+... def predict(self, inputs):
+... result = self._network(Tensor(inputs.astype(np.float32)))
+... return result.asnumpy()
 >>> net = Net()
 >>> model = ModelToBeAttacked(net)
 >>> attack = SaltAndPepperNoiseAttack(model)
@@ -89,12 +89,12 @@ class SaltAndPepperNoiseAttack(Attack):
 >>> from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack
 >>> from tests.ut.python.utils.mock_net import Net
 >>> class ModelToBeAttacked(BlackModel):
->>> def __init__(self, network):
->>> super(ModelToBeAttacked, self).__init__()
->>> self._network = network
->>> def predict(self, inputs):
->>> result = self._network(Tensor(inputs.astype(np.float32)))
->>> return result.asnumpy()
+... def __init__(self, network):
+... super(ModelToBeAttacked, self).__init__()
+... self._network = network
+... def predict(self, inputs):
+... result = self._network(Tensor(inputs.astype(np.float32)))
+... return result.asnumpy()
 >>> net = Net()
 >>> model = ModelToBeAttacked(net)
 >>> attack = PointWiseAttack(model)
@@ -100,13 +100,13 @@ class CarliniWagnerL2Attack(Attack):
 >>> from mindspore.nn import Cell
 >>> from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._softmax = M.Softmax()
->>>
->>> def construct(self, inputs):
->>> out = self._softmax(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._softmax = M.Softmax()
+...
+... def construct(self, inputs):
+... out = self._softmax(inputs)
+... return out
 >>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
 >>> label_np = np.array([3]).astype(np.int64)
 >>> num_classes = input_np.shape[1]
@@ -286,23 +286,19 @@ class CarliniWagnerL2Attack(Attack):
 >>> import mindspore.ops.operations as M
 >>> from mindspore.nn import Cell
 >>> from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack
->>>
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._softmax = M.Softmax()
->>>
->>> def construct(self, inputs):
->>> out = self._softmax(inputs)
->>> return out
->>>
+... def __init__(self):
+... super(Net, self).__init__()
+... self._softmax = M.Softmax()
+...
+... def construct(self, inputs):
+... out = self._softmax(inputs)
+... return out
 >>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
 >>> num_classes = input_np.shape[1]
->>>
 >>> label_np = np.array([3]).astype(np.int64)
 >>> attack_nonTargeted = CarliniWagnerL2Attack(net, num_classes, targeted=False)
 >>> advs_nonTargeted = attack_nonTargeted.generate(input_np, label_np)
->>>
 >>> target_np = np.array([1]).astype(np.int64)
 >>> attack_targeted = CarliniWagnerL2Attack(net, num_classes, targeted=False)
 >>> advs_targeted = attack_targeted.generate(input_np, target_np)
@@ -123,12 +123,12 @@ class DeepFool(Attack):
 >>> from mindspore import Tensor
 >>> from mindarmour.adv_robustness.attacks import DeepFool
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._softmax = P.Softmax()
->>> def construct(self, inputs):
->>> out = self._softmax(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._softmax = P.Softmax()
+... def construct(self, inputs):
+... out = self._softmax(inputs)
+... return out
 >>> net = Net()
 >>> attack = DeepFool(net, classes, max_iters=10, norm_level=2,
 ... bounds=(0.0, 1.0))
@@ -185,12 +185,12 @@ class DeepFool(Attack):
 >>> from mindspore import Tensor
 >>> from mindarmour.adv_robustness.attacks import DeepFool
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._softmax = P.Softmax()
->>> def construct(self, inputs):
->>> out = self._softmax(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._softmax = P.Softmax()
+... def construct(self, inputs):
+... out = self._softmax(inputs)
+... return out
 >>> net = Net()
 >>> attack = DeepFool(net, classes, max_iters=10, norm_level=2,
 ... bounds=(0.0, 1.0))
@@ -53,13 +53,13 @@ class GradientMethod(Attack):
 >>> from mindspore import Tensor
 >>> from mindarmour.adv_robustness.attacks import FastGradientMethod
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>>
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+...
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
 >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
 >>> net = Net()
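The attack construction and generate call for this example fall outside the hunk shown above. A minimal runnable sketch of how it continues, following the pattern of the BasicIterativeMethod docstring later in this diff; the label width is adjusted here (an assumption, not part of the diff) so the one-hot labels match the toy ReLU network's output size.

    import numpy as np
    import mindspore.nn as nn
    from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
    from mindarmour.adv_robustness.attacks import FastGradientMethod

    class Net(Cell):
        def __init__(self):
            super(Net, self).__init__()
            self._relu = nn.ReLU()
        def construct(self, inputs):
            return self._relu(inputs)

    net = Net()
    inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0.0, 0.4]]).astype(np.float32)
    # One-hot labels with the same width as the network output (3 columns here).
    labels = np.array([[0, 1, 0], [0, 0, 1]]).astype(np.float32)
    attack = FastGradientMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
    adv_x = attack.generate(inputs, labels)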
@@ -174,12 +174,12 @@ class FastGradientMethod(GradientMethod):
 >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
 >>> from mindarmour.adv_robustness.attacks import FastGradientMethod
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
 >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
 >>> net = Net()
@@ -254,12 +254,12 @@ class RandomFastGradientMethod(FastGradientMethod):
 >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
 >>> from mindarmour.adv_robustness.attacks import RandomFastGradientMethod
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
 >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -308,12 +308,12 @@ class FastGradientSignMethod(GradientMethod):
 >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
 >>> from mindarmour.adv_robustness.attacks import FastGradientSignMethod
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
 >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -383,14 +383,13 @@ class RandomFastGradientSignMethod(FastGradientSignMethod):
 >>> import mindspore.nn as nn
 >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
 >>> from mindarmour.adv_robustness.attacks import RandomFastGradientSignMethod
->>>
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
 >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -434,13 +433,12 @@ class LeastLikelyClassMethod(FastGradientSignMethod):
 >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
 >>> from mindarmour.adv_robustness.attacks import LeastLikelyClassMethod
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>>
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
 >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -485,12 +483,12 @@ class RandomLeastLikelyClassMethod(FastGradientSignMethod):
 >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
 >>> from mindarmour.adv_robustness.attacks import RandomLeastLikelyClassMethod
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
 >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -189,12 +189,12 @@ class BasicIterativeMethod(IterativeGradientMethod):
 >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
 >>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
 """
@@ -232,12 +232,12 @@ class BasicIterativeMethod(IterativeGradientMethod):
 >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
 >>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
 >>> adv_x = attack.generate([[0.3, 0.2, 0.6],
@@ -333,12 +333,12 @@ class MomentumIterativeMethod(IterativeGradientMethod):
 >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
 >>> from mindarmour.adv_robustness.attacks import MomentumIterativeMethod
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> attack = MomentumIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
 >>> adv_x = attack.generate([[0.5, 0.2, 0.6],
@@ -476,12 +476,12 @@ class ProjectedGradientDescent(BasicIterativeMethod):
 >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
 >>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> attack = ProjectedGradientDescent(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
 >>> adv_x = attack.generate([[0.6, 0.2, 0.6],
@@ -59,12 +59,12 @@ class JSMAAttack(Attack):
 >>> from mindspore.nn import Cell
 >>> from mindarmour.adv_robustness.attacks import JSMAAttack
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> input_shape = (1, 5)
 >>> batch_size, classes = input_shape
@@ -200,12 +200,12 @@ class JSMAAttack(Attack):
 >>> from mindspore.nn import Cell
 >>> from mindarmour.adv_robustness.attacks import JSMAAttack
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._relu = nn.ReLU()
->>> def construct(self, inputs):
->>> out = self._relu(inputs)
->>> return out
+... def __init__(self):
+... super(Net, self).__init__()
+... self._relu = nn.ReLU()
+... def construct(self, inputs):
+... out = self._relu(inputs)
+... return out
 >>> net = Net()
 >>> input_shape = (1, 5)
 >>> batch_size, classes = input_shape
@@ -57,7 +57,6 @@ class LBFGS(Attack):
 >>> import numpy as np
 >>> from mindarmour.adv_robustness.attacks import LBFGS
 >>> from tests.ut.python.utils.mock_net import Net
->>>
 >>> net = Net()
 >>> attack = LBFGS(net, is_targeted=True)
 """
@@ -41,13 +41,11 @@ class AdversarialDefense(Defense):
 >>> from mindarmour.adv_robustness.defenses import AdversarialDefense
 >>> from mindspore import nn
 >>> from tests.ut.python.utils.mock_net import Net
->>>
 >>> net = Net()
 >>> lr = 0.001
 >>> momentum = 0.9
 >>> batch_size = 32
 >>> num_class = 10
->>>
 >>> loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
 >>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
 >>> adv_defense = AdversarialDefense(net, loss_fn, optimizer)
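A minimal sketch of how the cleaned-up AdversarialDefense example is exercised. The training call and the data shapes are not part of the hunk above; they are assumptions based on the EnsembleAdversarialDefense hunks later in this diff, which feed the same mock `Net` batches of 1x32x32 images with 10 classes.

    import numpy as np
    from mindspore.nn import Momentum, SoftmaxCrossEntropyWithLogits
    from mindarmour.adv_robustness.defenses import AdversarialDefense
    from tests.ut.python.utils.mock_net import Net

    net = Net()
    loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
    optimizer = Momentum(net.trainable_params(), learning_rate=0.001, momentum=0.9)
    adv_defense = AdversarialDefense(net, loss_fn, optimizer)

    # One training step on random data shaped like the mock network's input
    # (batch of 1x32x32 images, 10 classes), mirroring the later hunks.
    batch_size, num_class = 32, 10
    inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
    labels = np.eye(num_class)[np.random.randint(num_class, size=batch_size)].astype(np.float32)
    loss = adv_defense.defense(inputs, labels)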
@@ -203,7 +201,6 @@ class EnsembleAdversarialDefense(AdversarialDefenseWithAttacks):
 >>> from mindarmour.adv_robustness.defenses import EnsembleAdversarialDefense
 >>> from mindspore import nn
 >>> from tests.ut.python.utils.mock_net import Net
->>>
 >>> net = Net()
 >>> lr = 0.001
 >>> momentum = 0.9
@@ -211,12 +208,10 @@ class EnsembleAdversarialDefense(AdversarialDefenseWithAttacks):
 >>> num_class = 10
 >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
 >>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
->>>
 >>> fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
 >>> pgd = ProjectedGradientDescent(net, loss_fn=loss_fn)
 >>> ead = EnsembleAdversarialDefense(net, [fgsm, pgd], loss_fn=loss_fn,
->>> optimizer=optimizer)
->>>
+... optimizer=optimizer)
 >>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
 >>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
 >>> labels = np.eye(num_classes)[labels].astype(np.float32)
@@ -80,14 +80,14 @@ class SimilarityDetector(Detector):
 >>> from mindspore import context
 >>> from mindarmour.adv_robustness.detectors import SimilarityDetector
 >>> class EncoderNet(Cell):
->>> def __init__(self, encode_dim):
->>> super(EncoderNet, self).__init__()
->>> self._encode_dim = encode_dim
->>> self.add = Add()
->>> def construct(self, inputs):
->>> return self.add(inputs, inputs)
->>> def get_encode_dim(self):
->>> return self._encode_dim
+... def __init__(self, encode_dim):
+... super(EncoderNet, self).__init__()
+... self._encode_dim = encode_dim
+... self.add = Add()
+... def construct(self, inputs):
+... return self.add(inputs, inputs)
+... def get_encode_dim(self):
+... return self._encode_dim
 >>> np.random.seed(5)
 >>> x_train = np.random.rand(10, 32, 32, 3).astype(np.float32)
 >>> perm = np.random.permutation(x_train.shape[0])
@@ -43,18 +43,17 @@ class EnsembleDetector(Detector):
 >>> from mindarmour.adv_robustness.detectors import RegionBasedDetector
 >>> from mindarmour.adv_robustness.detectors import EnsembleDetector
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self.add = Add()
->>> def construct(self, inputs):
->>> return self.add(inputs, inputs)
->>>
+... def __init__(self):
+... super(Net, self).__init__()
+... self.add = Add()
+... def construct(self, inputs):
+... return self.add(inputs, inputs)
 >>> class AutoNet(Cell):
->>> def __init__(self):
->>> super(AutoNet, self).__init__()
->>> self.add = Add()
->>> def construct(self, inputs):
->>> return self.add(inputs, inputs)
+... def __init__(self):
+... super(AutoNet, self).__init__()
+... self.add = Add()
+... def construct(self, inputs):
+... return self.add(inputs, inputs)
 >>> np.random.seed(6)
 >>> adv = np.random.rand(4, 4).astype(np.float32)
 >>> model = Model(Net())
@@ -55,11 +55,11 @@ class ErrorBasedDetector(Detector):
 >>> from mindspore import context
 >>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self.add = Add()
->>> def construct(self, inputs):
->>> return self.add(inputs, inputs)
+... def __init__(self):
+... super(Net, self).__init__()
+... self.add = Add()
+... def construct(self, inputs):
+... return self.add(inputs, inputs)
 >>> np.random.seed(5)
 >>> ori = np.random.rand(4, 4, 4).astype(np.float32)
 >>> np.random.seed(6)
@@ -59,11 +59,11 @@ class RegionBasedDetector(Detector):
 >>> from mindspore import context
 >>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self.add = Add()
->>> def construct(self, inputs):
->>> return self.add(inputs, inputs)
+... def __init__(self):
+... super(Net, self).__init__()
+... self.add = Add()
+... def construct(self, inputs):
+... return self.add(inputs, inputs)
 >>> np.random.seed(5)
 >>> ori = np.random.rand(4, 4).astype(np.float32)
 >>> labels = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0],
@@ -50,17 +50,17 @@ class SpatialSmoothing(Detector):
 Examples:
 >>> import numpy as np
->>> from mindspore.ops.operations as P
+>>> import mindspore.ops.operations as P
 >>> from mindspore.nn import Cell
 >>> from mindspore import Model
 >>> from mindspore import context
 >>> from mindarmour.adv_robustness.detectors import SpatialSmoothing
 >>> class Net(Cell):
->>> def __init__(self):
->>> super(Net, self).__init__()
->>> self._softmax = P.Softmax()
->>> def construct(self, inputs):
->>> return self._softmax(inputs)
+... def __init__(self):
+... super(Net, self).__init__()
+... self._softmax = P.Softmax()
+... def construct(self, inputs):
+... return self._softmax(inputs)
 >>> input_shape = (50, 3)
 >>> np.random.seed(1)
 >>> input_np = np.random.randn(*input_shape).astype(np.float32)
@@ -47,6 +47,8 @@ class AttackEvaluate:
 ValueError: If target_label is None when targeted is True.
 Examples:
+>>> import numpy as np
+>>> from mindarmour.adv_robustness.evaluations import AttackEvaluate
 >>> x = np.random.normal(size=(3, 512, 512, 3))
 >>> adv_x = np.random.normal(size=(3, 512, 512, 3))
 >>> y = np.array([[0.1, 0.1, 0.2, 0.6],
@@ -57,6 +59,10 @@ class AttackEvaluate: | |||||
... [0.0, 0.9, 0.1, 0.0]]) | ... [0.0, 0.9, 0.1, 0.0]]) | ||||
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y) | >>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y) | ||||
>>> mr = attack_eval.mis_classification_rate() | >>> mr = attack_eval.mis_classification_rate() | ||||
>>> acac = attack_eval.avg_conf_adv_class() | |||||
>>> l_0, l_2, l_inf = attack_eval.avg_lp_distance() | |||||
>>> ass = attack_eval.avg_ssim() | |||||
>>> nte = attack_eval.nte() | |||||
""" | """ | ||||
def __init__(self, inputs, labels, adv_inputs, adv_preds, | def __init__(self, inputs, labels, adv_inputs, adv_preds, | ||||
@@ -97,6 +103,10 @@ class AttackEvaluate: | |||||
Returns: | Returns: | ||||
float, ranges between (0, 1). The higher, the more successful the attack is. | float, ranges between (0, 1). The higher, the more successful the attack is. | ||||
Examples: | |||||
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y) | |||||
>>> mr = attack_eval.mis_classification_rate() | |||||
""" | """ | ||||
return self._success_idxes.shape[0]*1.0 / self._inputs.shape[0] | return self._success_idxes.shape[0]*1.0 / self._inputs.shape[0] | ||||
@@ -106,6 +116,10 @@ class AttackEvaluate: | |||||
Returns: | Returns: | ||||
float, ranges between (0, 1). The higher, the more successful the attack is. | float, ranges between (0, 1). The higher, the more successful the attack is. | ||||
Examples: | |||||
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y) | |||||
>>> acac = attack_eval.avg_conf_adv_class() | |||||
""" | """ | ||||
idxes = self._success_idxes | idxes = self._success_idxes | ||||
success_num = idxes.shape[0] | success_num = idxes.shape[0] | ||||
@@ -121,6 +135,10 @@ class AttackEvaluate:
 Returns:
 float, ranges between (0, 1). The lower, the more successful the attack is.
+Examples:
+>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
+>>> actc = attack_eval.avg_conf_true_class()
 """
 idxes = self._success_idxes
 success_num = idxes.shape[0]
@@ -140,6 +158,10 @@ class AttackEvaluate: | |||||
the more successful the attack is. | the more successful the attack is. | ||||
- If return value is -1, there is no success adversarial examples. | - If return value is -1, there is no success adversarial examples. | ||||
Examples: | |||||
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y) | |||||
>>> l_0, l_2, l_inf = attack_eval.avg_lp_distance() | |||||
""" | """ | ||||
idxes = self._success_idxes | idxes = self._success_idxes | ||||
success_num = idxes.shape[0] | success_num = idxes.shape[0] | ||||
@@ -168,6 +190,10 @@ class AttackEvaluate: | |||||
successful the attack is. | successful the attack is. | ||||
- If return value is -1: there is no success adversarial examples. | - If return value is -1: there is no success adversarial examples. | ||||
Examples: | |||||
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y) | |||||
>>> ass = attack_eval.avg_ssim() | |||||
""" | """ | ||||
success_num = self._success_idxes.shape[0] | success_num = self._success_idxes.shape[0] | ||||
if success_num == 0: | if success_num == 0: | ||||
@@ -186,10 +212,13 @@ class AttackEvaluate: | |||||
References: `Towards Imperceptible and Robust Adversarial Example Attacks | References: `Towards Imperceptible and Robust Adversarial Example Attacks | ||||
against Neural Networks <https://arxiv.org/abs/1801.04693>`_ | against Neural Networks <https://arxiv.org/abs/1801.04693>`_ | ||||
Returns: | Returns: | ||||
float, ranges between (0, 1). The higher, the more successful the | float, ranges between (0, 1). The higher, the more successful the | ||||
attack is. | attack is. | ||||
Examples: | |||||
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y) | |||||
>>> nte = attack_eval.nte() | |||||
""" | """ | ||||
idxes = self._success_idxes | idxes = self._success_idxes | ||||
success_num = idxes.shape[0] | success_num = idxes.shape[0] | ||||
@@ -57,12 +57,14 @@ class BlackDefenseEvaluate: | |||||
max_queries (int): Attack budget, the maximum number of queries. | max_queries (int): Attack budget, the maximum number of queries. | ||||
Examples: | Examples: | ||||
>>> import numpy as np | |||||
>>> from mindarmour.adv_robustness.evaluations import BlackDefenseEvaluate | |||||
>>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6], | >>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6], | ||||
>>> [0.1, 0.7, 0.0, 0.2], | |||||
>>> [0.8, 0.1, 0.0, 0.1]]) | |||||
... [0.1, 0.7, 0.0, 0.2], | |||||
... [0.8, 0.1, 0.0, 0.1]]) | |||||
>>> def_preds = np.array([[0.1, 0.1, 0.1, 0.7], | >>> def_preds = np.array([[0.1, 0.1, 0.1, 0.7], | ||||
>>> [0.1, 0.6, 0.2, 0.1], | |||||
>>> [0.1, 0.2, 0.1, 0.6]]) | |||||
... [0.1, 0.6, 0.2, 0.1], | |||||
... [0.1, 0.2, 0.1, 0.6]]) | |||||
>>> raw_query_counts = np.array([0,20,10]) | >>> raw_query_counts = np.array([0,20,10]) | ||||
>>> def_query_counts = np.array([0,50,60]) | >>> def_query_counts = np.array([0,50,60]) | ||||
>>> raw_query_time = np.array([0.1, 2, 1]) | >>> raw_query_time = np.array([0.1, 2, 1]) | ||||
@@ -79,7 +81,10 @@ class BlackDefenseEvaluate: | |||||
... def_detection_counts, | ... def_detection_counts, | ||||
... true_labels, | ... true_labels, | ||||
... max_queries) | ... max_queries) | ||||
>>> def_eval.qcv() | |||||
>>> qcv = def_eval.qcv() | |||||
>>> asv = def_eval.asv() | |||||
>>> fpr = def_eval.fpr() | |||||
>>> qrv = def_eval.qrv() | |||||
""" | """ | ||||
def __init__(self, raw_preds, def_preds, raw_query_counts, def_query_counts, | def __init__(self, raw_preds, def_preds, raw_query_counts, def_query_counts, | ||||
@@ -39,6 +39,9 @@ class DefenseEvaluate: | |||||
one-dimension array whose size is raw_preds.shape[0]. | one-dimension array whose size is raw_preds.shape[0]. | ||||
Examples: | Examples: | ||||
>>> import numpy as np | |||||
>>> from mindarmour.adv_robustness.evaluations import DefenseEvaluate | |||||
>>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6], | >>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6], | ||||
... [0.1, 0.7, 0.0, 0.2], | ... [0.1, 0.7, 0.0, 0.2], | ||||
... [0.8, 0.1, 0.0, 0.1]]) | ... [0.8, 0.1, 0.0, 0.1]]) | ||||
@@ -65,6 +68,9 @@ class DefenseEvaluate: | |||||
Returns: | Returns: | ||||
float, the higher, the more successful the defense is. | float, the higher, the more successful the defense is. | ||||
Examples: | |||||
>>> def_eval.cav() | |||||
""" | """ | ||||
def_succ_num = np.sum(np.argmax(self._def_preds, axis=1) | def_succ_num = np.sum(np.argmax(self._def_preds, axis=1) | ||||
== self._true_labels) | == self._true_labels) | ||||
@@ -79,6 +85,9 @@ class DefenseEvaluate: | |||||
Returns: | Returns: | ||||
float, the higher, the more successful the defense is. | float, the higher, the more successful the defense is. | ||||
Examples: | |||||
>>> def_eval.crr() | |||||
""" | """ | ||||
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels | cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels | ||||
cond2 = np.argmax(self._raw_preds, axis=1) != self._true_labels | cond2 = np.argmax(self._raw_preds, axis=1) != self._true_labels | ||||
@@ -107,6 +116,9 @@ class DefenseEvaluate: | |||||
- float, the lower, the more successful the defense is. | - float, the lower, the more successful the defense is. | ||||
- If return value == -1, len(idxes) == 0. | - If return value == -1, len(idxes) == 0. | ||||
Examples: | |||||
>>> def_eval.ccv() | |||||
""" | """ | ||||
idxes = np.arange(self._num_samples) | idxes = np.arange(self._num_samples) | ||||
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels | cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels | ||||
@@ -133,6 +145,9 @@ class DefenseEvaluate: | |||||
more successful the defense. | more successful the defense. | ||||
- If return value == -1, idxes == 0. | - If return value == -1, idxes == 0. | ||||
Examples: | |||||
>>> def_eval.cos() | |||||
""" | """ | ||||
idxes = np.arange(self._num_samples) | idxes = np.arange(self._num_samples) | ||||
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels | cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels | ||||
@@ -46,10 +46,12 @@ class RadarMetric: | |||||
ValueError: If scale not in ['hide', 'norm', 'sparse', 'dense']. | ValueError: If scale not in ['hide', 'norm', 'sparse', 'dense']. | ||||
Examples: | Examples: | ||||
>>> import numpy as np | |||||
>>> from mindarmour.adv_robustness.evaluations import RadarMetric | |||||
>>> metrics_name = ['MR', 'ACAC', 'ASS', 'NTE', 'ACTC'] | >>> metrics_name = ['MR', 'ACAC', 'ASS', 'NTE', 'ACTC'] | ||||
>>> def_metrics = [0.9, 0.85, 0.6, 0.7, 0.8] | >>> def_metrics = [0.9, 0.85, 0.6, 0.7, 0.8] | ||||
>>> raw_metrics = [0.5, 0.3, 0.55, 0.65, 0.7] | >>> raw_metrics = [0.5, 0.3, 0.55, 0.65, 0.7] | ||||
>>> metrics_data = [def_metrics, raw_metrics] | |||||
>>> metrics_data = np.array([def_metrics, raw_metrics]) | |||||
>>> metrics_labels = ['before', 'after'] | >>> metrics_labels = ['before', 'after'] | ||||
>>> rm = RadarMetric(metrics_name, | >>> rm = RadarMetric(metrics_name, | ||||
... metrics_data, | ... metrics_data, | ||||
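The RadarMetric construction is truncated by the hunk above. For reference, a hedged sketch of how such an example typically finishes; the remaining arguments, the `title` keyword, and the `show()` call are assumptions based on the class's documented parameters (the `scale` value comes from the accepted list shown in this hunk), not part of this diff.

    import numpy as np
    from mindarmour.adv_robustness.evaluations import RadarMetric

    metrics_name = ['MR', 'ACAC', 'ASS', 'NTE', 'ACTC']
    def_metrics = [0.9, 0.85, 0.6, 0.7, 0.8]
    raw_metrics = [0.5, 0.3, 0.55, 0.65, 0.7]
    metrics_data = np.array([def_metrics, raw_metrics])
    metrics_labels = ['before', 'after']
    # 'sparse' is one of the accepted scale values ('hide', 'norm', 'sparse', 'dense').
    rm = RadarMetric(metrics_name, metrics_data, metrics_labels, title='', scale='sparse')
    rm.show()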