Delete file examples/face_adversarial_attack/example

update examples/face_adversarial_attack/example/AFR.py.
Signed-off-by: 君君臣臣君 <mingjun@isrc.iscas.ac.cn>

update examples/face_adversarial_attack/example/loss_design.py.
MyTrainOneStepCell now inherits from nn.TrainOneStepCell, which reduces the amount of code; removed unused code; fixed naming errors.
Signed-off-by: 君君臣臣君 <mingjun@isrc.iscas.ac.cn>

update examples/face_adversarial_attack/example/AFR.py.
Fixed naming errors; adjusted the import order.
Signed-off-by: 君君臣臣君 <mingjun@isrc.iscas.ac.cn>

update examples/face_adversarial_attack/example/example_non-target_attack.py.
Updated the imported class names.
Signed-off-by: 君君臣臣君 <mingjun@isrc.iscas.ac.cn>

update examples/face_adversarial_attack/example/example_target_attack.py.
Signed-off-by: 君君臣臣君 <mingjun@isrc.iscas.ac.cn>

update examples/face_adversarial_attack/example/loss_design.py.
Signed-off-by: 君君臣臣君 <mingjun@isrc.iscas.ac.cn>

update examples/face_adversarial_attack/example/loss_design.py.
Signed-off-by: 君君臣臣君 <mingjun@isrc.iscas.ac.cn>
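For reviewers, the central loss_design.py change is that the custom training cell now subclasses nn.TrainOneStepCell instead of re-implementing its bookkeeping on top of nn.Cell. A minimal sketch of the resulting pattern, mirroring the patched code in the diff below:

```python
import mindspore
from mindspore import ops, nn


class MyTrainOneStepCell(nn.TrainOneStepCell):
    """One training step that also returns the gradients to the caller."""

    def __init__(self, network, optimizer, sens=1.0):
        # nn.TrainOneStepCell already stores the network, enables its grad
        # graph and keeps the optimizer parameters, so the boilerplate the
        # old nn.Cell-based version carried is no longer needed.
        super(MyTrainOneStepCell, self).__init__(network, optimizer, sens)
        self.grad = ops.composite.GradOperation(get_all=True, sens_param=False)

    def construct(self, *inputs):
        loss = self.network(*inputs)               # forward pass through the loss cell
        grads = self.grad(self.network)(*inputs)   # gradients w.r.t. the inputs (the mask)
        self.optimizer(grads)                      # apply the update
        return grads, loss
```

AFR.py then builds the cell as `train_net = MyTrainOneStepCell(net_with_criterion, self.opt)` and calls `grads, loss = train_net(self.mask_tensor)` on every iteration, as the AFR.py hunk below shows.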
@@ -15,25 +15,25 @@
 import os
 import re
 import numpy as np
+import matplotlib.image as mp
+import mindspore
+import mindspore.dataset.vision.py_transforms as P
+from mindspore.dataset.vision.py_transforms import ToPIL as ToPILImage
+from mindspore import Parameter, ops, nn, Tensor
+from mindspore.dataset.vision.py_transforms import ToTensor
 import dlib
-import matplotlib.image as mp
 import face_recognition as fr
 import face_recognition_models as frm
 from PIL import Image, ImageDraw
-import mindspore
-from mindspore.dataset.vision.py_transforms import ToPIL as ToPILImage
-from mindspore import Parameter,ops, nn,Tensor
-from mindspore.dataset.vision.py_transforms import ToTensor
-import mindspore.dataset.vision.py_transforms as P
-from loss_design import TrainOneStepCell,MyWithLossCell,FaceLoss_no_target_attack,FaceLoss_target_attack
+from loss_design import MyTrainOneStepCell, MyWithLossCell, FaceLossTargetAttack, FaceLossNoTargetAttack
 from FaceRecognition.eval import get_net


-class Attack(object):
+class FaceAdversarialAttack(object):
     """
     Class used to create adversarial facial recognition attacks
     """

-    def __init__(self,input_img,target_img,seed=None):
+    def __init__(self, input_img, target_img, seed=None):
         """
         Initialization for Attack class.
@@ -44,7 +44,7 @@ class Attack(object):
         """
-        if (seed != None): np.random.seed(seed)
+        if (seed is not None): np.random.seed(seed)
         self.MEAN = Tensor([0.485, 0.456, 0.406])
         self.STD = Tensor([0.229, 0.224, 0.225])
         self.LOSS = Tensor(0)
@@ -56,11 +56,12 @@ class Attack(object):
         self.input_tensor = Tensor(self.normalize(self.tensorize(input_img)))
         self.target_tensor = Tensor(self.normalize(self.tensorize(target_img)))
         mp.imsave('./outputs/input图像.jpg', np.transpose(self._reverse_norm(self.input_tensor).asnumpy(), (1, 2, 0)))
-        mp.imsave('./outputs/target图像.jpg', np.transpose(self._reverse_norm(self.target_tensor).asnumpy(), (1, 2, 0)))
+        mp.imsave('./outputs/target图像.jpg',
+                  np.transpose(self._reverse_norm(self.target_tensor).asnumpy(), (1, 2, 0)))
-        self.input_emb = self.resnet(self.expand_dims(self.input_tensor,0))
-        self.target_emb = self.resnet(self.expand_dims(self.target_tensor,0))
+        self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
+        self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))
         self.adversarial_emb = None
         self.mask_tensor = self._create_mask(input_img)
         self.ref = self.mask_tensor
@@ -69,28 +70,29 @@ class Attack(object):
-    def train(self,attack_method):
+    def train(self, attack_method):
         """
         Optimized adversarial image.
         """
         if attack_method == "non-target attack":
-            LOSS = FaceLoss_no_target_attack(self.target_emb)
+            LOSS = FaceLossNoTargetAttack(self.target_emb)
         if attack_method == "target_attack":
-            LOSS = FaceLoss_target_attack(self.target_emb)
+            LOSS = FaceLossTargetAttack(self.target_emb)
-        net_with_criterion = MyWithLossCell(self.resnet, LOSS,self.input_tensor)
-        train_net = TrainOneStepCell(net_with_criterion, self.opt)
+        net_with_criterion = MyWithLossCell(self.resnet, LOSS, self.input_tensor)
+        train_net = MyTrainOneStepCell(net_with_criterion, self.opt)
         for i in range(2000):
             self.mask_tensor = Tensor(self.pm)
-            grads,loss = train_net(self.mask_tensor)
+            grads, loss = train_net(self.mask_tensor)
             print("epoch %d ,loss: %f \n " % (i, loss.asnumpy().item()))
-            self.mask_tensor = ops.clip_by_value(self.mask_tensor, Tensor(0, mindspore.float32), Tensor(1, mindspore.float32))
+            self.mask_tensor = ops.clip_by_value(
+                self.mask_tensor, Tensor(0, mindspore.float32), Tensor(1, mindspore.float32))
         adversarial_tensor = self._apply(
             self.input_tensor,
@@ -124,33 +126,12 @@ class Attack(object):
         print("================================")
         print("adversarial:", adversarial)
         print("adversarial_confidence:", self.adversarial_emb.asnumpy()[0][adversarial])
-        print("Confidence changes for target::", self.adversarial_emb.asnumpy()[0][target])
-        print("Confidence changes for input:", self.adversarial_emb.asnumpy()[0][input])
-        print("================================")
-        print("target:", target)
-        print("target_confidence:", self.target_emb.asnumpy()[0][target])
-        print("input:%d, target:%d, adversarial:%d" % (input,target, adversarial))
-
-    def test1(self,adversarial_tensor):
-        self.adversarial_emb = self.resnet(self.expand_dims((adversarial_tensor - self.MEAN[:, None, None]) / self.STD[:, None, None], 0))
-        self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
-        self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))
-        adversarial = np.argmax(self.adversarial_emb.asnumpy())
-        target = np.argmax(self.target_emb.asnumpy())
-        input = np.argmax(self.input_emb.asnumpy())
-        print("input:", input)
-        print("input_confidence:", self.input_emb.asnumpy()[0][input])
-        print("================================")
-        print("adversarial:", adversarial)
-        print("adversarial_confidence:", self.adversarial_emb.asnumpy()[0][adversarial])
+        print("Confidence changes for target:", self.adversarial_emb.asnumpy()[0][target])
         print("Confidence changes for input:", self.adversarial_emb.asnumpy()[0][input])
         print("================================")
         print("target:", target)
         print("target_confidence:", self.target_emb.asnumpy()[0][target])
-        print("input:%d, target:%d, adversarial:%d" % (input, target, adversarial))
+        print("input: %d, target: %d, adversarial: %d" % (input, target, adversarial))

     def _reverse_norm(self, image_tensor):
@@ -29,7 +29,7 @@ if __name__ == '__main__':
     targets = AFR.load_data('opencv_photo/target/')
-    adversarial = AFR.Attack(inputs[0], targets[0])
+    adversarial = AFR.FaceAdversarialAttack(inputs[0], targets[0])
     attack_method = "non-target attack"
     adversarial_tensor, mask_tensor = adversarial.train(attack_method)
@@ -29,7 +29,7 @@ if __name__ == '__main__':
     targets = AFR.load_data('opencv_photo/target/')
-    adversarial = AFR.Attack(inputs[0], targets[0])
+    adversarial = AFR.FaceAdversarialAttack(inputs[0], targets[0])
     attack_method = "target_attack"
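Taken together, the two example scripts now drive the attack through the renamed class. A hedged usage sketch for orientation (the input directory name is an assumption; only the target path is visible in the hunks above):

```python
import AFR

# 'opencv_photo/input/' is assumed for illustration; 'opencv_photo/target/'
# is the path shown in the example scripts.
inputs = AFR.load_data('opencv_photo/input/')
targets = AFR.load_data('opencv_photo/target/')

attack = AFR.FaceAdversarialAttack(inputs[0], targets[0])
adversarial_tensor, mask_tensor = attack.train("non-target attack")  # or "target_attack"
```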
@@ -13,14 +13,14 @@
 # limitations under the License.
 # ============================================================================
 import mindspore
-from mindspore import ops, nn,Tensor
+from mindspore import ops, nn, Tensor
 from mindspore.dataset.vision.py_transforms import ToTensor
 import mindspore.dataset.vision.py_transforms as P


-class TrainOneStepCell(nn.Cell):
+class MyTrainOneStepCell(nn.TrainOneStepCell):
     """
     Encapsulation class of network training.
@@ -34,24 +34,22 @@ class TrainOneStepCell(nn.Cell):
     """
     def __init__(self, network, optimizer, sens=1.0):
-        super(TrainOneStepCell, self).__init__(auto_prefix=False)
-        self.network = network
-        self.network.set_grad()
-        self.optimizer = optimizer
-        self.weights = self.optimizer.parameters
+        super(MyTrainOneStepCell, self).__init__(network, optimizer, sens)
         self.grad = ops.composite.GradOperation(get_all=True, sens_param=False)

-    def construct(self,*inputs):
+    def construct(self, *inputs):
         """Defines the computation performed."""
         loss = self.network(*inputs)
         grads = self.grad(self.network)(*inputs)
         self.optimizer(grads)
-        return grads,loss
+        return grads, loss


 class MyWithLossCell(nn.Cell):
-    def __init__(self,net,loss_fn,input_tensor):
+    def __init__(self, net, loss_fn, input_tensor):
         super(MyWithLossCell, self).__init__(auto_prefix=False)
         self.net = net
         self._loss_fn = loss_fn
@@ -63,11 +61,14 @@ class MyWithLossCell(nn.Cell):
         self.input_tensor = input_tensor
         self.input_emb = self.net(self.expand_dims(self.input_tensor, 0))

-    def construct(self,mask_tensor):
+    def construct(self, mask_tensor):
         ref = mask_tensor
-        adversarial_tensor = mindspore.numpy.where((ref == 0), self.input_tensor, (mask_tensor - self.MEAN[:, None, None] )/ self.STD[:, None, None])
+        adversarial_tensor = mindspore.numpy.where(
+            (ref == 0),
+            self.input_tensor,
+            (mask_tensor - self.MEAN[:, None, None]) / self.STD[:, None, None])
         adversarial_emb = self.net(self.expand_dims(adversarial_tensor, 0))
-        loss = self._loss_fn( adversarial_emb,self.input_emb,mask_tensor)
+        loss = self._loss_fn(adversarial_emb, self.input_emb, mask_tensor)
         return loss

     @property
@@ -75,11 +76,11 @@ class MyWithLossCell(nn.Cell):
         return self.net


-class FaceLoss_target_attack(nn.Cell):
+class FaceLossTargetAttack(nn.Cell):
     """The loss function of the target attack"""

-    def __init__(self,target_emb):
-        super(FaceLoss_target_attack, self).__init__()
+    def __init__(self, target_emb):
+        super(FaceLossTargetAttack, self).__init__()
         self.uniformreal = ops.UniformReal(seed=2)
         self.sum = ops.ReduceSum(keep_dims=False)
         self.norm = nn.Norm(keep_dims=True)
@@ -92,21 +93,7 @@ class FaceLoss_target_attack(nn.Cell):
         self.abs = ops.Abs()
         self.reduce_mean = ops.ReduceMean()

-    def construct(self, adversarial_emb,input_emb,mask_tensor):
-        #像素平滑
-        # vert_diff = mask_tensor[:, 1:] - mask_tensor[:, :-1]
-        # hor_diff = mask_tensor[:, :, 1:] - mask_tensor[:, :, :-1]
-        # vert_diff_sq = self.pow(vert_diff, 2)
-        # hor_diff_sq = self.pow(hor_diff, 2)
-        # A = self.zeroslike(Tensor(self.uniformreal((3, 1, 112))))
-        # B = self.zeroslike(Tensor(self.uniformreal((3, 112, 1))))
-        # vert_pad = self.concat_op1((vert_diff_sq, A))
-        # hor_pad = self.concat_op2((hor_diff_sq, B))
-        # tv_sum = vert_pad + hor_pad
-        # tv = ops.functional.sqrt(tv_sum + 1e-5)
-        # tv_final_sum = self.sum(tv)
-        # tv_loss = (1e-4) * tv_final_sum
-        # print("tv_loss:",tv_loss)
+    def construct(self, adversarial_emb, input_emb, mask_tensor):
         prod_sum = self.reduce_sum(adversarial_emb * self.target_emb, (1,))
         square1 = self.reduce_sum(ops.functional.square(adversarial_emb), (1,))
         square2 = self.reduce_sum(ops.functional.square(self.target_emb), (1,))
@@ -116,11 +103,11 @@ class FaceLoss_target_attack(nn.Cell):
         return loss


-class FaceLoss_no_target_attack(nn.Cell):
+class FaceLossNoTargetAttack(nn.Cell):
     """The loss function of the non-target attack"""

     def __init__(self, target_emb):
         """初始化"""
-        super(FaceLoss_no_target_attack, self).__init__()
+        super(FaceLossNoTargetAttack, self).__init__()
         self.uniformreal = ops.UniformReal(seed=2)
         self.sum = ops.ReduceSum(keep_dims=False)
         self.norm = nn.Norm(keep_dims=True)
@@ -134,21 +121,6 @@ class FaceLoss_no_target_attack(nn.Cell):
         self.reduce_mean = ops.ReduceMean()

     def construct(self, adversarial_emb, input_emb, mask_tensor):
-        # 像素平滑
-        # vert_diff = mask_tensor[:, 1:] - mask_tensor[:, :-1]
-        # hor_diff = mask_tensor[:, :, 1:] - mask_tensor[:, :, :-1]
-        # vert_diff_sq = self.pow(vert_diff, 2)
-        # hor_diff_sq = self.pow(hor_diff, 2)
-        # A = self.zeroslike(Tensor(self.uniformreal((3, 1, 112))))
-        # B = self.zeroslike(Tensor(self.uniformreal((3, 112, 1))))
-        # vert_pad = self.concat_op1((vert_diff_sq, A))
-        # hor_pad = self.concat_op2((hor_diff_sq, B))
-        # tv_sum = vert_pad + hor_pad
-        # tv = ops.functional.sqrt(tv_sum + 1e-5)
-        # tv_final_sum = self.sum(tv)
-        # tv_loss = (1e-4) * tv_final_sum
-        # print("tv_loss:",tv_loss)
         prod_sum = self.reduce_sum(adversarial_emb * input_emb, (1,))
         square1 = self.reduce_sum(ops.functional.square(adversarial_emb), (1,))
         square2 = self.reduce_sum(ops.functional.square(input_emb), (1,))
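Both loss cells assemble the ingredients of a cosine similarity between embeddings (prod_sum, square1, square2); the final combination lies outside the changed lines, but the quantity being built is the standard cosine similarity. A NumPy sketch for reference only, not part of the patch:

```python
import numpy as np

def embedding_cosine(a, b):
    """Row-wise cosine similarity, mirroring prod_sum / square1 / square2 above."""
    prod_sum = np.sum(a * b, axis=1)
    square1 = np.sum(np.square(a), axis=1)
    square2 = np.sum(np.square(b), axis=1)
    return prod_sum / np.sqrt(square1 * square2)

# FaceLossTargetAttack compares the adversarial embedding with the stored
# target embedding; FaceLossNoTargetAttack compares it with the input embedding.
```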
@@ -15,7 +15,7 @@
 import numpy as np
 import matplotlib.image as mp
-from mindspore import context,Tensor
+from mindspore import context, Tensor
 import mindspore
 from mindspore.dataset.vision.py_transforms import ToTensor
 import mindspore.dataset.vision.py_transforms as P