
update

pull/418/head
lmj 2 years ago
parent
commit
87aca7f8dd
9 changed files with 673 additions and 0 deletions
  1. +270 -0   examples/community/face_adversarial_attack/AFR.py
  2. +86  -0   examples/community/face_adversarial_attack/README.md
  3. +34  -0   examples/community/face_adversarial_attack/camera.py
  4. +40  -0   examples/community/face_adversarial_attack/example_non-target_attack.py
  5. +41  -0   examples/community/face_adversarial_attack/example_target_attack.py
  6. +132 -0   examples/community/face_adversarial_attack/loss_design.py
  7. BIN       examples/community/face_adversarial_attack/opencv_photo/input/input1.jpg
  8. BIN       examples/community/face_adversarial_attack/opencv_photo/target/target1.jpg
  9. +70  -0   examples/community/face_adversarial_attack/test.py

+270 -0   examples/community/face_adversarial_attack/AFR.py

@@ -0,0 +1,270 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import re
import numpy as np
import mindspore
import mindspore.dataset.vision.py_transforms as P
from mindspore.dataset.vision.py_transforms import ToPIL as ToPILImage
from mindspore import Parameter, ops, nn, Tensor
from mindspore.dataset.vision.py_transforms import ToTensor
import dlib
import matplotlib.image as mp
import face_recognition as fr
import face_recognition_models as frm
from PIL import Image, ImageDraw
from loss_design import MyTrainOneStepCell, MyWithLossCell, FaceLossTargetAttack, FaceLossNoTargetAttack
from FaceRecognition.eval import get_net

class FaceAdversarialAttack(object):
    """
    Class used to create adversarial facial recognition attacks.
    """

    def __init__(self, input_img, target_img, seed=None):
        """
        Initialization for the attack class.

        Args:
            input_img : Image to train on.
            target_img : Image to target the adversarial attack against.
            seed : Optional custom seed for reproducibility. Default is generated randomly.
        """
        if seed is not None:
            np.random.seed(seed)
        self.MEAN = Tensor([0.485, 0.456, 0.406])
        self.STD = Tensor([0.229, 0.224, 0.225])
        self.LOSS = Tensor(0)
        self.expand_dims = mindspore.ops.ExpandDims()
        self.imageize = ToPILImage()
        self.tensorize = ToTensor()
        self.normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.resnet = get_net()
        self.input_tensor = Tensor(self.normalize(self.tensorize(input_img)))
        self.target_tensor = Tensor(self.normalize(self.tensorize(target_img)))
        mp.imsave('./outputs/input图像.jpg',
                  np.transpose(self._reverse_norm(self.input_tensor).asnumpy(), (1, 2, 0)))
        mp.imsave('./outputs/target图像.jpg',
                  np.transpose(self._reverse_norm(self.target_tensor).asnumpy(), (1, 2, 0)))

        self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
        self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))
        self.adversarial_emb = None
        self.mask_tensor = self._create_mask(input_img)
        self.ref = self.mask_tensor
        self.pm = Parameter(self.mask_tensor)
        self.opt = nn.Adam([self.pm], learning_rate=0.01, weight_decay=0.0001)

    def train(self, attack_method):
        """
        Optimize the adversarial mask for the chosen attack.

        Args:
            attack_method : Either "non-target attack" or "target_attack".

        Returns:
            adversarial_tensor, mask_tensor : The de-normalized adversarial image and the trained mask.
        """
        if attack_method == "non-target attack":
            LOSS = FaceLossNoTargetAttack(self.target_emb)
        elif attack_method == "target_attack":
            LOSS = FaceLossTargetAttack(self.target_emb)
        else:
            raise ValueError("attack_method must be 'non-target attack' or 'target_attack'")

        net_with_criterion = MyWithLossCell(self.resnet, LOSS, self.input_tensor)
        train_net = MyTrainOneStepCell(net_with_criterion, self.opt)

        for i in range(2000):
            self.mask_tensor = Tensor(self.pm)
            grads, loss = train_net(self.mask_tensor)
            print("epoch %d, loss: %f" % (i, loss.asnumpy().item()))
            # Keep the mask pixels inside the valid [0, 1] range.
            self.mask_tensor = ops.clip_by_value(
                self.mask_tensor, Tensor(0, mindspore.float32), Tensor(1, mindspore.float32))

        # Paste the re-normalized mask onto the input face and undo the normalization.
        adversarial_tensor = self._apply(
            self.input_tensor,
            (self.mask_tensor - self.MEAN[:, None, None]) / self.STD[:, None, None],
            self.ref)
        adversarial_tensor = self._reverse_norm(adversarial_tensor)
        return adversarial_tensor, self.mask_tensor

    def test(self):
        """
        Test the recognition of adversarial images by the model.
        """
        adversarial_tensor = self._apply(
            self.input_tensor,
            (self.mask_tensor - self.MEAN[:, None, None]) / self.STD[:, None, None],
            self.ref)

        self.adversarial_emb = self.resnet(self.expand_dims(adversarial_tensor, 0))
        self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
        self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))

        adversarial = np.argmax(self.adversarial_emb.asnumpy())
        target = np.argmax(self.target_emb.asnumpy())
        input = np.argmax(self.input_emb.asnumpy())

        print("input:", input)
        print("input_confidence:", self.input_emb.asnumpy()[0][input])
        print("================================")
        print("adversarial:", adversarial)
        print("adversarial_confidence:", self.adversarial_emb.asnumpy()[0][adversarial])
        print("Confidence changes for target:", self.adversarial_emb.asnumpy()[0][target])
        print("Confidence changes for input:", self.adversarial_emb.asnumpy()[0][input])
        print("================================")
        print("target:", target)
        print("target_confidence:", self.target_emb.asnumpy()[0][target])
        print("input: %d, target: %d, adversarial: %d" % (input, target, adversarial))


    def _reverse_norm(self, image_tensor):
        """
        Reverse the normalization of a given image tensor.

        Args:
            image_tensor : Tensor.

        Returns:
            Tensor.
        """
        tensor = image_tensor * self.STD[:, None, None] + self.MEAN[:, None, None]
        return tensor

    def _apply(self, image_tensor, mask_tensor, reference_tensor):
        """
        Apply a mask over an image.

        Args:
            image_tensor : Canvas on which the mask is applied.
            mask_tensor : Mask to apply over the image.
            reference_tensor : Reference for the mask boundaries.

        Returns:
            Tensor.
        """
        tensor = mindspore.numpy.where(reference_tensor == 0, image_tensor, mask_tensor)
        return tensor

    def _create_mask(self, face_image):
        """
        Create the mask image from facial landmarks.

        Args:
            face_image : Image of a detected face.

        Returns:
            mask_tensor : A mask image.
        """
        mask = Image.new('RGB', face_image.size, color=(0, 0, 0))
        d = ImageDraw.Draw(mask)
        landmarks = fr.face_landmarks(np.array(face_image))
        # Polygon over the lower half of the face: chin points below the nose tip,
        # closed through a nose-bridge point.
        area = [landmark
                for landmark in landmarks[0]['chin']
                if landmark[1] > max(landmarks[0]['nose_tip'])[1]]
        area.append(landmarks[0]['nose_bridge'][1])
        d.polygon(area, fill=(255, 255, 255))
        mask_array = np.array(mask).astype(np.float32)
        # Give the masked region an initial gray value of 0.5 and zero everywhere else.
        mask_array = np.where(mask_array == 255., 0.5, 0.).astype(np.float32)

        mask_tensor = Tensor(mask_array)
        # HWC -> CHW
        mask_tensor = mask_tensor.swapaxes(0, 2).swapaxes(1, 2)
        mask_tensor.requires_grad = True
        return mask_tensor


def detect_face(image_loc):
    """
    Helper function to run facial detection and alignment using dlib.
    Detects a given face and aligns it with dlib's 5-point landmark detector.

    Args:
        image_loc : Image file location.

    Returns:
        face_image : Resized (112 x 112) face image.
    """
    detector = dlib.get_frontal_face_detector()
    shape_predictor = dlib.shape_predictor(frm.pose_predictor_model_location())
    image = dlib.load_rgb_image(image_loc)
    dets = detector(image, 1)

    faces = dlib.full_object_detections()
    for detection in dets:
        faces.append(shape_predictor(image, detection))
    face_image = Image.fromarray(dlib.get_face_chip(image, faces[0], size=112))

    return face_image


def load_data(path_to_data):
    """
    Helper function for loading image data. Allows the user to load the input,
    target, and test images.

    Args:
        path_to_data : Path to the given data.

    Returns:
        list : List of resized face images.
    """
    img_files = [f for f in os.listdir(path_to_data) if re.search(r'.*\.(jpe?g|png)', f)]
    img_files_locs = [os.path.join(path_to_data, f) for f in img_files]

    image_list = []
    for loc in img_files_locs:
        image_list.append(detect_face(loc))

    return image_list


+86 -0   examples/community/face_adversarial_attack/README.md

@@ -0,0 +1,86 @@
# Physical Adversarial Attack on Face Recognition



## Description
This project implements a physical adversarial attack against a face recognition model on the MindSpore platform: it trains an adversarial mask that, when worn on the face, carries out either a targeted or a non-targeted attack.
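
A minimal end-to-end sketch of how the attack is driven, mirroring example_target_attack.py from this commit (paths, output filenames and the "target_attack" flag are the defaults used there):

    import numpy as np
    import matplotlib.image as mp
    from mindspore import context
    import AFR

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

    inputs = AFR.load_data('opencv_photo/input/')     # face that will wear the mask
    targets = AFR.load_data('opencv_photo/target/')   # identity to impersonate

    attack = AFR.FaceAdversarialAttack(inputs[0], targets[0])
    adversarial_tensor, mask_tensor = attack.train("target_attack")

    mp.imsave('./outputs/对抗图像.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
    mp.imsave('./outputs/口罩.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))
    attack.test()   # report how the model now classifies input, target and adversarial faces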



## Model Architecture
Uses the FaceRecognition model officially trained with Huawei MindSpore:
https://www.mindspore.cn/resources/hub/details?MindSpore/1.7/facerecognition_ms1mv2




## Requirements
MindSpore 1.7; the hardware platform is GPU.



## Script Description
├── readme.md
├── opencv_photo
│   ├── adv_input                     // adversarial images
│   ├── input                         // input images
│   └── target                        // target images
├── outputs                           // images produced during training
├── FaceRecognition                   // model setup
├── AFR.py                            // training script
├── camera.py                         // OpenCV image capture
│── example_non-target_attack.py      // non-targeted attack training
│── example_target_attack.py          // targeted attack training
│── loss_design.py                    // loss and optimizer settings
└── test.py                           // evaluates the attack


## Loading the Model
Method 1:

    # Load the FaceRecognition model through the mindspore_hub package
    import mindspore_hub as mshub
    from mindspore import context

    def get_net():
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="GPU",
                            device_id=0)
        model = "mindspore/1.7/facerecognition_ms1mv2"
        network = mshub.load(model)
        network.set_train(False)
        return network

Method 2:

Load the model with the get_model function from https://gitee.com/mindspore/models/blob/master/research/cv/FaceRecognition/eval.py in the MindSpore models repository; a sketch follows below.
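
A minimal sketch of Method 2, assuming eval.py from the models repository has been copied into a local FaceRecognition package and a trained checkpoint is available locally; the builder name and its arguments (get_model here, used without arguments) follow that file and may differ between versions:

    from mindspore import context, load_checkpoint, load_param_into_net
    from FaceRecognition.eval import get_model   # assumption: helper exposed by the copied eval.py

    def get_net():
        context.set_context(mode=context.GRAPH_MODE, device_target="GPU", device_id=0)
        network = get_model()                                     # build the FaceRecognition backbone
        params = load_checkpoint("facerecognition_ms1mv2.ckpt")   # hypothetical checkpoint path
        load_param_into_net(network, params)
        network.set_train(False)
        return network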

## Training
Targeted attack:

    $ cd face_adversarial_attack/example/
    $ python example_target_attack.py

Non-targeted attack:

    $ cd face_adversarial_attack/example/
    $ python example_non-target_attack.py



## Default Training Parameters
optimizer=Adam, learning_rate=0.01, weight_decay=0.0001, epoch=2000
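
A minimal sketch of how these defaults are applied in AFR.py: the adversarial mask is the only trainable Parameter and Adam updates it for 2000 epochs (the 3×112×112 mask shape below is an assumption derived from the 112×112 face chips produced by detect_face):

    import numpy as np
    from mindspore import Parameter, Tensor, nn

    mask = Parameter(Tensor(np.zeros((3, 112, 112), dtype=np.float32)))  # assumed mask shape
    optimizer = nn.Adam([mask], learning_rate=0.01, weight_decay=0.0001)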


## Evaluation
Method 1:

    AFR.FaceAdversarialAttack.test()

Method 2:

    $ cd face_adversarial_attack/example/
    $ python test.py

+34 -0   examples/community/face_adversarial_attack/camera.py

@@ -0,0 +1,34 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import cv2


# Capture two photos from the default camera: each frame is previewed and written to
# opencv_photo/input/; press 'q' to keep the current frame and move on to the next shot.
capture = cv2.VideoCapture(0)

for i in range(2):
    while True:
        ret, frame = capture.read()
        width, height = capture.get(3), capture.get(4)
        cv2.imwrite('./opencv_photo/input/input' + str(i) + '.png', frame)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) == ord('q'):
            break

print(width, height)

capture.release()
cv2.destroyAllWindows()

+40 -0   examples/community/face_adversarial_attack/example_non-target_attack.py

@@ -0,0 +1,40 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import matplotlib.image as mp
from mindspore import context
import AFR

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")




if __name__ == '__main__':
    inputs = AFR.load_data('opencv_photo/input/')
    targets = AFR.load_data('opencv_photo/target/')

    adversarial = AFR.FaceAdversarialAttack(inputs[0], targets[0])
    attack_method = "non-target attack"

    adversarial_tensor, mask_tensor = adversarial.train(attack_method)

    mp.imsave('./outputs/对抗图像.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
    mp.imsave('./outputs/口罩.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))

    adversarial.test()

+41 -0   examples/community/face_adversarial_attack/example_target_attack.py

@@ -0,0 +1,41 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import matplotlib.image as mp
from mindspore import context
import AFR

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")




if __name__ == '__main__':
    inputs = AFR.load_data('opencv_photo/input/')
    targets = AFR.load_data('opencv_photo/target/')

    adversarial = AFR.FaceAdversarialAttack(inputs[0], targets[0])
    attack_method = "target_attack"

    adversarial_tensor, mask_tensor = adversarial.train(attack_method)

    mp.imsave('./outputs/对抗图像.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
    mp.imsave('./outputs/口罩.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))

    adversarial.test()

+132 -0   examples/community/face_adversarial_attack/loss_design.py

@@ -0,0 +1,132 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore
from mindspore import ops, nn, Tensor
from mindspore.dataset.vision.py_transforms import ToTensor
import mindspore.dataset.vision.py_transforms as P




class MyTrainOneStepCell(nn.TrainOneStepCell):
    """
    Encapsulation class for network training.

    Appends an optimizer to the training network so that the construct
    function can be called to build the backward graph.

    Args:
        network (Cell): The training network. Note that the loss function should already be wrapped in.
        optimizer (Optimizer): Optimizer for updating the weights.
        sens (Number): The adjust parameter. Default: 1.0.
    """

    def __init__(self, network, optimizer, sens=1.0):
        super(MyTrainOneStepCell, self).__init__(network, optimizer, sens)
        self.grad = ops.composite.GradOperation(get_all=True, sens_param=False)

    def construct(self, *inputs):
        """Run one training step: compute the loss and gradients, then apply the update."""
        loss = self.network(*inputs)
        grads = self.grad(self.network)(*inputs)
        self.optimizer(grads)
        return grads, loss



class MyWithLossCell(nn.Cell):
    """Combine the face recognition backbone with the attack loss.

    The mask is pasted onto the normalized input face wherever the mask is
    non-zero, and the resulting adversarial face is fed through the network.
    """

    def __init__(self, net, loss_fn, input_tensor):
        super(MyWithLossCell, self).__init__(auto_prefix=False)
        self.net = net
        self._loss_fn = loss_fn
        self.STD = Tensor([0.229, 0.224, 0.225])
        self.MEAN = Tensor([0.485, 0.456, 0.406])
        self.expand_dims = mindspore.ops.ExpandDims()
        self.normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.tensorize = ToTensor()
        self.input_tensor = input_tensor
        self.input_emb = self.net(self.expand_dims(self.input_tensor, 0))

    def construct(self, mask_tensor):
        ref = mask_tensor
        adversarial_tensor = mindspore.numpy.where(
            ref == 0,
            self.input_tensor,
            (mask_tensor - self.MEAN[:, None, None]) / self.STD[:, None, None])
        adversarial_emb = self.net(self.expand_dims(adversarial_tensor, 0))
        loss = self._loss_fn(adversarial_emb, self.input_emb, mask_tensor)
        return loss

    @property
    def backbone_network(self):
        return self.net


class FaceLossTargetAttack(nn.Cell):
    """Loss function of the targeted attack."""

    def __init__(self, target_emb):
        super(FaceLossTargetAttack, self).__init__()
        self.uniformreal = ops.UniformReal(seed=2)
        self.sum = ops.ReduceSum(keep_dims=False)
        self.norm = nn.Norm(keep_dims=True)
        self.zeroslike = ops.ZerosLike()
        self.concat_op1 = ops.Concat(1)
        self.concat_op2 = ops.Concat(2)
        self.pow = ops.Pow()
        self.reduce_sum = ops.operations.ReduceSum()
        self.target_emb = target_emb
        self.abs = ops.Abs()
        self.reduce_mean = ops.ReduceMean()

    def construct(self, adversarial_emb, input_emb, mask_tensor):
        # Negative cosine similarity between the adversarial embedding and the target
        # embedding: minimizing it pulls the adversarial face towards the target identity.
        prod_sum = self.reduce_sum(adversarial_emb * self.target_emb, (1,))
        square1 = self.reduce_sum(ops.functional.square(adversarial_emb), (1,))
        square2 = self.reduce_sum(ops.functional.square(self.target_emb), (1,))
        denom = ops.functional.sqrt(square1) * ops.functional.sqrt(square2)
        loss = -(prod_sum / denom)
        print("dis_loss:", loss)
        return loss


class FaceLossNoTargetAttack(nn.Cell):
    """Loss function of the non-targeted attack."""

    def __init__(self, target_emb):
        """Initialization."""
        super(FaceLossNoTargetAttack, self).__init__()
        self.uniformreal = ops.UniformReal(seed=2)
        self.sum = ops.ReduceSum(keep_dims=False)
        self.norm = nn.Norm(keep_dims=True)
        self.zeroslike = ops.ZerosLike()
        self.concat_op1 = ops.Concat(1)
        self.concat_op2 = ops.Concat(2)
        self.pow = ops.Pow()
        self.reduce_sum = ops.operations.ReduceSum()
        self.target_emb = target_emb
        self.abs = ops.Abs()
        self.reduce_mean = ops.ReduceMean()

    def construct(self, adversarial_emb, input_emb, mask_tensor):
        # Cosine similarity between the adversarial embedding and the original input
        # embedding: minimizing it pushes the adversarial face away from its own identity.
        prod_sum = self.reduce_sum(adversarial_emb * input_emb, (1,))
        square1 = self.reduce_sum(ops.functional.square(adversarial_emb), (1,))
        square2 = self.reduce_sum(ops.functional.square(input_emb), (1,))
        denom = ops.functional.sqrt(square1) * ops.functional.sqrt(square2)
        loss = prod_sum / denom
        print("cosine_loss:", loss)
        return loss



BIN   examples/community/face_adversarial_attack/opencv_photo/input/input1.jpg   (1024 × 774, 88 kB)

BIN   examples/community/face_adversarial_attack/opencv_photo/target/target1.jpg   (1523 × 2048, 653 kB)

+70 -0   examples/community/face_adversarial_attack/test.py

@@ -0,0 +1,70 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import matplotlib.image as mp
from mindspore import context, Tensor
import mindspore
from mindspore.dataset.vision.py_transforms import ToTensor
import mindspore.dataset.vision.py_transforms as P
from mindspore.dataset.vision.py_transforms import ToPIL as ToPILImage
from FaceRecognition.eval import get_net
import AFR

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")


imageize = ToPILImage()

if __name__ == '__main__':
    """
    The input image, target image and adversarial image are tested using the FaceRecognition model.
    """
    image = AFR.load_data('opencv_photo/adv_input')
    inputs = AFR.load_data('opencv_photo/input/')
    targets = AFR.load_data('opencv_photo/target/')

    tensorize = ToTensor()
    normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    expand_dims = mindspore.ops.ExpandDims()
    MEAN = Tensor([0.485, 0.456, 0.406])
    STD = Tensor([0.229, 0.224, 0.225])

    resnet = get_net()

    image = mp.imread("./对抗图像.jpg")
    adv = Tensor(normalize(tensorize(image)))

    input_tensor = Tensor(normalize(tensorize(inputs[0])))
    target_tensor = Tensor(normalize(tensorize(targets[0])))

    adversarial_emb = resnet(expand_dims(adv, 0))
    input_emb = resnet(expand_dims(input_tensor, 0))
    target_emb = resnet(expand_dims(target_tensor, 0))

    adversarial = np.argmax(adversarial_emb.asnumpy())
    target = np.argmax(target_emb.asnumpy())
    input = np.argmax(input_emb.asnumpy())

    print("input:", input)
    print("input_confidence:", input_emb.asnumpy()[0][input])
    print("================================")
    print("adversarial:", adversarial)
    print("adversarial_confidence:", adversarial_emb.asnumpy()[0][adversarial])
    print("Confidence changes for input:", adversarial_emb.asnumpy()[0][input])
    print("================================")

    print("input: %d, target: %d, adversarial: %d" % (input, target, adversarial))
