@@ -3,12 +3,12 @@
## Description
This project is a physical adversarial attack against a face recognition model: it generates an adversarial face mask so that a face wearing it achieves target attacks and non-target attacks, implemented on the mindspore platform.
This project is a physical adversarial attack against a face recognition model: it generates an adversarial face mask so that a face wearing it achieves targeted attacks and non-target attacks, implemented on the MindSpore platform.
## Model structure
The FaceRecognition model officially trained by Huawei mindspore is used.
The FaceRecognition model officially trained by Huawei MindSpore is used.
https://www.mindspore.cn/resources/hub/details?MindSpore/1.7/facerecognition_ms1mv2
@@ -21,16 +21,15 @@ mindspore=1.7, and the hardware platform is GPU.
## Script description
├── readme.md
├── opencv_photo
├── photos
│ ├── adv_input // adversarial images
│ ├── input // input images
│ └── target // target images
├── outputs // images produced by training
├── FaceRecognition // model setup
├── AFR.py // training script
├── camera.py // OpenCV image capture
├── adversarial_attack.py // training script
│── example_non-target_attack.py // non-target attack training
│── example_target_attack.py // target attack training
│── example_target_attack.py // targeted attack training
│── loss_design.py // training and optimization settings
└── test.py // evaluates the attack effect
@@ -58,14 +57,14 @@ mindspore=1.7, and the hardware platform is GPU.
The model is loaded with the get_model function from https://gitee.com/mindspore/models/blob/master/research/cv/FaceRecognition/eval.py in the MindSpore model repository.
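As an illustrative sketch only (not code from this repository), loading the backbone through that helper and embedding a single face crop could look as follows; the 112x112 input size is an assumption, and `get_net` is the local wrapper imported later in this change as `from FaceRecognition.eval import get_net`.

```python
# Hypothetical sketch: load the FaceRecognition backbone and embed one face crop.
# Assumes a 112x112 RGB input that has already been normalized.
import numpy as np
from mindspore import Tensor

from FaceRecognition.eval import get_net

net = get_net()                                            # pretrained FaceRecognition backbone
face = np.random.rand(1, 3, 112, 112).astype(np.float32)   # placeholder for a preprocessed face crop
embedding = net(Tensor(face))                              # identity embedding used by the attack
print(embedding.shape)
```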
## Training process
Target attack:
Targeted attack:
$ cd face_adversarial_attack/example/
$ cd face_adversarial_attack/
$ python example_target_attack.py
Non-target attack:
$ cd face_adversarial_attack/example/
$ cd face_adversarial_attack/
$ python example_non-target_attack.py
@@ -78,9 +77,13 @@ optimizer=adam, learning rate=0.01, weight_decay=0.0001, epoch=2000
## Evaluation process
Evaluation method 1:
AFR.Attack.test()
adversarial_attack.Attack.test()
Evaluation method 2:
$ cd face_adversarial_attack/example/
$ cd face_adversarial_attack/
$ python test.py
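The hunk header above quotes the training settings (optimizer=adam, learning rate=0.01, weight_decay=0.0001, epoch=2000). As a minimal sketch, assuming the trainable variable is a mask-shaped Parameter (the name and shape below are hypothetical), the optimizer could be built like this:

```python
# Minimal sketch (hypothetical names/shape): an Adam optimizer over a trainable mask,
# using the hyperparameters quoted in the README.
import numpy as np
import mindspore.nn as nn
from mindspore import Parameter, Tensor

mask = Parameter(Tensor(np.zeros((3, 112, 112), dtype=np.float32)), name="adversarial_mask")
opt = nn.Adam([mask], learning_rate=0.01, weight_decay=0.0001)
# A training loop would then run for 2000 epochs, updating only `mask`.
```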
@@ -15,39 +15,38 @@
import os
import re
import numpy as np
import face_recognition as fr
import face_recognition_models as frm
import dlib
import matplotlib.image as mp
from PIL import Image, ImageDraw
import mindspore
import mindspore.dataset.vision.py_transforms as P
from mindspore.dataset.vision.py_transforms import ToPIL as ToPILImage
from mindspore import Parameter, ops, nn, Tensor
from mindspore.dataset.vision.py_transforms import ToTensor
import dlib
import matplotlib.image as mp
import face_recognition as fr
import face_recognition_models as frm
from PIL import Image, ImageDraw
from mindspore import Parameter, ops, nn, Tensor
from loss_design import MyTrainOneStepCell, MyWithLossCell, FaceLossTargetAttack, FaceLossNoTargetAttack
from FaceRecognition.eval import get_net
class FaceAdversarialAttack(object):
    """
    Class used to create adversarial facial recognition attacks
    Class used to create adversarial facial recognition attacks.
    """
    def __init__(self, input_img, target_img, seed=None):
        """
        Initialization for Attack class.
        Initialization for attack class.
        Args:
            input_img : Image to train on.
            target_img : Image to target the adversarial attack against.
            seed : optional Sets custom seed for reproducability. Default is generated randomly.
            input_img (numpy.ndarray): The input image.
            target_img (numpy.ndarray): The target image.
            seed (int): optional. Sets a custom seed for reproducibility. Default is generated randomly.
        """
        if (seed is not None): np.random.seed(seed)
        self.MEAN = Tensor([0.485, 0.456, 0.406])
        self.STD = Tensor([0.229, 0.224, 0.225])
        self.LOSS = Tensor(0)
        self.expand_dims = mindspore.ops.ExpandDims()
        self.imageize = ToPILImage()
        self.tensorize = ToTensor()
@@ -55,8 +54,8 @@ class FaceAdversarialAttack(object):
        self.resnet = get_net()
        self.input_tensor = Tensor(self.normalize(self.tensorize(input_img)))
        self.target_tensor = Tensor(self.normalize(self.tensorize(target_img)))
        mp.imsave('./outputs/input图像.jpg', np.transpose(self._reverse_norm(self.input_tensor).asnumpy(), (1, 2, 0)))
        mp.imsave('./outputs/target图像.jpg',
        mp.imsave('./outputs/input_image.jpg', np.transpose(self._reverse_norm(self.input_tensor).asnumpy(), (1, 2, 0)))
        mp.imsave('./outputs/target_image.jpg',
                  np.transpose(self._reverse_norm(self.target_tensor).asnumpy(), (1, 2, 0)))
@@ -73,14 +72,22 @@ class FaceAdversarialAttack(object):
    def train(self, attack_method):
        """
        Optimized adversarial image.
        Args:
            attack_method (String): Including target attack and non-target attack.
        Returns:
            Tensor, adversarial image.
            Tensor, mask image.
        """
        if attack_method == "non-target attack":
            LOSS = FaceLossNoTargetAttack(self.target_emb)
            loss = FaceLossNoTargetAttack(self.target_emb)
        if attack_method == "target_attack":
            LOSS = FaceLossTargetAttack(self.target_emb)
            loss = FaceLossTargetAttack(self.target_emb)
        net_with_criterion = MyWithLossCell(self.resnet, LOSS, self.input_tensor)
        net_with_criterion = MyWithLossCell(self.resnet, loss, self.input_tensor)
        train_net = MyTrainOneStepCell(net_with_criterion, self.opt)
        for i in range(2000):
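For reference, a hedged usage sketch of the train() entry point above; the image-loading helper, the photo paths, and the mode strings are taken from the example scripts later in this change.

```python
# Hypothetical usage of FaceAdversarialAttack.train(); the attack_method string must
# match one of the branches above ("target_attack" or "non-target attack").
from adversarial_attack import FaceAdversarialAttack, load_data

inputs = load_data('photos/input/')
targets = load_data('photos/target/')
attack = FaceAdversarialAttack(inputs[0], targets[0])
adversarial_tensor, mask_tensor = attack.train("target_attack")
```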
@@ -136,13 +143,13 @@ class FaceAdversarialAttack(object):
    def _reverse_norm(self, image_tensor):
        """
        Reverses normalization for a given image_tensor
        Reverses normalization for a given image_tensor.
        Args:
            image_tensor : Tensor
            image_tensor (Tensor): image.
        Returns:
            Tensor
            Tensor, image.
        """
        tensor = image_tensor * self.STD[:, None, None] + self.MEAN[:, None, None]
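A quick numeric check of the de-normalization above, using the ImageNet mean/std set in __init__: since the input was normalized as (x - mean) / std, multiplying by std and adding mean recovers the original pixel.

```python
# Sanity check of the reverse-normalization formula with the constructor's constants.
import numpy as np

mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
x = np.array([0.5, 0.5, 0.5])                 # an arbitrary RGB pixel
x_norm = (x - mean) / std                     # forward normalization
assert np.allclose(x_norm * std + mean, x)    # _reverse_norm recovers the original value
```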
@@ -157,12 +164,12 @@ class FaceAdversarialAttack(object):
        Apply a mask over an image.
        Args:
            image_tensor : Canvas to be used to apply mask on.
            mask_tensor : Mask to apply over the image.
            reference_tensor : Used to reference mask boundaries
            image_tensor (Tensor): Canvas to be used to apply mask on.
            mask_tensor (Tensor): Mask to apply over the image.
            reference_tensor (Tensor): Used to reference mask boundaries.
        Returns:
            Tensor
            Tensor, image.
        """
        tensor = mindspore.numpy.where((reference_tensor == 0), image_tensor, mask_tensor)
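The masking step above keeps the original pixel wherever the reference tensor is zero and takes the mask pixel elsewhere. A tiny standalone example of the same mindspore.numpy.where pattern, with made-up 2x2 tensors:

```python
# Standalone illustration of the where-based masking used above (toy tensors).
import numpy as np
import mindspore.numpy as mnp
from mindspore import Tensor

reference = Tensor(np.array([[0., 1.], [1., 0.]], dtype=np.float32))
image = Tensor(np.full((2, 2), 0.2, dtype=np.float32))    # "canvas" pixels
mask = Tensor(np.full((2, 2), 0.9, dtype=np.float32))     # mask pixels
out = mnp.where(reference == 0, image, mask)               # mask wins wherever reference != 0
print(out)
```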
@@ -173,10 +180,10 @@ class FaceAdversarialAttack(object):
        Create mask image.
        Args:
            face_image : image of a detected face.
            face_image (PIL.Image): image of a detected face.
        Returns:
            mask_tensor : A mask image.
            mask_tensor : a mask image.
        """
        mask = Image.new('RGB', face_image.size, color=(0, 0, 0))
@@ -211,10 +218,10 @@ class FaceAdversarialAttack(object):
        Reverses normalization for a given image_tensor.
        Args:
            image_tensor : Tensor.
            image_tensor (Tensor): Tensor.
        Returns:
            Tensor.
            Tensor, image.
        """
        tensor = image_tensor * self.STD[:, None, None] + self.MEAN[:, None, None]
        return tensor
@@ -227,11 +234,10 @@ def detect_face(image_loc):
    detector.
    Args:
        image_loc : image file location.
        image_loc (str): image file location.
    Returns:
        face_image : Resized face image.
    """
    detector = dlib.get_frontal_face_detector()
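detect_face above relies on dlib's frontal face detector. A rough, hypothetical sketch of that pattern follows; the crop logic and output size are assumptions, not necessarily the project's exact behaviour.

```python
# Hypothetical sketch of dlib-based face detection and cropping;
# the project's detect_face() may use different padding or resize dimensions.
import dlib
import face_recognition as fr
from PIL import Image


def crop_first_face(image_loc, size=(112, 112)):
    detector = dlib.get_frontal_face_detector()
    image = fr.load_image_file(image_loc)      # RGB numpy array
    rects = detector(image, 1)                 # upsample once to catch small faces
    if not rects:
        return None
    r = rects[0]
    face = image[r.top():r.bottom(), r.left():r.right()]
    return Image.fromarray(face).resize(size)
```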
@@ -253,7 +259,7 @@ def load_data(path_to_data):
    and test images.
    Args:
        path_to_data : Path to the given data.
        path_to_data (String): Path to the given data.
    Returns:
        list : List of resized face images.
@@ -1,34 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import cv2

# Capture webcam frames and keep overwriting the i-th input photo until 'q' is pressed.
capture = cv2.VideoCapture(0)
for i in range(2):
    while True:
        ret, frame = capture.read()
        width, height = capture.get(3), capture.get(4)
        cv2.imwrite('./opencv_photo/input/input' + str(i) + '.png', frame)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) == ord('q'):
            break
    print(width, height)
capture.release()
cv2.destroyAllWindows()
@@ -16,7 +16,7 @@
import numpy as np
import matplotlib.image as mp
from mindspore import context
import AFR
import adversarial_attack as AFR
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
@@ -25,8 +25,8 @@ context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
if __name__ == '__main__':
    inputs = AFR.load_data('opencv_photo/input/')
    targets = AFR.load_data('opencv_photo/target/')
    inputs = AFR.load_data('photos/input/')
    targets = AFR.load_data('photos/target/')
    adversarial = AFR.FaceAdversarialAttack(inputs[0], targets[0])
@@ -34,7 +34,7 @@ if __name__ == '__main__':
    adversarial_tensor, mask_tensor = adversarial.train(attack_method)
    mp.imsave('./outputs/对抗图像.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
    mp.imsave('./outputs/口罩.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))
    mp.imsave('./outputs/adversarial_example.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
    mp.imsave('./outputs/mask.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))
    adversarial.test()
@@ -25,8 +25,8 @@ context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
if __name__ == '__main__':
    inputs = AFR.load_data('opencv_photo/input/')
    targets = AFR.load_data('opencv_photo/target/')
    inputs = AFR.load_data('photos/input/')
    targets = AFR.load_data('photos/target/')
    adversarial = AFR.FaceAdversarialAttack(inputs[0], targets[0])
@@ -35,7 +35,7 @@ if __name__ == '__main__':
    adversarial_tensor, mask_tensor = adversarial.train(attack_method)
    mp.imsave('./outputs/对抗图像.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
    mp.imsave('./outputs/口罩.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))
    mp.imsave('./outputs/adversarial_example.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
    mp.imsave('./outputs/mask.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))
    adversarial.test()
@@ -105,8 +105,9 @@ class FaceLossTargetAttack(nn.Cell):
class FaceLossNoTargetAttack(nn.Cell):
    """The loss function of the non-target attack"""
    def __init__(self, target_emb):
        """初始化"""
        """Initialization"""
        super(FaceLossNoTargetAttack, self).__init__()
        self.uniformreal = ops.UniformReal(seed=2)
        self.sum = ops.ReduceSum(keep_dims=False)
@@ -33,9 +33,9 @@ if __name__ == '__main__':
    The input image, target image and adversarial image are tested using the FaceRecognition model.
    """
    image = AFR.load_data('opencv_photo/adv_input')
    inputs = AFR.load_data('opencv_photo/input/')
    targets = AFR.load_data('opencv_photo/target/')
    image = AFR.load_data('photos/adv_input/')
    inputs = AFR.load_data('photos/input/')
    targets = AFR.load_data('photos/target/')
    tensorize = ToTensor()
    normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
@@ -45,7 +45,7 @@ if __name__ == '__main__':
    resnet = get_net()
    image = mp.imread("./对抗图像.jpg")
    image = mp.imread("./outputs/adversarial_example.jpg")
    adv = Tensor(normalize(tensorize(image)))
    input_tensor = Tensor(normalize(tensorize(inputs[0])))
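The test.py hunk ends before the actual comparison. Purely as an illustrative sketch (not the repository's evaluation code), the adversarial and input tensors above could be compared through the backbone's embeddings, for example with an L2 distance:

```python
# Hypothetical comparison sketch: a smaller L2 distance means a more similar identity.
import numpy as np
from mindspore import ops

expand_dims = ops.ExpandDims()
adv_emb = resnet(expand_dims(adv, 0))             # embedding of the adversarial image
input_emb = resnet(expand_dims(input_tensor, 0))  # embedding of the original input
dist = np.linalg.norm(adv_emb.asnumpy() - input_emb.asnumpy())
print("L2 distance between adversarial and input embeddings:", dist)
```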