
Delete files under examples/face_adversarial_attack/example

“update”
pull/418/head
君君臣臣君 lmj 2 years ago
commit 74756a91fa
18 changed files with 66 additions and 763 deletions
  1. examples/community/face_adversarial_attack/README.md (+14, -11)
  2. examples/community/face_adversarial_attack/adversarial_attack.py (+37, -31)
  3. examples/community/face_adversarial_attack/camera.py (+0, -34)
  4. examples/community/face_adversarial_attack/example_non-target_attack.py (+5, -5)
  5. examples/community/face_adversarial_attack/example_target_attack.py (+4, -4)
  6. examples/community/face_adversarial_attack/loss_design.py (+2, -1)
  7. examples/community/face_adversarial_attack/photos/input/input1.jpg (+0, -0)
  8. examples/community/face_adversarial_attack/photos/target/target1.jpg (+0, -0)
  9. examples/community/face_adversarial_attack/test.py (+4, -4)
  10. examples/face_adversarial_attack/example/AFR.py (+0, -270)
  11. examples/face_adversarial_attack/example/README.md (+0, -86)
  12. examples/face_adversarial_attack/example/camera.py (+0, -34)
  13. examples/face_adversarial_attack/example/example_non-target_attack.py (+0, -40)
  14. examples/face_adversarial_attack/example/example_target_attack.py (+0, -41)
  15. examples/face_adversarial_attack/example/loss_design.py (+0, -132)
  16. examples/face_adversarial_attack/example/opencv_photo/input/input1.jpg (BIN)
  17. examples/face_adversarial_attack/example/opencv_photo/target/target1.jpg (BIN)
  18. examples/face_adversarial_attack/example/test.py (+0, -70)

examples/community/face_adversarial_attack/README.md (+14, -11)

@@ -3,12 +3,12 @@

 ##描述
-本项目是对人脸识别模型的物理对抗攻击,通过生成对抗口罩,使人脸佩戴后实现目标攻击和非目标攻击,并应用于mindspore平台。
+本项目是对人脸识别模型的物理对抗攻击,通过生成对抗口罩,使人脸佩戴后实现有目标攻击和非目标攻击,并应用于MindSpore平台。

 ##模型结构
-采用华为mindspore官方训练的FaceRecognition模型
+采用华为MindSpore官方训练的FaceRecognition模型
 https://www.mindspore.cn/resources/hub/details?MindSpore/1.7/facerecognition_ms1mv2

@@ -21,16 +21,15 @@ mindspore=1.7,硬件平台为GPU。

 ##脚本说明
 ├── readme.md
-├── opencv_photo
+├── photos
 │ ├── adv_input //对抗图像
 │ ├── input //输入图像
 │ └── target //目标图像
 ├── outputs //训练后的图像
 ├── FaceRecognition //模型设置
-├── AFR.py //训练脚本
-├── camera.py //opencv图像采集
+├── adversarial_attack.py //训练脚本
 │── example_non-target_attack.py //无目标攻击训练
-│── example_target_attack.py //目标攻击训练
+│── example_target_attack.py //目标攻击训练
 │── loss_design.py //训练优化设置
 └── test.py //评估攻击效果

@@ -58,14 +57,14 @@ mindspore=1.7,硬件平台为GPU。
 利用 MindSpore代码仓中的https://gitee.com/mindspore/models/blob/master/research/cv/FaceRecognition/eval.py的get_model函数加载模型

 ##训练过程
-目标攻击:
-$ cd face_adversarial_attack/example/
+目标攻击:
+$ cd face_adversarial_attack/
 $ python example_target_attack.py

 非目标攻击:
-$ cd face_adversarial_attack/example/
+$ cd face_adversarial_attack/
 $ python example_non-target_attack.py

@@ -78,9 +77,13 @@ optimizer=adam, learning rate=0.01, weight_decay=0.0001, epoch=2000
 ##评估过程
 评估方法一:
-AFR.Attack.test()
+adversarial_attack.Attack.test()

 评估方法二:
-$ cd face_adversarial_attack/example/
+$ cd face_adversarial_attack/
 $ python test.py
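
For readers following the updated README, a minimal end-to-end sketch of evaluation method one (评估方法一) as things stand after this commit. The directory layout, function names, and output paths come from this diff; the import name adversarial_attack and the GRAPH_MODE/GPU context are assumptions taken from the example scripts rather than from the README, and note that the class actually exposed is FaceAdversarialAttack, not Attack:

import numpy as np
import matplotlib.image as mp
from mindspore import context
import adversarial_attack  # assumed import name after the AFR.py -> adversarial_attack.py rename

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

# photos/input/ and photos/target/ are the sample directories shipped in this commit.
inputs = adversarial_attack.load_data('photos/input/')
targets = adversarial_attack.load_data('photos/target/')

attack = adversarial_attack.FaceAdversarialAttack(inputs[0], targets[0])
adversarial_tensor, mask_tensor = attack.train("target_attack")  # or "non-target attack"

# Save the optimized adversarial face and the mask, mirroring example_target_attack.py.
mp.imsave('./outputs/adversarial_example.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
mp.imsave('./outputs/mask.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))

attack.test()  # evaluation method one: prints predicted identities and confidences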




examples/community/face_adversarial_attack/AFR.py → examples/community/face_adversarial_attack/adversarial_attack.py

@@ -15,39 +15,38 @@
import os
import re
import numpy as np
-import face_recognition as fr
-import face_recognition_models as frm
-import dlib
-import matplotlib.image as mp
-from PIL import Image, ImageDraw
import mindspore
import mindspore.dataset.vision.py_transforms as P
from mindspore.dataset.vision.py_transforms import ToPIL as ToPILImage
-from mindspore import Parameter, ops, nn, Tensor
from mindspore.dataset.vision.py_transforms import ToTensor
+import dlib
+import matplotlib.image as mp
+import face_recognition as fr
+import face_recognition_models as frm
+from PIL import Image, ImageDraw
+from mindspore import Parameter, ops, nn, Tensor
from loss_design import MyTrainOneStepCell, MyWithLossCell, FaceLossTargetAttack, FaceLossNoTargetAttack
from FaceRecognition.eval import get_net


class FaceAdversarialAttack(object):
    """
-    Class used to create adversarial facial recognition attacks
+    Class used to create adversarial facial recognition attacks.
    """

    def __init__(self, input_img, target_img, seed=None):
        """
-        Initialization for Attack class.
+        Initialization for attack class.

        Args:
-            input_img : Image to train on.
-            target_img : Image to target the adversarial attack against.
-            seed : optional Sets custom seed for reproducability. Default is generated randomly.
+            input_img (numpy.ndarray): The input image.
+            target_img (numpy.ndarray): The target image.
+            seed (int): optional Sets custom seed for reproducability. Default is generated randomly.

        """

        if (seed is not None): np.random.seed(seed)
        self.MEAN = Tensor([0.485, 0.456, 0.406])
        self.STD = Tensor([0.229, 0.224, 0.225])
-        self.LOSS = Tensor(0)
        self.expand_dims = mindspore.ops.ExpandDims()
        self.imageize = ToPILImage()
        self.tensorize = ToTensor()

@@ -55,8 +54,8 @@ class FaceAdversarialAttack(object):
        self.resnet = get_net()
        self.input_tensor = Tensor(self.normalize(self.tensorize(input_img)))
        self.target_tensor = Tensor(self.normalize(self.tensorize(target_img)))
-        mp.imsave('./outputs/input图像.jpg', np.transpose(self._reverse_norm(self.input_tensor).asnumpy(), (1, 2, 0)))
-        mp.imsave('./outputs/target图像.jpg',
+        mp.imsave('./outputs/input_image.jpg', np.transpose(self._reverse_norm(self.input_tensor).asnumpy(), (1, 2, 0)))
+        mp.imsave('./outputs/target_image.jpg',
                  np.transpose(self._reverse_norm(self.target_tensor).asnumpy(), (1, 2, 0)))

@@ -73,14 +72,22 @@
    def train(self, attack_method):
        """
        Optimized adversarial image.
+
+        Args:
+            attack_method (Sting) : Including target attack and non-target attack.
+
+        Returns:
+            Tensor, adversarial image.
+            Tensor, mask image.
+
        """

        if attack_method == "non-target attack":
-            LOSS = FaceLossNoTargetAttack(self.target_emb)
+            loss = FaceLossNoTargetAttack(self.target_emb)
        if attack_method == "target_attack":
-            LOSS = FaceLossTargetAttack(self.target_emb)
+            loss = FaceLossTargetAttack(self.target_emb)

-        net_with_criterion = MyWithLossCell(self.resnet, LOSS, self.input_tensor)
+        net_with_criterion = MyWithLossCell(self.resnet, loss, self.input_tensor)
        train_net = MyTrainOneStepCell(net_with_criterion, self.opt)

        for i in range(2000):

@@ -136,13 +143,13 @@

    def _reverse_norm(self, image_tensor):
        """
-        Reverses normalization for a given image_tensor
+        Reverses normalization for a given image_tensor.

        Args:
-            image_tensor : Tensor
+            image_tensor (Tensor): image.

        Returns:
-            Tensor
+            Tensor, image.
        """
        tensor = image_tensor * self.STD[:, None, None] + self.MEAN[:, None, None]

@@ -157,12 +164,12 @@
        Apply a mask over an image.

        Args:
-            image_tensor : Canvas to be used to apply mask on.
-            mask_tensor : Mask to apply over the image.
-            reference_tensor : Used to reference mask boundaries
+            image_tensor (Tensor): Canvas to be used to apply mask on.
+            mask_tensor (Tensor): Mask to apply over the image.
+            reference_tensor (Tensor): Used to reference mask boundaries

        Returns:
-            Tensor
+            Tensor, image.
        """
        tensor = mindspore.numpy.where((reference_tensor == 0), image_tensor, mask_tensor)

@@ -173,10 +180,10 @@
        Create mask image.

        Args:
-            face_image : image of a detected face.
+            face_image (PIL.Image): image of a detected face.

        Returns:
-            mask_tensor : A mask image.
+            mask_tensor : a mask image.
        """

        mask = Image.new('RGB', face_image.size, color=(0, 0, 0))

@@ -211,10 +218,10 @@
        Reverses normalization for a given image_tensor.

        Args:
-            image_tensor : Tensor.
+            image_tensor (Tensor): Tensor.

        Returns:
-            Tensor.
+            Tensor, image.
        """
        tensor = image_tensor * self.STD[:, None, None] + self.MEAN[:, None, None]
        return tensor

@@ -227,11 +234,10 @@ def detect_face(image_loc):
    detector.

    Args:
-        image_loc : image file location.
+        image_loc (numpy.ndarray): image file location.

    Returns:
        face_image : Resized face image.
-
    """

    detector = dlib.get_frontal_face_detector()

@@ -253,7 +259,7 @@ def load_data(path_to_data):
    and test images.

    Args:
-        path_to_data : Path to the given data.
+        path_to_data (String): Path to the given data.

    Returns:
        list : List of resized face images.
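
The load_data helper documented in the last hunk scans a directory for .jpg/.jpeg/.png files and pushes each file through detect_face, which aligns the face with dlib and returns a 112x112 crop. A minimal sketch of that preparation step for a single file, lifted from the detect_face shown further down in the deleted AFR.py (the sample path is the one shipped in this commit; dlib and face_recognition_models must be installed):

import dlib
import face_recognition_models as frm
from PIL import Image

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(frm.pose_predictor_model_location())

image = dlib.load_rgb_image('photos/input/input1.jpg')
faces = dlib.full_object_detections()
for det in detector(image, 1):
    faces.append(predictor(image, det))

# 112x112 aligned face chip, the format FaceAdversarialAttack expects for input_img / target_img.
face_image = Image.fromarray(dlib.get_face_chip(image, faces[0], size=112))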

examples/community/face_adversarial_attack/camera.py (+0, -34)

@@ -1,34 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import cv2


capture = cv2.VideoCapture(0)


for i in range(2):
while(True):
ret, frame = capture.read()
width, height = capture.get(3), capture.get(4)
cv2.imwrite('./opencv_photo/input/input'+str(i)+'.png', frame)
cv2.imshow('frame', frame)
if cv2.waitKey(1) == ord('q'):
break


print(width, height)

capture.release()
cv2.destroyAllWindows()

examples/community/face_adversarial_attack/example_non-target_attack.py (+5, -5)

@@ -16,7 +16,7 @@
import numpy as np
import matplotlib.image as mp
from mindspore import context
-import AFR
+import ad

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

@@ -25,8 +25,8 @@ context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

if __name__ == '__main__':

-    inputs = AFR.load_data('opencv_photo/input/')
-    targets = AFR.load_data('opencv_photo/target/')
+    inputs = AFR.load_data('photos/input/')
+    targets = AFR.load_data('photos/target/')

    adversarial = AFR.FaceAdversarialAttack(inputs[0], targets[0])

@@ -34,7 +34,7 @@ if __name__ == '__main__':

    adversarial_tensor, mask_tensor = adversarial.train(attack_method)

-    mp.imsave('./outputs/对抗图像.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
-    mp.imsave('./outputs/口罩.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))
+    mp.imsave('./outputs/adversarial_example.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
+    mp.imsave('./outputs/mask.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))

    adversarial.test()

examples/community/face_adversarial_attack/example_target_attack.py (+4, -4)

@@ -25,8 +25,8 @@ context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

if __name__ == '__main__':

-    inputs = AFR.load_data('opencv_photo/input/')
-    targets = AFR.load_data('opencv_photo/target/')
+    inputs = AFR.load_data('photos/input/')
+    targets = AFR.load_data('photos/target/')

    adversarial = AFR.FaceAdversarialAttack(inputs[0], targets[0])

@@ -35,7 +35,7 @@ if __name__ == '__main__':

    adversarial_tensor, mask_tensor = adversarial.train(attack_method)

-    mp.imsave('./outputs/对抗图像.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
-    mp.imsave('./outputs/口罩.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))
+    mp.imsave('./outputs/adversarial_example.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
+    mp.imsave('./outputs/mask.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))

    adversarial.test()
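
Taken together with example_non-target_attack.py above, the two drivers differ only in the attack_method literal handed to train(). Note that the accepted strings are formatted inconsistently ("target_attack" with an underscore, "non-target attack" with a space), and any other value leaves the loss criterion unset. A condensed sketch only, with AFR standing for whatever name the renamed adversarial_attack module is imported under in the examples:

for attack_method in ("target_attack", "non-target attack"):  # literals copied from this diff
    adversarial = AFR.FaceAdversarialAttack(inputs[0], targets[0])
    adversarial_tensor, mask_tensor = adversarial.train(attack_method)
    adversarial.test()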

examples/community/face_adversarial_attack/loss_design.py (+2, -1)

@@ -105,8 +105,9 @@ class FaceLossTargetAttack(nn.Cell):

class FaceLossNoTargetAttack(nn.Cell):
    """The loss function of the non-target attack"""

    def __init__(self, target_emb):
-        """初始化"""
+        """Initialization"""
        super(FaceLossNoTargetAttack, self).__init__()
        self.uniformreal = ops.UniformReal(seed=2)
        self.sum = ops.ReduceSum(keep_dims=False)


examples/community/face_adversarial_attack/opencv_photo/input/input1.jpg → examples/community/face_adversarial_attack/photos/input/input1.jpg


examples/community/face_adversarial_attack/opencv_photo/target/target1.jpg → examples/community/face_adversarial_attack/photos/target/target1.jpg


examples/community/face_adversarial_attack/test.py (+4, -4)

@@ -33,9 +33,9 @@ if __name__ == '__main__':
    The input image, target image and adversarial image are tested using the FaceRecognition model.
    """

-    image = AFR.load_data('opencv_photo/adv_input')
-    inputs = AFR.load_data('opencv_photo/input/')
-    targets = AFR.load_data('opencv_photo/target/')
+    image = AFR.load_data('photos/adv_input/')
+    inputs = AFR.load_data('photos/input/')
+    targets = AFR.load_data('photos/target/')

    tensorize = ToTensor()
    normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

@@ -45,7 +45,7 @@ if __name__ == '__main__':

    resnet = get_net()

-    image = mp.imread("./对抗图像.jpg")
+    image = mp.imread("./outputs/adversarial_example.jpg")
    adv = Tensor(normalize(tensorize(image)))

    input_tensor = Tensor(normalize(tensorize(inputs[0])))


examples/face_adversarial_attack/example/AFR.py (+0, -270)

@@ -1,270 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import re
import numpy as np
import mindspore
import mindspore.dataset.vision.py_transforms as P
from mindspore.dataset.vision.py_transforms import ToPIL as ToPILImage
from mindspore import Parameter, ops, nn, Tensor
from mindspore.dataset.vision.py_transforms import ToTensor
import dlib
import matplotlib.image as mp
import face_recognition as fr
import face_recognition_models as frm
from PIL import Image, ImageDraw
from loss_design import MyTrainOneStepCell, MyWithLossCell, FaceLossTargetAttack, FaceLossNoTargetAttack
from FaceRecognition.eval import get_net

class FaceAdversarialAttack(object):
"""
Class used to create adversarial facial recognition attacks
"""

def __init__(self, input_img, target_img, seed=None):
"""
Initialization for Attack class.

Args:
input_img : Image to train on.
target_img : Image to target the adversarial attack against.
seed : optional Sets custom seed for reproducability. Default is generated randomly.

"""

if (seed is not None): np.random.seed(seed)
self.MEAN = Tensor([0.485, 0.456, 0.406])
self.STD = Tensor([0.229, 0.224, 0.225])
self.LOSS = Tensor(0)
self.expand_dims = mindspore.ops.ExpandDims()
self.imageize = ToPILImage()
self.tensorize = ToTensor()
self.normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.resnet = get_net()
self.input_tensor = Tensor(self.normalize(self.tensorize(input_img)))
self.target_tensor = Tensor(self.normalize(self.tensorize(target_img)))
mp.imsave('./outputs/input图像.jpg', np.transpose(self._reverse_norm(self.input_tensor).asnumpy(), (1, 2, 0)))
mp.imsave('./outputs/target图像.jpg',
np.transpose(self._reverse_norm(self.target_tensor).asnumpy(), (1, 2, 0)))


self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))
self.adversarial_emb = None
self.mask_tensor = self._create_mask(input_img)
self.ref = self.mask_tensor
self.pm = Parameter(self.mask_tensor)
self.opt = nn.Adam([self.pm], learning_rate=0.01, weight_decay=0.0001)



def train(self, attack_method):
"""
Optimized adversarial image.
"""

if attack_method == "non-target attack":
LOSS = FaceLossNoTargetAttack(self.target_emb)
if attack_method == "target_attack":
LOSS = FaceLossTargetAttack(self.target_emb)

net_with_criterion = MyWithLossCell(self.resnet, LOSS, self.input_tensor)
train_net = MyTrainOneStepCell(net_with_criterion, self.opt)

for i in range(2000):

self.mask_tensor = Tensor(self.pm)

grads, loss = train_net(self.mask_tensor)

print("epoch %d ,loss: %f \n " % (i, loss.asnumpy().item()))

self.mask_tensor = ops.clip_by_value(
self.mask_tensor, Tensor(0, mindspore.float32), Tensor(1, mindspore.float32))

adversarial_tensor = self._apply(
self.input_tensor,
(self.mask_tensor - self.MEAN[:, None, None]) / self.STD[:, None, None],
self.ref)

adversarial_tensor = self._reverse_norm(adversarial_tensor)

return adversarial_tensor, self.mask_tensor

def test(self):
"""
Test the recognition of adversarial images by the model.
"""

adversarial_tensor = self._apply(
self.input_tensor,
(self.mask_tensor - self.MEAN[:, None, None] )/ self.STD[:, None, None],
self.ref)

self.adversarial_emb = self.resnet(self.expand_dims(adversarial_tensor, 0))
self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))

adversarial = np.argmax(self.adversarial_emb.asnumpy())
target = np.argmax(self.target_emb.asnumpy())
input = np.argmax(self.input_emb.asnumpy())

print("input:", input)
print("input_confidence:", self.input_emb.asnumpy()[0][input])
print("================================")
print("adversarial:", adversarial)
print("adversarial_confidence:", self.adversarial_emb.asnumpy()[0][adversarial])
print("Confidence changes for target:", self.adversarial_emb.asnumpy()[0][target])
print("Confidence changes for input:", self.adversarial_emb.asnumpy()[0][input])
print("================================")
print("target:", target)
print("target_confidence:", self.target_emb.asnumpy()[0][target])
print("input: %d, target: %d, adversarial: %d" % (input, target, adversarial))


def _reverse_norm(self, image_tensor):
"""
Reverses normalization for a given image_tensor

Args:
image_tensor : Tensor

Returns:
Tensor
"""
tensor = image_tensor * self.STD[:, None, None] + self.MEAN[:, None, None]

return tensor

def _apply(self,
image_tensor,
mask_tensor,
reference_tensor
):
"""
Apply a mask over an image.

Args:
image_tensor : Canvas to be used to apply mask on.
mask_tensor : Mask to apply over the image.
reference_tensor : Used to reference mask boundaries

Returns:
Tensor
"""
tensor = mindspore.numpy.where((reference_tensor == 0), image_tensor, mask_tensor)

return tensor

def _create_mask(self, face_image):
"""
Create mask image.

Args:
face_image : image of a detected face.

Returns:
mask_tensor : A mask image.
"""

mask = Image.new('RGB', face_image.size, color=(0, 0, 0))
d = ImageDraw.Draw(mask)
landmarks = fr.face_landmarks(np.array(face_image))
area = [landmark
for landmark in landmarks[0]['chin']
if landmark[1] > max(landmarks[0]['nose_tip'])[1]]
area.append(landmarks[0]['nose_bridge'][1])
d.polygon(area, fill=(255, 255, 255))
mask_array = np.array(mask)
mask_array = mask_array.astype(np.float32)

for i in range(mask_array.shape[0]):

for j in range(mask_array.shape[1]):

for k in range(mask_array.shape[2]):

if mask_array[i][j][k] == 255.:
mask_array[i][j][k] = 0.5
else:
mask_array[i][j][k] = 0

mask_tensor = Tensor(mask_array)
mask_tensor = mask_tensor.swapaxes(0, 2).swapaxes(1, 2)
mask_tensor.requires_grad = True
return mask_tensor

def _reverse_norm(self, image_tensor):
"""
Reverses normalization for a given image_tensor.

Args:
image_tensor : Tensor.

Returns:
Tensor.
"""
tensor = image_tensor * self.STD[:, None, None] + self.MEAN[:, None, None]
return tensor


def detect_face(image_loc):
"""
Helper function to run the facial detection and alignment process using
dlib. Detects a given face and aligns it using dlib's 5 point landmark
detector.

Args:
image_loc : image file location.

Returns:
face_image : Resized face image.

"""

detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor(frm.pose_predictor_model_location())
image = dlib.load_rgb_image(image_loc)
dets = detector(image, 1)

faces = dlib.full_object_detections()
for detection in dets:
faces.append(shape_predictor(image, detection))
face_image = Image.fromarray(dlib.get_face_chip(image, faces[0], size=112))

return face_image


def load_data(path_to_data):
"""
Helper function for loading image data. Allows user to load the input, target,
and test images.

Args:
path_to_data : Path to the given data.

Returns:
list : List of resized face images.
"""
img_files = [f for f in os.listdir(path_to_data) if re.search(r'.*\.(jpe?g|png)', f)]
img_files_locs = [os.path.join(path_to_data, f) for f in img_files]

image_list = []

for loc in img_files_locs:
image_list.append(detect_face(loc))

return image_list
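
One note on the listing above: _create_mask thresholds the drawn mask with three nested Python loops (255 becomes 0.5, everything else 0). The same result can be had with a single vectorized expression; this is a sketch only, not part of this commit:

# Equivalent to the nested loops in _create_mask: white polygon pixels become 0.5, the rest 0.
mask_array = np.where(np.array(mask, dtype=np.float32) == 255., 0.5, 0.).astype(np.float32)
mask_tensor = Tensor(mask_array).swapaxes(0, 2).swapaxes(1, 2)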


examples/face_adversarial_attack/example/README.md (+0, -86)

@@ -1,86 +0,0 @@
# 人脸识别物理对抗攻击



##描述
本项目是对人脸识别模型的物理对抗攻击,通过生成对抗口罩,使人脸佩戴后实现目标攻击和非目标攻击,并应用于mindspore平台。



##模型结构
采用华为mindspore官方训练的FaceRecognition模型
https://www.mindspore.cn/resources/hub/details?MindSpore/1.7/facerecognition_ms1mv2




##环境要求
mindspore=1.7,硬件平台为GPU。



##脚本说明
├── readme.md
├── opencv_photo
│ ├── adv_input //对抗图像
│ ├── input //输入图像
│ └── target //目标图像
├── outputs //训练后的图像
├── FaceRecognition //模型设置
├── AFR.py //训练脚本
├── camera.py //opencv图像采集
│── example_non-target_attack.py //无目标攻击训练
│── example_target_attack.py //目标攻击训练
│── loss_design.py //训练优化设置
└── test.py //评估攻击效果


##模型调用
方法一:

#基于mindspore_hub库调用FaceRecognition模型

import mindspore_hub as mshub
from mindspore import context
def get_net():
context.set_context(mode=context.GRAPH_MODE,
device_target="GPU",
device_id=0)
model = "mindspore/1.7/facerecognition_ms1mv2"
network = mshub.load(model)
network.set_train(False)
return network

方法二:

利用 MindSpore代码仓中的https://gitee.com/mindspore/models/blob/master/research/cv/FaceRecognition/eval.py的get_model函数加载模型

##训练过程
目标攻击:
$ cd face_adversarial_attack/example/
$ python example_target_attack.py

非目标攻击:
$ cd face_adversarial_attack/example/
$ python example_non-target_attack.py




##默认训练参数
optimizer=adam, learning rate=0.01, weight_decay=0.0001, epoch=2000


##评估过程
评估方法一:
AFR.Attack.test()

评估方法二:
$ cd face_adversarial_attack/example/
$ python test.py

examples/face_adversarial_attack/example/camera.py (+0, -34)

@@ -1,34 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import cv2


capture = cv2.VideoCapture(0)


for i in range(2):
while(True):
ret, frame = capture.read()
width, height = capture.get(3), capture.get(4)
cv2.imwrite('./opencv_photo/input/input'+str(i)+'.png', frame)
cv2.imshow('frame', frame)
if cv2.waitKey(1) == ord('q'):
break


print(width, height)

capture.release()
cv2.destroyAllWindows()

examples/face_adversarial_attack/example/example_non-target_attack.py (+0, -40)

@@ -1,40 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import matplotlib.image as mp
from mindspore import context
import AFR

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")




if __name__ == '__main__':

inputs = AFR.load_data('opencv_photo/input/')
targets = AFR.load_data('opencv_photo/target/')


adversarial = AFR.FaceAdversarialAttack(inputs[0], targets[0])
attack_method = "non-target attack"

adversarial_tensor, mask_tensor = adversarial.train(attack_method)

mp.imsave('./outputs/对抗图像.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
mp.imsave('./outputs/口罩.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))

adversarial.test()

examples/face_adversarial_attack/example/example_target_attack.py (+0, -41)

@@ -1,41 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import matplotlib.image as mp
from mindspore import context
import AFR

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")




if __name__ == '__main__':

inputs = AFR.load_data('opencv_photo/input/')
targets = AFR.load_data('opencv_photo/target/')


adversarial = AFR.FaceAdversarialAttack(inputs[0], targets[0])

attack_method = "target_attack"

adversarial_tensor, mask_tensor = adversarial.train(attack_method)

mp.imsave('./outputs/对抗图像.jpg', np.transpose(adversarial_tensor.asnumpy(), (1, 2, 0)))
mp.imsave('./outputs/口罩.jpg', np.transpose(mask_tensor.asnumpy(), (1, 2, 0)))

adversarial.test()

examples/face_adversarial_attack/example/loss_design.py (+0, -132)

@@ -1,132 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore
from mindspore import ops, nn, Tensor
from mindspore.dataset.vision.py_transforms import ToTensor
import mindspore.dataset.vision.py_transforms as P




class MyTrainOneStepCell(nn.TrainOneStepCell):
"""
Encapsulation class of network training.

Append an optimizer to the training network after that the construct
function can be called to create the backward graph.

Args:
network (Cell): The training network. Note that loss function should have been added.
optimizer (Optimizer): Optimizer for updating the weights.
sens (Number): The adjust parameter. Default: 1.0.
"""

def __init__(self, network, optimizer, sens=1.0):
super(MyTrainOneStepCell, self).__init__(network, optimizer, sens)
self.grad = ops.composite.GradOperation(get_all=True, sens_param=False)

def construct(self, *inputs):
"""Defines the computation performed."""
loss = self.network(*inputs)
grads = self.grad(self.network)(*inputs)
self.optimizer(grads)
return grads, loss



class MyWithLossCell(nn.Cell):
def __init__(self, net, loss_fn, input_tensor):
super(MyWithLossCell, self).__init__(auto_prefix=False)
self.net = net
self._loss_fn = loss_fn
self.STD = Tensor([0.229, 0.224, 0.225])
self.MEAN = Tensor([0.485, 0.456, 0.406])
self.expand_dims = mindspore.ops.ExpandDims()
self.normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.tensorize = ToTensor()
self.input_tensor = input_tensor
self.input_emb = self.net(self.expand_dims(self.input_tensor, 0))

def construct(self, mask_tensor):
ref = mask_tensor
adversarial_tensor = mindspore.numpy.where(
(ref == 0),
self.input_tensor,
(mask_tensor - self.MEAN[:, None, None] )/ self.STD[:, None, None])
adversarial_emb = self.net(self.expand_dims(adversarial_tensor, 0))
loss = self._loss_fn( adversarial_emb, self.input_emb, mask_tensor)
return loss

@property
def backbone_network(self):
return self.net


class FaceLossTargetAttack(nn.Cell):
"""The loss function of the target attack"""

def __init__(self, target_emb):
super(FaceLossTargetAttack, self).__init__()
self.uniformreal = ops.UniformReal(seed=2)
self.sum = ops.ReduceSum(keep_dims=False)
self.norm = nn.Norm(keep_dims=True)
self.zeroslike = ops.ZerosLike()
self.concat_op1 = ops.Concat(1)
self.concat_op2 = ops.Concat(2)
self.pow = ops.Pow()
self.reduce_sum = ops.operations.ReduceSum()
self.target_emb = target_emb
self.abs = ops.Abs()
self.reduce_mean = ops.ReduceMean()

def construct(self, adversarial_emb, input_emb, mask_tensor):
prod_sum = self.reduce_sum(adversarial_emb * self.target_emb, (1,))
square1 = self.reduce_sum(ops.functional.square(adversarial_emb), (1,))
square2 = self.reduce_sum(ops.functional.square(self.target_emb), (1,))
denom = ops.functional.sqrt(square1) * ops.functional.sqrt(square2)
loss = -(prod_sum / denom)
print("dis_loss:", loss)
return loss


class FaceLossNoTargetAttack(nn.Cell):
"""The loss function of the non-target attack"""
def __init__(self, target_emb):
"""初始化"""
super(FaceLossNoTargetAttack, self).__init__()
self.uniformreal = ops.UniformReal(seed=2)
self.sum = ops.ReduceSum(keep_dims=False)
self.norm = nn.Norm(keep_dims=True)
self.zeroslike = ops.ZerosLike()
self.concat_op1 = ops.Concat(1)
self.concat_op2 = ops.Concat(2)
self.pow = ops.Pow()
self.reduce_sum = ops.operations.ReduceSum()
self.target_emb = target_emb
self.abs = ops.Abs()
self.reduce_mean = ops.ReduceMean()

def construct(self, adversarial_emb, input_emb, mask_tensor):
prod_sum = self.reduce_sum(adversarial_emb * input_emb, (1,))
square1 = self.reduce_sum(ops.functional.square(adversarial_emb), (1,))
square2 = self.reduce_sum(ops.functional.square(input_emb), (1,))
denom = ops.functional.sqrt(square1) * ops.functional.sqrt(square2)
loss = prod_sum / denom
print("cosine_loss:", loss)
return loss
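
For reference, the two criteria above (the community copy of loss_design.py is only touched by a docstring change in this commit) both reduce to cosine similarity between embeddings. Writing a for the adversarial embedding, t for the target embedding and x for the input embedding:

L_{\text{target}}(a, t) = -\frac{a \cdot t}{\lVert a \rVert \, \lVert t \rVert}, \qquad
L_{\text{non-target}}(a, x) = \frac{a \cdot x}{\lVert a \rVert \, \lVert x \rVert}

Minimizing the first pulls the adversarial embedding toward the target identity (targeted attack); minimizing the second pushes it away from the original input identity (non-targeted attack).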



examples/face_adversarial_attack/example/opencv_photo/input/input1.jpg (BIN)

Width: 1024  |  Height: 774  |  Size: 88 kB

examples/face_adversarial_attack/example/opencv_photo/target/target1.jpg (BIN)

Width: 1523  |  Height: 2048  |  Size: 653 kB

examples/face_adversarial_attack/example/test.py (+0, -70)

@@ -1,70 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import matplotlib.image as mp
from mindspore import context, Tensor
import mindspore
from mindspore.dataset.vision.py_transforms import ToTensor
import mindspore.dataset.vision.py_transforms as P
from mindspore.dataset.vision.py_transforms import ToPIL as ToPILImage
from FaceRecognition.eval import get_net
import AFR

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")


imageize = ToPILImage()

if __name__ == '__main__':
"""
The input image, target image and adversarial image are tested using the FaceRecognition model.
"""

image = AFR.load_data('opencv_photo/adv_input')
inputs = AFR.load_data('opencv_photo/input/')
targets = AFR.load_data('opencv_photo/target/')

tensorize = ToTensor()
normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
expand_dims = mindspore.ops.ExpandDims()
MEAN = Tensor([0.485, 0.456, 0.406])
STD = Tensor([0.229, 0.224, 0.225])

resnet = get_net()

image = mp.imread("./对抗图像.jpg")
adv = Tensor(normalize(tensorize(image)))

input_tensor = Tensor(normalize(tensorize(inputs[0])))
target_tensor = Tensor(normalize(tensorize(targets[0])))

adversarial_emb = resnet(expand_dims(adv, 0))
input_emb = resnet(expand_dims(input_tensor, 0))
target_emb = resnet(expand_dims(target_tensor, 0))

adversarial = np.argmax(adversarial_emb.asnumpy())
target = np.argmax(target_emb.asnumpy())
input = np.argmax(input_emb.asnumpy())

print("input:", input)
print("input_confidence:", input_emb.asnumpy()[0][input])
print("================================")
print("adversarial:", adversarial)
print("adversarial_confidence:", adversarial_emb.asnumpy()[0][adversarial])
print("Confidence changes for input:", adversarial_emb.asnumpy()[0][input])
print("================================")

print("input:%d, target:%d, adversarial:%d" % (input, target, adversarial))
