|
- # Copyright 2022 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ============================================================================
- import os
- import re
- import numpy as np
- import face_recognition as fr
- import face_recognition_models as frm
- import dlib
- import matplotlib.image as mp
- from PIL import Image, ImageDraw
- import mindspore
- import mindspore.dataset.vision.py_transforms as P
- from mindspore.dataset.vision.py_transforms import ToPIL as ToPILImage
- from mindspore.dataset.vision.py_transforms import ToTensor
- from mindspore import Parameter, ops, nn, Tensor
- from loss_design import MyTrainOneStepCell, MyWithLossCell, FaceLossTargetAttack, FaceLossNoTargetAttack
- from FaceRecognition.eval import get_net
-
class FaceAdversarialAttack(object):
    """
    Create adversarial facial-recognition attacks by optimizing a mask
    overlaid on the lower part of an input face image.

    The mask pixels are trainable parameters; everything outside the mask
    region is taken from the (normalized) input image.
    """

    def __init__(self, input_img, target_img, seed=None):
        """
        Initialize the attack.

        Args:
            input_img (PIL.Image): The input (attacker) face image.
            target_img (PIL.Image): The target face image.
            seed (int): Optional seed for reproducibility. Default: None
                (NumPy's global random state is left untouched).
        """
        if seed is not None:
            np.random.seed(seed)
        # ImageNet normalization constants used by the recognition backbone.
        self.MEAN = Tensor([0.485, 0.456, 0.406])
        self.STD = Tensor([0.229, 0.224, 0.225])
        self.expand_dims = mindspore.ops.ExpandDims()
        self.imageize = ToPILImage()
        self.tensorize = ToTensor()
        self.normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.resnet = get_net()
        self.input_tensor = Tensor(self.normalize(self.tensorize(input_img)))
        self.target_tensor = Tensor(self.normalize(self.tensorize(target_img)))
        # Save de-normalized copies of both images for visual inspection.
        mp.imsave('./outputs/input_image.jpg',
                  np.transpose(self._reverse_norm(self.input_tensor).asnumpy(), (1, 2, 0)))
        mp.imsave('./outputs/target_image.jpg',
                  np.transpose(self._reverse_norm(self.target_tensor).asnumpy(), (1, 2, 0)))

        self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
        self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))
        self.adversarial_emb = None
        self.mask_tensor = self._create_mask(input_img)
        # Non-zero entries of self.ref mark pixels that belong to the mask region.
        self.ref = self.mask_tensor
        self.pm = Parameter(self.mask_tensor)
        self.opt = nn.Adam([self.pm], learning_rate=0.01, weight_decay=0.0001)

    def train(self, attack_method):
        """
        Optimize the adversarial mask.

        Args:
            attack_method (str): Either "non-target attack" or "target_attack".
                (The two spellings are inconsistent but preserved for
                backward compatibility with existing callers.)

        Returns:
            Tensor, adversarial image (de-normalized).
            Tensor, mask image.

        Raises:
            ValueError: If `attack_method` is not one of the two
                recognized values (previously this surfaced as an
                UnboundLocalError on `loss`).
        """
        if attack_method == "non-target attack":
            loss = FaceLossNoTargetAttack(self.target_emb)
        elif attack_method == "target_attack":
            loss = FaceLossTargetAttack(self.target_emb)
        else:
            raise ValueError(
                "attack_method must be 'non-target attack' or 'target_attack', "
                "got %r" % (attack_method,))

        net_with_criterion = MyWithLossCell(self.resnet, loss, self.input_tensor)
        train_net = MyTrainOneStepCell(net_with_criterion, self.opt)

        for i in range(2000):
            self.mask_tensor = Tensor(self.pm)

            grads, loss_value = train_net(self.mask_tensor)

            print("epoch %d ,loss: %f \n " % (i, loss_value.asnumpy().item()))

            # Keep the mask in valid (un-normalized) image range [0, 1].
            self.mask_tensor = ops.clip_by_value(
                self.mask_tensor, Tensor(0, mindspore.float32), Tensor(1, mindspore.float32))

        # Normalize the mask into the backbone's input space before pasting it
        # over the (already normalized) input image.
        adversarial_tensor = self._apply(
            self.input_tensor,
            (self.mask_tensor - self.MEAN[:, None, None]) / self.STD[:, None, None],
            self.ref)

        adversarial_tensor = self._reverse_norm(adversarial_tensor)

        return adversarial_tensor, self.mask_tensor

    def test(self):
        """
        Report how the model classifies the input, target, and adversarial
        images, printing the winning class index and confidences for each.
        """
        adversarial_tensor = self._apply(
            self.input_tensor,
            (self.mask_tensor - self.MEAN[:, None, None]) / self.STD[:, None, None],
            self.ref)

        self.adversarial_emb = self.resnet(self.expand_dims(adversarial_tensor, 0))
        self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
        self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))

        # argmax over the embedding/logit vector gives the predicted identity.
        adversarial = np.argmax(self.adversarial_emb.asnumpy())
        target = np.argmax(self.target_emb.asnumpy())
        input_label = np.argmax(self.input_emb.asnumpy())  # renamed: don't shadow builtin `input`

        print("input:", input_label)
        print("input_confidence:", self.input_emb.asnumpy()[0][input_label])
        print("================================")
        print("adversarial:", adversarial)
        print("adversarial_confidence:", self.adversarial_emb.asnumpy()[0][adversarial])
        print("Confidence changes for target:", self.adversarial_emb.asnumpy()[0][target])
        print("Confidence changes for input:", self.adversarial_emb.asnumpy()[0][input_label])
        print("================================")
        print("target:", target)
        print("target_confidence:", self.target_emb.asnumpy()[0][target])
        print("input: %d, target: %d, adversarial: %d" % (input_label, target, adversarial))

    def _reverse_norm(self, image_tensor):
        """
        Reverse the per-channel normalization of `image_tensor`.

        Args:
            image_tensor (Tensor): Normalized CHW image.

        Returns:
            Tensor, image in [0, 1] range.
        """
        return image_tensor * self.STD[:, None, None] + self.MEAN[:, None, None]

    def _apply(self, image_tensor, mask_tensor, reference_tensor):
        """
        Paste a mask over an image.

        Args:
            image_tensor (Tensor): Canvas the mask is applied on.
            mask_tensor (Tensor): Mask to apply over the image.
            reference_tensor (Tensor): Marks mask boundaries; pixels where it
                is zero keep the image value, all others take the mask value.

        Returns:
            Tensor, composited image.
        """
        return mindspore.numpy.where((reference_tensor == 0), image_tensor, mask_tensor)

    def _create_mask(self, face_image):
        """
        Build the initial trainable mask covering the lower face.

        The region is the polygon spanned by the chin landmarks below the
        nose tip, closed through a nose-bridge point.

        Args:
            face_image (PIL.Image): Image of a detected face.

        Returns:
            Tensor, CHW mask with value 0.5 inside the region and 0 outside.
        """
        mask = Image.new('RGB', face_image.size, color=(0, 0, 0))
        d = ImageDraw.Draw(mask)
        landmarks = fr.face_landmarks(np.array(face_image))
        area = [landmark
                for landmark in landmarks[0]['chin']
                if landmark[1] > max(landmarks[0]['nose_tip'])[1]]
        area.append(landmarks[0]['nose_bridge'][1])
        d.polygon(area, fill=(255, 255, 255))

        mask_array = np.array(mask).astype(np.float32)
        # Vectorized replacement for the former triple Python loop:
        # painted (255) pixels become the 0.5 grey starting mask, rest stay 0.
        mask_array = np.where(mask_array == 255., 0.5, 0.).astype(np.float32)

        mask_tensor = Tensor(mask_array)
        # HWC -> CHW to match the backbone's input layout.
        mask_tensor = mask_tensor.swapaxes(0, 2).swapaxes(1, 2)
        mask_tensor.requires_grad = True
        return mask_tensor
-
-
def detect_face(image_loc):
    """
    Detect and align a face using dlib.

    Runs dlib's frontal face detector, locates landmarks with the 5-point
    predictor, and extracts an aligned 112x112 face chip.

    Args:
        image_loc (str): Path to the image file on disk.
            (The previous docstring claimed numpy.ndarray, but
            dlib.load_rgb_image takes a filename.)

    Returns:
        PIL.Image: Aligned face image, resized to 112x112.

    Raises:
        ValueError: If no face is found in the image (previously this
            surfaced as an opaque IndexError on faces[0]).
    """
    detector = dlib.get_frontal_face_detector()
    shape_predictor = dlib.shape_predictor(frm.pose_predictor_model_location())
    image = dlib.load_rgb_image(image_loc)
    dets = detector(image, 1)
    if len(dets) == 0:
        raise ValueError("no face detected in image: %s" % image_loc)

    faces = dlib.full_object_detections()
    for detection in dets:
        faces.append(shape_predictor(image, detection))
    # Only the first detected face is used.
    face_image = Image.fromarray(dlib.get_face_chip(image, faces[0], size=112))

    return face_image
-
-
def load_data(path_to_data):
    """
    Load and face-align every image in a directory.

    Args:
        path_to_data (str): Path to the directory containing the images.

    Returns:
        list: Aligned face images (one per jpg/jpeg/png file), in sorted
        filename order so results are reproducible across platforms.
    """
    # Anchor the extension at the end of the name so files such as
    # "photo.png.bak" are no longer picked up (the old unanchored
    # r'.*\.(jpe?g|png)' search matched them); match case-insensitively.
    pattern = re.compile(r'\.(jpe?g|png)$', re.IGNORECASE)
    # os.listdir order is arbitrary — sort for deterministic output order.
    img_files = sorted(f for f in os.listdir(path_to_data) if pattern.search(f))
    img_files_locs = [os.path.join(path_to_data, f) for f in img_files]

    return [detect_face(loc) for loc in img_files_locs]
|